Posted to commits@accumulo.apache.org by ct...@apache.org on 2013/06/14 23:19:25 UTC

svn commit: r1493250 [2/3] - in /accumulo/trunk: core/src/main/java/org/apache/accumulo/core/ core/src/main/java/org/apache/accumulo/core/cli/ core/src/main/java/org/apache/accumulo/core/client/admin/ core/src/main/java/org/apache/accumulo/core/client/...
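
Note for readers skimming this part of the commit: the change that recurs across nearly every file below is swapping the removed Constants.NO_AUTHS constant for Authorizations.EMPTY when opening scanners over the metadata table (plus import reordering, added @Override/@Deprecated annotations, and whitespace cleanup). A minimal sketch of the resulting call pattern, assuming an already-authenticated Connector; the "connector" parameter, class name, and method name here are illustrative and not part of the commit:

    import org.apache.accumulo.core.Constants;
    import org.apache.accumulo.core.client.Connector;
    import org.apache.accumulo.core.client.Scanner;
    import org.apache.accumulo.core.client.TableNotFoundException;
    import org.apache.accumulo.core.security.Authorizations;

    class MetadataScanSketch {
      // Opens a scanner over the !METADATA table with no record-level authorizations,
      // passing the Authorizations.EMPTY constant where Constants.NO_AUTHS was used before.
      static Scanner openMetadataScanner(Connector connector) throws TableNotFoundException {
        return connector.createScanner(Constants.METADATA_TABLE_NAME, Authorizations.EMPTY);
      }
    }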

Modified: accumulo/trunk/server/src/main/java/org/apache/accumulo/server/master/state/MetaDataTableScanner.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/server/src/main/java/org/apache/accumulo/server/master/state/MetaDataTableScanner.java?rev=1493250&r1=1493249&r2=1493250&view=diff
==============================================================================
--- accumulo/trunk/server/src/main/java/org/apache/accumulo/server/master/state/MetaDataTableScanner.java (original)
+++ accumulo/trunk/server/src/main/java/org/apache/accumulo/server/master/state/MetaDataTableScanner.java Fri Jun 14 21:19:23 2013
@@ -37,6 +37,7 @@ import org.apache.accumulo.core.data.Key
 import org.apache.accumulo.core.data.Range;
 import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.iterators.user.WholeRowIterator;
+import org.apache.accumulo.core.security.Authorizations;
 import org.apache.accumulo.core.security.CredentialHelper;
 import org.apache.accumulo.core.security.thrift.TCredentials;
 import org.apache.accumulo.server.master.state.TabletLocationState.BadLocationStateException;
@@ -53,7 +54,7 @@ public class MetaDataTableScanner implem
     // scan over metadata table, looking for tablets in the wrong state based on the live servers and online tables
     try {
       Connector connector = instance.getConnector(auths.getPrincipal(), CredentialHelper.extractToken(auths));
-      mdScanner = connector.createBatchScanner(Constants.METADATA_TABLE_NAME, Constants.NO_AUTHS, 8);
+      mdScanner = connector.createBatchScanner(Constants.METADATA_TABLE_NAME, Authorizations.EMPTY, 8);
       configureScanner(mdScanner, state);
       mdScanner.setRanges(Collections.singletonList(range));
       iter = mdScanner.iterator();
@@ -62,7 +63,7 @@ public class MetaDataTableScanner implem
       throw new RuntimeException(ex);
     }
   }
-
+  
   static public void configureScanner(ScannerBase scanner, CurrentState state) {
     Constants.METADATA_PREV_ROW_COLUMN.fetch(scanner);
     scanner.fetchColumnFamily(Constants.METADATA_CURRENT_LOCATION_COLUMN_FAMILY);
@@ -90,6 +91,7 @@ public class MetaDataTableScanner implem
     }
   }
   
+  @Override
   public void finalize() {
     close();
   }
@@ -114,7 +116,7 @@ public class MetaDataTableScanner implem
       log.error(ex, ex);
       mdScanner.close();
       return null;
-    } 
+    }
   }
   
   public static TabletLocationState createTabletLocationState(Key k, Value v) throws IOException, BadLocationStateException {
@@ -174,7 +176,7 @@ public class MetaDataTableScanner implem
       throw new RuntimeException(ex);
     } catch (BadLocationStateException ex) {
       throw new RuntimeException(ex);
-    } 
+    }
   }
   
   @Override

Modified: accumulo/trunk/server/src/main/java/org/apache/accumulo/server/master/tableOps/BulkImport.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/server/src/main/java/org/apache/accumulo/server/master/tableOps/BulkImport.java?rev=1493250&r1=1493249&r2=1493250&view=diff
==============================================================================
--- accumulo/trunk/server/src/main/java/org/apache/accumulo/server/master/tableOps/BulkImport.java (original)
+++ accumulo/trunk/server/src/main/java/org/apache/accumulo/server/master/tableOps/BulkImport.java Fri Jun 14 21:19:23 2013
@@ -35,8 +35,6 @@ import java.util.concurrent.ExecutorServ
 import java.util.concurrent.Future;
 import java.util.concurrent.ThreadPoolExecutor;
 
-import org.apache.accumulo.trace.instrument.TraceExecutorService;
-import org.apache.accumulo.trace.instrument.Tracer;
 import org.apache.accumulo.core.Constants;
 import org.apache.accumulo.core.client.Connector;
 import org.apache.accumulo.core.client.Instance;
@@ -56,6 +54,7 @@ import org.apache.accumulo.core.data.Key
 import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.file.FileOperations;
 import org.apache.accumulo.core.master.state.tables.TableState;
+import org.apache.accumulo.core.security.Authorizations;
 import org.apache.accumulo.core.util.Pair;
 import org.apache.accumulo.core.util.SimpleThreadPool;
 import org.apache.accumulo.core.util.UtilWaitThread;
@@ -71,6 +70,8 @@ import org.apache.accumulo.server.tablet
 import org.apache.accumulo.server.util.MetadataTable;
 import org.apache.accumulo.server.zookeeper.DistributedWorkQueue;
 import org.apache.accumulo.server.zookeeper.TransactionWatcher.ZooArbitrator;
+import org.apache.accumulo.trace.instrument.TraceExecutorService;
+import org.apache.accumulo.trace.instrument.Tracer;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileStatus;
@@ -81,7 +82,6 @@ import org.apache.hadoop.io.Text;
 import org.apache.log4j.Logger;
 import org.apache.thrift.TException;
 
-
 /*
  * Bulk import makes requests of tablet servers, and those requests can take a
  * long time. Our communications to the tablet server may fail, so we won't know
@@ -104,7 +104,7 @@ import org.apache.thrift.TException;
 
 public class BulkImport extends MasterRepo {
   public static final String FAILURES_TXT = "failures.txt";
-
+  
   private static final long serialVersionUID = 1L;
   
   private static final Logger log = Logger.getLogger(BulkImport.class);
@@ -147,7 +147,7 @@ public class BulkImport extends MasterRe
     
     // check that the error directory exists and is empty
     FileSystem fs = master.getFileSystem();
-
+    
     Path errorPath = new Path(errorDir);
     FileStatus errorStatus = null;
     try {
@@ -334,7 +334,7 @@ class CompleteBulkImport extends MasterR
 class CopyFailed extends MasterRepo {
   
   private static final long serialVersionUID = 1L;
-
+  
   private String tableId;
   private String source;
   private String bulk;
@@ -367,10 +367,10 @@ class CopyFailed extends MasterRepo {
   
   @Override
   public Repo<Master> call(long tid, Master master) throws Exception {
-	//This needs to execute after the arbiter is stopped  
-	  
+    // This needs to execute after the arbiter is stopped
+    
     FileSystem fs = master.getFileSystem();
-	  
+    
     if (!fs.exists(new Path(error, BulkImport.FAILURES_TXT)))
       return new CleanUpBulkImport(tableId, source, bulk, error);
     
@@ -394,10 +394,10 @@ class CopyFailed extends MasterRepo {
      * I thought I could move files that have no file references in the table. However its possible a clone references a file. Therefore only move files that
      * have no loaded markers.
      */
-
+    
     // determine which failed files were loaded
     Connector conn = master.getConnector();
-    Scanner mscanner = new IsolatedScanner(conn.createScanner(Constants.METADATA_TABLE_NAME, Constants.NO_AUTHS));
+    Scanner mscanner = new IsolatedScanner(conn.createScanner(Constants.METADATA_TABLE_NAME, Authorizations.EMPTY));
     mscanner.setRange(new KeyExtent(new Text(tableId), null, null).toMetadataRange());
     mscanner.fetchColumnFamily(Constants.METADATA_BULKFILE_COLUMN_FAMILY);
     
@@ -439,7 +439,7 @@ class CopyFailed extends MasterRepo {
       
       bifCopyQueue.waitUntilDone(workIds);
     }
-
+    
     fs.delete(new Path(error, BulkImport.FAILURES_TXT), true);
     return new CleanUpBulkImport(tableId, source, bulk, error);
   }
@@ -452,7 +452,7 @@ class LoadFiles extends MasterRepo {
   
   private static ExecutorService threadPool = null;
   static {
-
+    
   }
   private static final Logger log = Logger.getLogger(BulkImport.class);
   
@@ -485,7 +485,7 @@ class LoadFiles extends MasterRepo {
       threadPool = new TraceExecutorService(pool);
     }
   }
-
+  
   @Override
   public Repo<Master> call(final long tid, final Master master) throws Exception {
     initializeThreadPool(master);
@@ -496,7 +496,7 @@ class LoadFiles extends MasterRepo {
       files.add(entry);
     }
     log.debug("tid " + tid + " importing " + files.size() + " files");
-
+    
     Path writable = new Path(this.errorDir, ".iswritable");
     if (!fs.createNewFile(writable)) {
       // Maybe this is a re-try... clear the flag and try again
@@ -576,7 +576,7 @@ class LoadFiles extends MasterRepo {
     } finally {
       out.close();
     }
-
+    
     // return the next step, which will perform cleanup
     return new CompleteBulkImport(tableId, source, bulk, errorDir);
   }
@@ -600,5 +600,5 @@ class LoadFiles extends MasterRepo {
     result.append("]");
     return result.toString();
   }
-
+  
 }

Modified: accumulo/trunk/server/src/main/java/org/apache/accumulo/server/master/tableOps/CompactRange.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/server/src/main/java/org/apache/accumulo/server/master/tableOps/CompactRange.java?rev=1493250&r1=1493249&r2=1493250&view=diff
==============================================================================
--- accumulo/trunk/server/src/main/java/org/apache/accumulo/server/master/tableOps/CompactRange.java (original)
+++ accumulo/trunk/server/src/main/java/org/apache/accumulo/server/master/tableOps/CompactRange.java Fri Jun 14 21:19:23 2013
@@ -41,6 +41,7 @@ import org.apache.accumulo.core.data.Key
 import org.apache.accumulo.core.data.Range;
 import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.master.state.tables.TableState;
+import org.apache.accumulo.core.security.Authorizations;
 import org.apache.accumulo.fate.Repo;
 import org.apache.accumulo.fate.zookeeper.IZooReaderWriter;
 import org.apache.accumulo.fate.zookeeper.ZooReaderWriter.Mutator;
@@ -87,10 +88,10 @@ class CompactionDriver extends MasterRep
       // compaction was canceled
       throw new ThriftTableOperationException(tableId, null, TableOperation.COMPACT, TableOperationExceptionType.OTHER, "Compaction canceled");
     }
-
+    
     MapCounter<TServerInstance> serversToFlush = new MapCounter<TServerInstance>();
     Connector conn = master.getConnector();
-    Scanner scanner = new IsolatedScanner(conn.createScanner(Constants.METADATA_TABLE_NAME, Constants.NO_AUTHS));
+    Scanner scanner = new IsolatedScanner(conn.createScanner(Constants.METADATA_TABLE_NAME, Authorizations.EMPTY));
     
     Range range = new KeyExtent(new Text(tableId), null, startRow == null ? null : new Text(startRow)).toMetadataRange();
     
@@ -189,7 +190,6 @@ class CompactionDriver extends MasterRep
   
 }
 
-
 public class CompactRange extends MasterRepo {
   
   private static final long serialVersionUID = 1L;
@@ -214,7 +214,7 @@ public class CompactRange extends Master
       endRow = null;
       iterators = Collections.emptyList();
     }
-
+    
     @Override
     public void write(DataOutput out) throws IOException {
       out.writeBoolean(startRow != null);
@@ -275,7 +275,7 @@ public class CompactRange extends Master
       return iterators;
     }
   }
-
+  
   public CompactRange(String tableId, byte[] startRow, byte[] endRow, List<IteratorSetting> iterators) throws ThriftTableOperationException {
     this.tableId = tableId;
     this.startRow = startRow.length == 0 ? null : startRow;
@@ -286,7 +286,7 @@ public class CompactRange extends Master
     } else {
       iterators = null;
     }
-
+    
     if (this.startRow != null && this.endRow != null && new Text(startRow).compareTo(new Text(endRow)) >= 0)
       throw new ThriftTableOperationException(tableId, null, TableOperation.COMPACT, TableOperationExceptionType.BAD_RANGE,
           "start row must be less than end row");
@@ -317,13 +317,13 @@ public class CompactRange extends Master
           for (int i = 1; i < tokens.length; i++) {
             if (tokens[i].startsWith(txidString))
               continue; // skip self
-
+              
             throw new ThriftTableOperationException(tableId, null, TableOperation.COMPACT, TableOperationExceptionType.OTHER,
                 "Another compaction with iterators is running");
           }
-
+          
           StringBuilder encodedIterators = new StringBuilder();
-
+          
           if (iterators != null) {
             Hex hex = new Hex();
             encodedIterators.append(",");
@@ -354,7 +354,7 @@ public class CompactRange extends Master
         String cvs = new String(currentValue);
         String[] tokens = cvs.split(",");
         long flushID = Long.parseLong(new String(tokens[0]));
-
+        
         String txidString = String.format("%016x", txid);
         
         StringBuilder encodedIterators = new StringBuilder();
@@ -368,9 +368,9 @@ public class CompactRange extends Master
         return ("" + flushID + encodedIterators).getBytes();
       }
     });
-
+    
   }
-
+  
   @Override
   public void undo(long tid, Master environment) throws Exception {
     try {

Modified: accumulo/trunk/server/src/main/java/org/apache/accumulo/server/master/tableOps/DeleteTable.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/server/src/main/java/org/apache/accumulo/server/master/tableOps/DeleteTable.java?rev=1493250&r1=1493249&r2=1493250&view=diff
==============================================================================
--- accumulo/trunk/server/src/main/java/org/apache/accumulo/server/master/tableOps/DeleteTable.java (original)
+++ accumulo/trunk/server/src/main/java/org/apache/accumulo/server/master/tableOps/DeleteTable.java Fri Jun 14 21:19:23 2013
@@ -34,6 +34,7 @@ import org.apache.accumulo.core.data.Ran
 import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.iterators.user.GrepIterator;
 import org.apache.accumulo.core.master.state.tables.TableState;
+import org.apache.accumulo.core.security.Authorizations;
 import org.apache.accumulo.core.util.CachedConfiguration;
 import org.apache.accumulo.fate.Repo;
 import org.apache.accumulo.server.ServerConstants;
@@ -88,7 +89,7 @@ class CleanUp extends MasterRepo {
     
     boolean done = true;
     Range tableRange = new KeyExtent(new Text(tableId), null, null).toMetadataRange();
-    Scanner scanner = master.getConnector().createScanner(Constants.METADATA_TABLE_NAME, Constants.NO_AUTHS);
+    Scanner scanner = master.getConnector().createScanner(Constants.METADATA_TABLE_NAME, Authorizations.EMPTY);
     MetaDataTableScanner.configureScanner(scanner, master);
     scanner.setRange(tableRange);
     
@@ -126,7 +127,7 @@ class CleanUp extends MasterRepo {
     try {
       // look for other tables that references this tables files
       Connector conn = master.getConnector();
-      BatchScanner bs = conn.createBatchScanner(Constants.METADATA_TABLE_NAME, Constants.NO_AUTHS, 8);
+      BatchScanner bs = conn.createBatchScanner(Constants.METADATA_TABLE_NAME, Authorizations.EMPTY, 8);
       try {
         bs.setRanges(Collections.singleton(Constants.NON_ROOT_METADATA_KEYSPACE));
         bs.fetchColumnFamily(Constants.METADATA_DATAFILE_COLUMN_FAMILY);

Modified: accumulo/trunk/server/src/main/java/org/apache/accumulo/server/master/tableOps/ExportTable.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/server/src/main/java/org/apache/accumulo/server/master/tableOps/ExportTable.java?rev=1493250&r1=1493249&r2=1493250&view=diff
==============================================================================
--- accumulo/trunk/server/src/main/java/org/apache/accumulo/server/master/tableOps/ExportTable.java (original)
+++ accumulo/trunk/server/src/main/java/org/apache/accumulo/server/master/tableOps/ExportTable.java Fri Jun 14 21:19:23 2013
@@ -46,6 +46,7 @@ import org.apache.accumulo.core.data.Key
 import org.apache.accumulo.core.data.KeyExtent;
 import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.master.state.tables.TableState;
+import org.apache.accumulo.core.security.Authorizations;
 import org.apache.accumulo.fate.Repo;
 import org.apache.accumulo.server.ServerConstants;
 import org.apache.accumulo.server.conf.ServerConfiguration;
@@ -95,7 +96,7 @@ class WriteExportFiles extends MasterRep
     
     checkOffline(conn);
     
-    Scanner metaScanner = conn.createScanner(Constants.METADATA_TABLE_NAME, Constants.NO_AUTHS);
+    Scanner metaScanner = conn.createScanner(Constants.METADATA_TABLE_NAME, Authorizations.EMPTY);
     metaScanner.setRange(new KeyExtent(new Text(tableInfo.tableID), null, null).toMetadataRange());
     
     // scan for locations
@@ -217,7 +218,7 @@ class WriteExportFiles extends MasterRep
     
     Map<String,String> uniqueFiles = new HashMap<String,String>();
     
-    Scanner metaScanner = conn.createScanner(Constants.METADATA_TABLE_NAME, Constants.NO_AUTHS);
+    Scanner metaScanner = conn.createScanner(Constants.METADATA_TABLE_NAME, Authorizations.EMPTY);
     metaScanner.fetchColumnFamily(Constants.METADATA_DATAFILE_COLUMN_FAMILY);
     Constants.METADATA_PREV_ROW_COLUMN.fetch(metaScanner);
     Constants.METADATA_TIME_COLUMN.fetch(metaScanner);

Modified: accumulo/trunk/server/src/main/java/org/apache/accumulo/server/master/tableOps/TraceRepo.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/server/src/main/java/org/apache/accumulo/server/master/tableOps/TraceRepo.java?rev=1493250&r1=1493249&r2=1493250&view=diff
==============================================================================
--- accumulo/trunk/server/src/main/java/org/apache/accumulo/server/master/tableOps/TraceRepo.java (original)
+++ accumulo/trunk/server/src/main/java/org/apache/accumulo/server/master/tableOps/TraceRepo.java Fri Jun 14 21:19:23 2013
@@ -16,12 +16,11 @@
  */
 package org.apache.accumulo.server.master.tableOps;
 
+import org.apache.accumulo.fate.Repo;
 import org.apache.accumulo.trace.instrument.Span;
 import org.apache.accumulo.trace.instrument.Trace;
 import org.apache.accumulo.trace.instrument.Tracer;
 import org.apache.accumulo.trace.thrift.TInfo;
-import org.apache.accumulo.fate.Repo;
-
 
 /**
  * 
@@ -29,7 +28,7 @@ import org.apache.accumulo.fate.Repo;
 public class TraceRepo<T> implements Repo<T> {
   
   private static final long serialVersionUID = 1L;
-
+  
   TInfo tinfo;
   Repo<T> repo;
   
@@ -38,11 +37,6 @@ public class TraceRepo<T> implements Rep
     tinfo = Tracer.traceInfo();
   }
   
-  /*
-   * (non-Javadoc)
-   * 
-   * @see org.apache.accumulo.server.fate.Repo#isReady(long, java.lang.Object)
-   */
   @Override
   public long isReady(long tid, T environment) throws Exception {
     Span span = Trace.trace(tinfo, repo.getDescription());
@@ -53,11 +47,6 @@ public class TraceRepo<T> implements Rep
     }
   }
   
-  /*
-   * (non-Javadoc)
-   * 
-   * @see org.apache.accumulo.server.fate.Repo#call(long, java.lang.Object)
-   */
   @Override
   public Repo<T> call(long tid, T environment) throws Exception {
     Span span = Trace.trace(tinfo, repo.getDescription());
@@ -71,11 +60,6 @@ public class TraceRepo<T> implements Rep
     }
   }
   
-  /*
-   * (non-Javadoc)
-   * 
-   * @see org.apache.accumulo.server.fate.Repo#undo(long, java.lang.Object)
-   */
   @Override
   public void undo(long tid, T environment) throws Exception {
     Span span = Trace.trace(tinfo, repo.getDescription());
@@ -86,24 +70,14 @@ public class TraceRepo<T> implements Rep
     }
   }
   
-  /*
-   * (non-Javadoc)
-   * 
-   * @see org.apache.accumulo.server.fate.Repo#getDescription()
-   */
   @Override
   public String getDescription() {
     return repo.getDescription();
   }
   
-  /*
-   * (non-Javadoc)
-   * 
-   * @see org.apache.accumulo.server.fate.Repo#getReturn()
-   */
   @Override
   public String getReturn() {
     return repo.getReturn();
   }
-
+  
 }

Modified: accumulo/trunk/server/src/main/java/org/apache/accumulo/server/problems/ProblemReports.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/server/src/main/java/org/apache/accumulo/server/problems/ProblemReports.java?rev=1493250&r1=1493249&r2=1493250&view=diff
==============================================================================
--- accumulo/trunk/server/src/main/java/org/apache/accumulo/server/problems/ProblemReports.java (original)
+++ accumulo/trunk/server/src/main/java/org/apache/accumulo/server/problems/ProblemReports.java Fri Jun 14 21:19:23 2013
@@ -39,6 +39,7 @@ import org.apache.accumulo.core.data.Mut
 import org.apache.accumulo.core.data.Range;
 import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.iterators.SortedKeyIterator;
+import org.apache.accumulo.core.security.Authorizations;
 import org.apache.accumulo.core.util.LoggingRunnable;
 import org.apache.accumulo.core.util.NamingThreadFactory;
 import org.apache.accumulo.core.zookeeper.ZooUtil;
@@ -153,7 +154,7 @@ public class ProblemReports implements I
     }
     
     Connector connector = HdfsZooInstance.getInstance().getConnector(SecurityConstants.getSystemPrincipal(), SecurityConstants.getSystemToken());
-    Scanner scanner = connector.createScanner(Constants.METADATA_TABLE_NAME, Constants.NO_AUTHS);
+    Scanner scanner = connector.createScanner(Constants.METADATA_TABLE_NAME, Authorizations.EMPTY);
     scanner.addScanIterator(new IteratorSetting(1, "keys-only", SortedKeyIterator.class));
     
     if (table == null) {
@@ -208,7 +209,7 @@ public class ProblemReports implements I
             try {
               if ((table == null || !table.equals(Constants.METADATA_TABLE_ID)) && iter1Count == 0) {
                 Connector connector = HdfsZooInstance.getInstance().getConnector(SecurityConstants.getSystemPrincipal(), SecurityConstants.getSystemToken());
-                Scanner scanner = connector.createScanner(Constants.METADATA_TABLE_NAME, Constants.NO_AUTHS);
+                Scanner scanner = connector.createScanner(Constants.METADATA_TABLE_NAME, Authorizations.EMPTY);
                 
                 scanner.setTimeout(3, TimeUnit.SECONDS);
                 

Modified: accumulo/trunk/server/src/main/java/org/apache/accumulo/server/security/SecurityOperation.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/server/src/main/java/org/apache/accumulo/server/security/SecurityOperation.java?rev=1493250&r1=1493249&r2=1493250&view=diff
==============================================================================
--- accumulo/trunk/server/src/main/java/org/apache/accumulo/server/security/SecurityOperation.java (original)
+++ accumulo/trunk/server/src/main/java/org/apache/accumulo/server/security/SecurityOperation.java Fri Jun 14 21:19:23 2013
@@ -105,6 +105,7 @@ public class SecurityOperation {
    * 
    * @deprecated not for client use
    */
+  @Deprecated
   public SecurityOperation(String instanceId) {
     ZKUserPath = Constants.ZROOT + "/" + instanceId + "/users";
     zooCache = new ZooCache();
@@ -209,7 +210,7 @@ public class SecurityOperation {
     
     // system user doesn't need record-level authorizations for the tables it reads (for now)
     if (user.equals(SecurityConstants.SYSTEM_PRINCIPAL))
-      return Constants.NO_AUTHS;
+      return Authorizations.EMPTY;
     
     try {
       return authorizor.getCachedUserAuthorizations(user);
@@ -293,11 +294,13 @@ public class SecurityOperation {
     return hasTablePermission(credentials.getPrincipal(), table, TablePermission.READ, true);
   }
   
-  public boolean canScan(TCredentials credentials, String table, TRange range, List<TColumn> columns, List<IterInfo> ssiList, Map<String,Map<String,String>> ssio, List<ByteBuffer> authorizations) throws ThriftSecurityException {
+  public boolean canScan(TCredentials credentials, String table, TRange range, List<TColumn> columns, List<IterInfo> ssiList,
+      Map<String,Map<String,String>> ssio, List<ByteBuffer> authorizations) throws ThriftSecurityException {
     return canScan(credentials, table);
   }
   
-  public boolean canScan(TCredentials credentials, String table, Map<TKeyExtent,List<TRange>> tbatch, List<TColumn> tcolumns, List<IterInfo> ssiList, Map<String,Map<String,String>> ssio, List<ByteBuffer> authorizations) throws ThriftSecurityException {
+  public boolean canScan(TCredentials credentials, String table, Map<TKeyExtent,List<TRange>> tbatch, List<TColumn> tcolumns, List<IterInfo> ssiList,
+      Map<String,Map<String,String>> ssio, List<ByteBuffer> authorizations) throws ThriftSecurityException {
     return canScan(credentials, table);
   }
   

Modified: accumulo/trunk/server/src/main/java/org/apache/accumulo/server/security/handler/InsecurePermHandler.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/server/src/main/java/org/apache/accumulo/server/security/handler/InsecurePermHandler.java?rev=1493250&r1=1493249&r2=1493250&view=diff
==============================================================================
--- accumulo/trunk/server/src/main/java/org/apache/accumulo/server/security/handler/InsecurePermHandler.java (original)
+++ accumulo/trunk/server/src/main/java/org/apache/accumulo/server/security/handler/InsecurePermHandler.java Fri Jun 14 21:19:23 2013
@@ -27,120 +27,77 @@ import org.apache.accumulo.core.security
  */
 public class InsecurePermHandler implements PermissionHandler {
   
-  /* (non-Javadoc)
-   * @see org.apache.accumulo.server.security.handler.PermissionHandler#initialize(java.lang.String)
-   */
   @Override
   public void initialize(String instanceId, boolean initialize) {
     return;
   }
   
-  /* (non-Javadoc)
-   * @see org.apache.accumulo.server.security.handler.PermissionHandler#validSecurityHandlers(org.apache.accumulo.server.security.handler.Authenticator, org.apache.accumulo.server.security.handler.Authorizor)
-   */
   @Override
   public boolean validSecurityHandlers(Authenticator authent, Authorizor author) {
     return true;
   }
   
-  /* (non-Javadoc)
-   * @see org.apache.accumulo.server.security.handler.PermissionHandler#initializeSecurity(java.lang.String)
-   */
   @Override
   public void initializeSecurity(TCredentials token, String rootuser) throws AccumuloSecurityException {
     return;
   }
   
-  /* (non-Javadoc)
-   * @see org.apache.accumulo.server.security.handler.PermissionHandler#hasSystemPermission(java.lang.String, org.apache.accumulo.core.security.SystemPermission)
-   */
   @Override
   public boolean hasSystemPermission(String user, SystemPermission permission) throws AccumuloSecurityException {
     return true;
   }
   
-  /* (non-Javadoc)
-   * @see org.apache.accumulo.server.security.handler.PermissionHandler#hasCachedSystemPermission(java.lang.String, org.apache.accumulo.core.security.SystemPermission)
-   */
   @Override
   public boolean hasCachedSystemPermission(String user, SystemPermission permission) throws AccumuloSecurityException {
     return true;
   }
   
-  /* (non-Javadoc)
-   * @see org.apache.accumulo.server.security.handler.PermissionHandler#hasTablePermission(java.lang.String, java.lang.String, org.apache.accumulo.core.security.TablePermission)
-   */
   @Override
   public boolean hasTablePermission(String user, String table, TablePermission permission) throws AccumuloSecurityException, TableNotFoundException {
     return true;
   }
   
-  /* (non-Javadoc)
-   * @see org.apache.accumulo.server.security.handler.PermissionHandler#hasCachedTablePermission(java.lang.String, java.lang.String, org.apache.accumulo.core.security.TablePermission)
-   */
   @Override
   public boolean hasCachedTablePermission(String user, String table, TablePermission permission) throws AccumuloSecurityException, TableNotFoundException {
     return true;
   }
   
-  /* (non-Javadoc)
-   * @see org.apache.accumulo.server.security.handler.PermissionHandler#grantSystemPermission(java.lang.String, org.apache.accumulo.core.security.SystemPermission)
-   */
   @Override
   public void grantSystemPermission(String user, SystemPermission permission) throws AccumuloSecurityException {
     return;
   }
   
-  /* (non-Javadoc)
-   * @see org.apache.accumulo.server.security.handler.PermissionHandler#revokeSystemPermission(java.lang.String, org.apache.accumulo.core.security.SystemPermission)
-   */
   @Override
   public void revokeSystemPermission(String user, SystemPermission permission) throws AccumuloSecurityException {
     return;
   }
   
-  /* (non-Javadoc)
-   * @see org.apache.accumulo.server.security.handler.PermissionHandler#grantTablePermission(java.lang.String, java.lang.String, org.apache.accumulo.core.security.TablePermission)
-   */
   @Override
   public void grantTablePermission(String user, String table, TablePermission permission) throws AccumuloSecurityException, TableNotFoundException {
     return;
   }
   
-  /* (non-Javadoc)
-   * @see org.apache.accumulo.server.security.handler.PermissionHandler#revokeTablePermission(java.lang.String, java.lang.String, org.apache.accumulo.core.security.TablePermission)
-   */
   @Override
   public void revokeTablePermission(String user, String table, TablePermission permission) throws AccumuloSecurityException, TableNotFoundException {
     return;
   }
   
-  /* (non-Javadoc)
-   * @see org.apache.accumulo.server.security.handler.PermissionHandler#cleanTablePermissions(java.lang.String)
-   */
   @Override
   public void cleanTablePermissions(String table) throws AccumuloSecurityException, TableNotFoundException {
     return;
   }
   
-  /* (non-Javadoc)
-   * @see org.apache.accumulo.server.security.handler.PermissionHandler#initUser(java.lang.String)
-   */
   @Override
   public void initUser(String user) throws AccumuloSecurityException {
     return;
   }
   
-  /* (non-Javadoc)
-   * @see org.apache.accumulo.server.security.handler.PermissionHandler#dropUser(java.lang.String)
-   */
   @Override
   public void cleanUser(String user) throws AccumuloSecurityException {
     return;
   }
-
+  
   @Override
-  public void initTable(String table) throws AccumuloSecurityException {
-  }
+  public void initTable(String table) throws AccumuloSecurityException {}
   
 }

Modified: accumulo/trunk/server/src/main/java/org/apache/accumulo/server/security/handler/ZKAuthorizor.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/server/src/main/java/org/apache/accumulo/server/security/handler/ZKAuthorizor.java?rev=1493250&r1=1493249&r2=1493250&view=diff
==============================================================================
--- accumulo/trunk/server/src/main/java/org/apache/accumulo/server/security/handler/ZKAuthorizor.java (original)
+++ accumulo/trunk/server/src/main/java/org/apache/accumulo/server/security/handler/ZKAuthorizor.java Fri Jun 14 21:19:23 2013
@@ -64,7 +64,7 @@ public class ZKAuthorizor implements Aut
     byte[] authsBytes = zooCache.get(ZKUserPath + "/" + user + ZKUserAuths);
     if (authsBytes != null)
       return ZKSecurityTool.convertAuthorizations(authsBytes);
-    return Constants.NO_AUTHS;
+    return Authorizations.EMPTY;
   }
   
   @Override
@@ -90,7 +90,7 @@ public class ZKAuthorizor implements Aut
         zoo.putPersistentData(ZKUserPath, rootuser.getBytes(), NodeExistsPolicy.FAIL);
       
       initUser(rootuser);
-      zoo.putPersistentData(ZKUserPath + "/" + rootuser + ZKUserAuths, ZKSecurityTool.convertAuthorizations(Constants.NO_AUTHS), NodeExistsPolicy.FAIL);
+      zoo.putPersistentData(ZKUserPath + "/" + rootuser + ZKUserAuths, ZKSecurityTool.convertAuthorizations(Authorizations.EMPTY), NodeExistsPolicy.FAIL);
     } catch (KeeperException e) {
       log.error(e, e);
       throw new RuntimeException(e);

Modified: accumulo/trunk/server/src/main/java/org/apache/accumulo/server/tabletserver/Tablet.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/server/src/main/java/org/apache/accumulo/server/tabletserver/Tablet.java?rev=1493250&r1=1493249&r2=1493250&view=diff
==============================================================================
--- accumulo/trunk/server/src/main/java/org/apache/accumulo/server/tabletserver/Tablet.java (original)
+++ accumulo/trunk/server/src/main/java/org/apache/accumulo/server/tabletserver/Tablet.java Fri Jun 14 21:19:23 2013
@@ -1201,7 +1201,7 @@ public class Tablet {
       Text rowName = extent.getMetadataEntry();
       
       ScannerImpl mdScanner = new ScannerImpl(HdfsZooInstance.getInstance(), SecurityConstants.getSystemCredentials(), Constants.METADATA_TABLE_ID,
-          Constants.NO_AUTHS);
+          Authorizations.EMPTY);
       
       // Commented out because when no data file is present, each tablet will scan through metadata table and return nothing
       // reduced batch size to improve performance

Modified: accumulo/trunk/server/src/main/java/org/apache/accumulo/server/tabletserver/TabletServer.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/server/src/main/java/org/apache/accumulo/server/tabletserver/TabletServer.java?rev=1493250&r1=1493249&r2=1493250&view=diff
==============================================================================
--- accumulo/trunk/server/src/main/java/org/apache/accumulo/server/tabletserver/TabletServer.java (original)
+++ accumulo/trunk/server/src/main/java/org/apache/accumulo/server/tabletserver/TabletServer.java Fri Jun 14 21:19:23 2013
@@ -2051,12 +2051,6 @@ public class TabletServer extends Abstra
       
     }
     
-    /*
-     * (non-Javadoc)
-     * 
-     * @see org.apache.accumulo.core.tabletserver.thrift.TabletClientService.Iface#removeLogs(org.apache.accumulo.trace.thrift.TInfo,
-     * org.apache.accumulo.core.security.thrift.Credentials, java.util.List)
-     */
     @Override
     public void removeLogs(TInfo tinfo, TCredentials credentials, List<String> filenames) throws TException {
       String myname = getClientAddressString();
@@ -2895,7 +2889,7 @@ public class TabletServer extends Abstra
         Constants.METADATA_SPLIT_RATIO_COLUMN, Constants.METADATA_OLD_PREV_ROW_COLUMN, Constants.METADATA_TIME_COLUMN});
     
     ScannerImpl scanner = new ScannerImpl(HdfsZooInstance.getInstance(), SecurityConstants.getSystemCredentials(), Constants.METADATA_TABLE_ID,
-        Constants.NO_AUTHS);
+        Authorizations.EMPTY);
     scanner.setRange(extent.toMetadataRange());
     
     TreeMap<Key,Value> tkv = new TreeMap<Key,Value>();

Modified: accumulo/trunk/server/src/main/java/org/apache/accumulo/server/tabletserver/log/DfsLogger.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/server/src/main/java/org/apache/accumulo/server/tabletserver/log/DfsLogger.java?rev=1493250&r1=1493249&r2=1493250&view=diff
==============================================================================
--- accumulo/trunk/server/src/main/java/org/apache/accumulo/server/tabletserver/log/DfsLogger.java (original)
+++ accumulo/trunk/server/src/main/java/org/apache/accumulo/server/tabletserver/log/DfsLogger.java Fri Jun 14 21:19:23 2013
@@ -53,11 +53,11 @@ import org.apache.hadoop.fs.FSDataInputS
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-//import org.apache.hadoop.fs.CreateFlag;
-//import org.apache.hadoop.fs.Syncable;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.util.Progressable;
 import org.apache.log4j.Logger;
+//import org.apache.hadoop.fs.CreateFlag;
+//import org.apache.hadoop.fs.Syncable;
 
 /**
  * Wrap a connection to a logger.
@@ -180,11 +180,6 @@ public class DfsLogger {
     }
   }
   
-  /*
-   * (non-Javadoc)
-   * 
-   * @see org.apache.accumulo.server.tabletserver.log.IRemoteLogger#equals(java.lang.Object)
-   */
   @Override
   public boolean equals(Object obj) {
     // filename is unique
@@ -195,11 +190,6 @@ public class DfsLogger {
     return false;
   }
   
-  /*
-   * (non-Javadoc)
-   * 
-   * @see org.apache.accumulo.server.tabletserver.log.IRemoteLogger#hashCode()
-   */
   @Override
   public int hashCode() {
     // filename is unique
@@ -282,15 +272,13 @@ public class DfsLogger {
           // hsync: send data to datanodes and sync the data to disk
           sync = logFile.getClass().getMethod("hsync");
           e = null;
-        } catch (NoSuchMethodException ex) {
-        }
+        } catch (NoSuchMethodException ex) {}
         if (e != null)
           throw new RuntimeException(e);
       } catch (Exception e) {
         throw new RuntimeException(e);
       }
       
-      
       // Initialize the crypto operations.
       @SuppressWarnings("deprecation")
       org.apache.accumulo.core.security.crypto.CryptoModule cryptoModule = org.apache.accumulo.core.security.crypto.CryptoModuleFactory.getCryptoModule(conf
@@ -338,20 +326,20 @@ public class DfsLogger {
   
   private FSDataOutputStream create(FileSystem fs, Path logPath, boolean b, int buffersize, short replication, long blockSize) throws IOException {
     try {
-      // This... 
-      //    EnumSet<CreateFlag> set = EnumSet.of(CreateFlag.SYNC_BLOCK, CreateFlag.CREATE);
-      //    return fs.create(logPath, FsPermission.getDefault(), set, buffersize, replication, blockSize, null);
+      // This...
+      // EnumSet<CreateFlag> set = EnumSet.of(CreateFlag.SYNC_BLOCK, CreateFlag.CREATE);
+      // return fs.create(logPath, FsPermission.getDefault(), set, buffersize, replication, blockSize, null);
       // Becomes this:
       Class<?> createFlags = Class.forName("org.apache.hadoop.fs.CreateFlag");
       List<Enum<?>> flags = new ArrayList<Enum<?>>();
       if (createFlags.isEnum()) {
         for (Object constant : createFlags.getEnumConstants()) {
           if (constant.toString().equals("SYNC_BLOCK")) {
-            flags.add((Enum<?>)constant);
+            flags.add((Enum<?>) constant);
             log.debug("Found synch enum " + constant);
           }
           if (constant.toString().equals("CREATE")) {
-            flags.add((Enum<?>)constant);
+            flags.add((Enum<?>) constant);
             log.debug("Found CREATE enum " + constant);
           }
         }
@@ -359,11 +347,11 @@ public class DfsLogger {
       Object set = EnumSet.class.getMethod("of", java.lang.Enum.class, java.lang.Enum.class).invoke(null, flags.get(0), flags.get(1));
       log.debug("CreateFlag set: " + set);
       if (fs instanceof TraceFileSystem) {
-        fs = ((TraceFileSystem)fs).getImplementation();
+        fs = ((TraceFileSystem) fs).getImplementation();
       }
       Method create = fs.getClass().getMethod("create", Path.class, FsPermission.class, EnumSet.class, Integer.TYPE, Short.TYPE, Long.TYPE, Progressable.class);
       log.debug("creating " + logPath + " with SYNCH_BLOCK flag");
-      return (FSDataOutputStream)create.invoke(fs, logPath, FsPermission.getDefault(), set, buffersize, replication, blockSize, null);
+      return (FSDataOutputStream) create.invoke(fs, logPath, FsPermission.getDefault(), set, buffersize, replication, blockSize, null);
     } catch (ClassNotFoundException ex) {
       // Expected in hadoop 1.0
       return fs.create(logPath, b, buffersize, replication, blockSize);
@@ -372,12 +360,7 @@ public class DfsLogger {
       return fs.create(logPath, b, buffersize, replication, blockSize);
     }
   }
-
-  /*
-   * (non-Javadoc)
-   * 
-   * @see org.apache.accumulo.server.tabletserver.log.IRemoteLogger#toString()
-   */
+  
   @Override
   public String toString() {
     return getLogger() + "/" + getFileName();

Modified: accumulo/trunk/server/src/main/java/org/apache/accumulo/server/util/AddFilesWithMissingEntries.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/server/src/main/java/org/apache/accumulo/server/util/AddFilesWithMissingEntries.java?rev=1493250&r1=1493249&r2=1493250&view=diff
==============================================================================
--- accumulo/trunk/server/src/main/java/org/apache/accumulo/server/util/AddFilesWithMissingEntries.java (original)
+++ accumulo/trunk/server/src/main/java/org/apache/accumulo/server/util/AddFilesWithMissingEntries.java Fri Jun 14 21:19:23 2013
@@ -30,6 +30,7 @@ import org.apache.accumulo.core.data.Mut
 import org.apache.accumulo.core.data.PartialKey;
 import org.apache.accumulo.core.data.Range;
 import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.security.Authorizations;
 import org.apache.accumulo.server.ServerConstants;
 import org.apache.accumulo.server.cli.ClientOpts;
 import org.apache.hadoop.conf.Configuration;
@@ -61,7 +62,7 @@ public class AddFilesWithMissingEntries 
     
     final Key rootTableEnd = new Key(Constants.ROOT_TABLET_EXTENT.getEndRow());
     final Range range = new Range(rootTableEnd.followingKey(PartialKey.ROW), true, Constants.METADATA_RESERVED_KEYSPACE_START_KEY, false);
-    final Scanner scanner = opts.getConnector().createScanner(Constants.METADATA_TABLE_NAME, Constants.NO_AUTHS);
+    final Scanner scanner = opts.getConnector().createScanner(Constants.METADATA_TABLE_NAME, Authorizations.EMPTY);
     scanner.setRange(range);
     final Configuration conf = new Configuration();
     final FileSystem fs = FileSystem.get(conf);

Modified: accumulo/trunk/server/src/main/java/org/apache/accumulo/server/util/CheckForMetadataProblems.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/server/src/main/java/org/apache/accumulo/server/util/CheckForMetadataProblems.java?rev=1493250&r1=1493249&r2=1493250&view=diff
==============================================================================
--- accumulo/trunk/server/src/main/java/org/apache/accumulo/server/util/CheckForMetadataProblems.java (original)
+++ accumulo/trunk/server/src/main/java/org/apache/accumulo/server/util/CheckForMetadataProblems.java Fri Jun 14 21:19:23 2013
@@ -24,7 +24,6 @@ import java.util.Set;
 import java.util.TreeSet;
 
 import org.apache.accumulo.core.Constants;
-import org.apache.accumulo.server.cli.ClientOpts;
 import org.apache.accumulo.core.client.AccumuloSecurityException;
 import org.apache.accumulo.core.client.Scanner;
 import org.apache.accumulo.core.client.impl.Writer;
@@ -32,9 +31,11 @@ import org.apache.accumulo.core.data.Key
 import org.apache.accumulo.core.data.KeyExtent;
 import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.security.Authorizations;
 import org.apache.accumulo.core.security.CredentialHelper;
 import org.apache.accumulo.core.tabletserver.thrift.ConstraintViolationException;
 import org.apache.accumulo.core.util.CachedConfiguration;
+import org.apache.accumulo.server.cli.ClientOpts;
 import org.apache.accumulo.server.conf.ServerConfiguration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.io.Text;
@@ -105,7 +106,7 @@ public class CheckForMetadataProblems {
     if (opts.offline) {
       scanner = new OfflineMetadataScanner(ServerConfiguration.getSystemConfiguration(opts.getInstance()), fs);
     } else {
-      scanner =  opts.getConnector().createScanner(Constants.METADATA_TABLE_NAME, Constants.NO_AUTHS);
+      scanner = opts.getConnector().createScanner(Constants.METADATA_TABLE_NAME, Authorizations.EMPTY);
     }
     
     scanner.setRange(Constants.METADATA_KEYSPACE);
@@ -180,14 +181,13 @@ public class CheckForMetadataProblems {
   }
   
   static class Opts extends ClientOpts {
-    @Parameter(names="--fix", description="best-effort attempt to fix problems found")
+    @Parameter(names = "--fix", description = "best-effort attempt to fix problems found")
     boolean fix = false;
     
-    @Parameter(names="--offline", description="perform the check on the files directly")
+    @Parameter(names = "--offline", description = "perform the check on the files directly")
     boolean offline = false;
   }
   
-  
   public static void main(String[] args) throws Exception {
     Opts opts = new Opts();
     opts.parseArgs(CheckForMetadataProblems.class.getName(), args);

Modified: accumulo/trunk/server/src/main/java/org/apache/accumulo/server/util/LocalityCheck.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/server/src/main/java/org/apache/accumulo/server/util/LocalityCheck.java?rev=1493250&r1=1493249&r2=1493250&view=diff
==============================================================================
--- accumulo/trunk/server/src/main/java/org/apache/accumulo/server/util/LocalityCheck.java (original)
+++ accumulo/trunk/server/src/main/java/org/apache/accumulo/server/util/LocalityCheck.java Fri Jun 14 21:19:23 2013
@@ -23,14 +23,15 @@ import java.util.Map;
 import java.util.Map.Entry;
 
 import org.apache.accumulo.core.Constants;
-import org.apache.accumulo.server.cli.ClientOpts;
 import org.apache.accumulo.core.client.Connector;
 import org.apache.accumulo.core.client.Scanner;
 import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.KeyExtent;
 import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.security.Authorizations;
 import org.apache.accumulo.core.util.CachedConfiguration;
 import org.apache.accumulo.server.ServerConstants;
+import org.apache.accumulo.server.cli.ClientOpts;
 import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
@@ -44,7 +45,7 @@ public class LocalityCheck {
     
     FileSystem fs = FileSystem.get(CachedConfiguration.getInstance());
     Connector connector = opts.getConnector();
-    Scanner scanner = connector.createScanner(Constants.METADATA_TABLE_NAME, Constants.NO_AUTHS);
+    Scanner scanner = connector.createScanner(Constants.METADATA_TABLE_NAME, Authorizations.EMPTY);
     scanner.fetchColumnFamily(Constants.METADATA_CURRENT_LOCATION_COLUMN_FAMILY);
     scanner.fetchColumnFamily(Constants.METADATA_DATAFILE_COLUMN_FAMILY);
     scanner.setRange(Constants.METADATA_KEYSPACE);
@@ -77,7 +78,7 @@ public class LocalityCheck {
       return path;
     return "/" + path;
   }
-
+  
   private void addBlocks(FileSystem fs, String host, ArrayList<String> files, Map<String,Long> totalBlocks, Map<String,Long> localBlocks) throws Exception {
     long allBlocks = 0;
     long matchingBlocks = 0;

Modified: accumulo/trunk/server/src/main/java/org/apache/accumulo/server/util/MetadataTable.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/server/src/main/java/org/apache/accumulo/server/util/MetadataTable.java?rev=1493250&r1=1493249&r2=1493250&view=diff
==============================================================================
--- accumulo/trunk/server/src/main/java/org/apache/accumulo/server/util/MetadataTable.java (original)
+++ accumulo/trunk/server/src/main/java/org/apache/accumulo/server/util/MetadataTable.java Fri Jun 14 21:19:23 2013
@@ -54,6 +54,7 @@ import org.apache.accumulo.core.data.Par
 import org.apache.accumulo.core.data.Range;
 import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.file.FileUtil;
+import org.apache.accumulo.core.security.Authorizations;
 import org.apache.accumulo.core.security.CredentialHelper;
 import org.apache.accumulo.core.security.thrift.TCredentials;
 import org.apache.accumulo.core.tabletserver.thrift.ConstraintViolationException;
@@ -338,7 +339,7 @@ public class MetadataTable extends org.a
   public static SortedMap<String,DataFileValue> getDataFileSizes(KeyExtent extent, TCredentials credentials) {
     TreeMap<String,DataFileValue> sizes = new TreeMap<String,DataFileValue>();
     
-    Scanner mdScanner = new ScannerImpl(HdfsZooInstance.getInstance(), credentials, Constants.METADATA_TABLE_ID, Constants.NO_AUTHS);
+    Scanner mdScanner = new ScannerImpl(HdfsZooInstance.getInstance(), credentials, Constants.METADATA_TABLE_ID, Authorizations.EMPTY);
     mdScanner.fetchColumnFamily(Constants.METADATA_DATAFILE_COLUMN_FAMILY);
     Text row = extent.getMetadataEntry();
     
@@ -392,7 +393,7 @@ public class MetadataTable extends org.a
     Constants.METADATA_OLD_PREV_ROW_COLUMN.putDelete(m);
     update(credentials, zooLock, m);
   }
-
+  
   public static void splitTablet(KeyExtent extent, Text oldPrevEndRow, double splitRatio, TCredentials credentials, ZooLock zooLock) {
     Mutation m = extent.getPrevRowUpdateMutation(); //
     
@@ -482,12 +483,12 @@ public class MetadataTable extends org.a
     String prefix = Constants.METADATA_DELETE_FLAG_PREFIX;
     if (tableId.equals(Constants.METADATA_TABLE_ID))
       prefix = Constants.METADATA_DELETE_FLAG_FOR_METADATA_PREFIX;
-
+    
     if (pathToRemove.startsWith("../"))
       delFlag = new Mutation(new Text(prefix + pathToRemove.substring(2)));
     else
       delFlag = new Mutation(new Text(prefix + "/" + tableId + pathToRemove));
-
+    
     delFlag.put(EMPTY_TEXT, EMPTY_TEXT, new Value(new byte[] {}));
     return delFlag;
   }
@@ -510,8 +511,8 @@ public class MetadataTable extends org.a
     
     // check to see if prev tablet exist in metadata tablet
     Key prevRowKey = new Key(new Text(KeyExtent.getMetadataEntry(table, metadataPrevEndRow)));
-
-    ScannerImpl scanner2 = new ScannerImpl(HdfsZooInstance.getInstance(), credentials, Constants.METADATA_TABLE_ID, Constants.NO_AUTHS);
+    
+    ScannerImpl scanner2 = new ScannerImpl(HdfsZooInstance.getInstance(), credentials, Constants.METADATA_TABLE_ID, Authorizations.EMPTY);
     scanner2.setRange(new Range(prevRowKey, prevRowKey.followingKey(PartialKey.ROW)));
     
     if (!scanner2.iterator().hasNext()) {
@@ -520,10 +521,10 @@ public class MetadataTable extends org.a
       return new KeyExtent(metadataEntry, KeyExtent.decodePrevEndRow(oper));
     } else {
       log.info("Finishing incomplete split " + metadataEntry + " " + metadataPrevEndRow);
-
+      
       List<String> highDatafilesToRemove = new ArrayList<String>();
-
-      Scanner scanner3 = new ScannerImpl(HdfsZooInstance.getInstance(), credentials, Constants.METADATA_TABLE_ID, Constants.NO_AUTHS);
+      
+      Scanner scanner3 = new ScannerImpl(HdfsZooInstance.getInstance(), credentials, Constants.METADATA_TABLE_ID, Authorizations.EMPTY);
       Key rowKey = new Key(metadataEntry);
       
       SortedMap<String,DataFileValue> origDatafileSizes = new TreeMap<String,DataFileValue>();
@@ -540,13 +541,12 @@ public class MetadataTable extends org.a
       
       splitDatafiles(table, metadataPrevEndRow, splitRatio, new HashMap<String,FileUtil.FileInfo>(), origDatafileSizes, lowDatafileSizes, highDatafileSizes,
           highDatafilesToRemove);
-    
+      
       MetadataTable.finishSplit(metadataEntry, highDatafileSizes, highDatafilesToRemove, credentials, lock);
       
       return new KeyExtent(metadataEntry, KeyExtent.encodePrevEndRow(metadataPrevEndRow));
     }
-
-
+    
   }
   
   public static void splitDatafiles(Text table, Text midRow, double splitRatio, Map<String,FileUtil.FileInfo> firstAndLastRows,
@@ -634,7 +634,7 @@ public class MetadataTable extends org.a
   }
   
   public static void deleteTable(String tableId, boolean insertDeletes, TCredentials credentials, ZooLock lock) throws AccumuloException {
-    Scanner ms = new ScannerImpl(HdfsZooInstance.getInstance(), credentials, Constants.METADATA_TABLE_ID, Constants.NO_AUTHS);
+    Scanner ms = new ScannerImpl(HdfsZooInstance.getInstance(), credentials, Constants.METADATA_TABLE_ID, Authorizations.EMPTY);
     Text tableIdText = new Text(tableId);
     BatchWriter bw = new BatchWriterImpl(HdfsZooInstance.getInstance(), credentials, Constants.METADATA_TABLE_ID, new BatchWriterConfig().setMaxMemory(1000000)
         .setMaxLatency(120000l, TimeUnit.MILLISECONDS).setMaxWriteThreads(2));
@@ -804,7 +804,7 @@ public class MetadataTable extends org.a
       }
       
     } else {
-      Scanner scanner = new ScannerImpl(HdfsZooInstance.getInstance(), credentials, Constants.METADATA_TABLE_ID, Constants.NO_AUTHS);
+      Scanner scanner = new ScannerImpl(HdfsZooInstance.getInstance(), credentials, Constants.METADATA_TABLE_ID, Authorizations.EMPTY);
       scanner.fetchColumnFamily(Constants.METADATA_LOG_COLUMN_FAMILY);
       scanner.fetchColumnFamily(Constants.METADATA_DATAFILE_COLUMN_FAMILY);
       scanner.setRange(extent.toMetadataRange());
@@ -866,8 +866,8 @@ public class MetadataTable extends org.a
   private static void getRootLogEntries(ArrayList<LogEntry> result) throws KeeperException, InterruptedException, IOException {
     IZooReaderWriter zoo = ZooReaderWriter.getInstance();
     String root = getZookeeperLogLocation();
-    // there's a little race between getting the children and fetching 
-    // the data.  The log can be removed in between.
+    // there's a little race between getting the children and fetching
+    // the data. The log can be removed in between.
     while (true) {
       result.clear();
       for (String child : zoo.getChildren(root)) {
@@ -884,7 +884,7 @@ public class MetadataTable extends org.a
   }
   
   private static Scanner getTabletLogScanner(TCredentials credentials, KeyExtent extent) {
-    Scanner scanner = new ScannerImpl(HdfsZooInstance.getInstance(), credentials, Constants.METADATA_TABLE_ID, Constants.NO_AUTHS);
+    Scanner scanner = new ScannerImpl(HdfsZooInstance.getInstance(), credentials, Constants.METADATA_TABLE_ID, Authorizations.EMPTY);
     scanner.fetchColumnFamily(Constants.METADATA_LOG_COLUMN_FAMILY);
     Text start = extent.getMetadataEntry();
     Key endKey = new Key(start, Constants.METADATA_LOG_COLUMN_FAMILY);
@@ -902,7 +902,7 @@ public class MetadataTable extends org.a
       rootTabletEntries = getLogEntries(creds, Constants.ROOT_TABLET_EXTENT).iterator();
       try {
         Scanner scanner = HdfsZooInstance.getInstance().getConnector(creds.getPrincipal(), CredentialHelper.extractToken(creds))
-            .createScanner(Constants.METADATA_TABLE_NAME, Constants.NO_AUTHS);
+            .createScanner(Constants.METADATA_TABLE_NAME, Authorizations.EMPTY);
         scanner.fetchColumnFamily(Constants.METADATA_LOG_COLUMN_FAMILY);
         metadataEntries = scanner.iterator();
       } catch (Exception ex) {
@@ -991,7 +991,7 @@ public class MetadataTable extends org.a
   }
   
   private static Scanner createCloneScanner(String tableId, Connector conn) throws TableNotFoundException {
-    Scanner mscanner = new IsolatedScanner(conn.createScanner(Constants.METADATA_TABLE_NAME, Constants.NO_AUTHS));
+    Scanner mscanner = new IsolatedScanner(conn.createScanner(Constants.METADATA_TABLE_NAME, Authorizations.EMPTY));
     mscanner.setRange(new KeyExtent(new Text(tableId), null, null).toMetadataRange());
     mscanner.fetchColumnFamily(Constants.METADATA_DATAFILE_COLUMN_FAMILY);
     mscanner.fetchColumnFamily(Constants.METADATA_CURRENT_LOCATION_COLUMN_FAMILY);
@@ -1136,7 +1136,7 @@ public class MetadataTable extends org.a
     }
     
     // delete the clone markers and create directory entries
-    Scanner mscanner = conn.createScanner(Constants.METADATA_TABLE_NAME, Constants.NO_AUTHS);
+    Scanner mscanner = conn.createScanner(Constants.METADATA_TABLE_NAME, Authorizations.EMPTY);
     mscanner.setRange(new KeyExtent(new Text(tableId), null, null).toMetadataRange());
     mscanner.fetchColumnFamily(Constants.METADATA_CLONED_COLUMN_FAMILY);
     
@@ -1161,7 +1161,7 @@ public class MetadataTable extends org.a
   }
   
   public static void removeBulkLoadEntries(Connector conn, String tableId, long tid) throws Exception {
-    Scanner mscanner = new IsolatedScanner(conn.createScanner(Constants.METADATA_TABLE_NAME, Constants.NO_AUTHS));
+    Scanner mscanner = new IsolatedScanner(conn.createScanner(Constants.METADATA_TABLE_NAME, Authorizations.EMPTY));
     mscanner.setRange(new KeyExtent(new Text(tableId), null, null).toMetadataRange());
     mscanner.fetchColumnFamily(Constants.METADATA_BULKFILE_COLUMN_FAMILY);
     BatchWriter bw = conn.createBatchWriter(Constants.METADATA_TABLE_NAME, new BatchWriterConfig());
@@ -1180,7 +1180,7 @@ public class MetadataTable extends org.a
   public static List<String> getBulkFilesLoaded(Connector conn, KeyExtent extent, long tid) {
     List<String> result = new ArrayList<String>();
     try {
-      Scanner mscanner = new IsolatedScanner(conn.createScanner(Constants.METADATA_TABLE_NAME, Constants.NO_AUTHS));
+      Scanner mscanner = new IsolatedScanner(conn.createScanner(Constants.METADATA_TABLE_NAME, Authorizations.EMPTY));
       mscanner.setRange(extent.toMetadataRange());
       mscanner.fetchColumnFamily(Constants.METADATA_BULKFILE_COLUMN_FAMILY);
       for (Entry<Key,Value> entry : mscanner) {
@@ -1203,7 +1203,7 @@ public class MetadataTable extends org.a
     
     Map<String,Long> ret = new HashMap<String,Long>();
     
-    Scanner scanner = new ScannerImpl(HdfsZooInstance.getInstance(), credentials, Constants.METADATA_TABLE_ID, Constants.NO_AUTHS);
+    Scanner scanner = new ScannerImpl(HdfsZooInstance.getInstance(), credentials, Constants.METADATA_TABLE_ID, Authorizations.EMPTY);
     scanner.setRange(new Range(metadataRow));
     scanner.fetchColumnFamily(Constants.METADATA_BULKFILE_COLUMN_FAMILY);
     for (Entry<Key,Value> entry : scanner) {
@@ -1230,10 +1230,10 @@ public class MetadataTable extends org.a
     
     update(SecurityConstants.getSystemCredentials(), m);
   }
-
+  
   public static void moveMetaDeleteMarkers(Instance instance, TCredentials creds) {
     // move delete markers from the normal delete keyspace to the root tablet delete keyspace if the files are for the !METADATA table
-    Scanner scanner = new ScannerImpl(instance, creds, Constants.METADATA_TABLE_ID, Constants.NO_AUTHS);
+    Scanner scanner = new ScannerImpl(instance, creds, Constants.METADATA_TABLE_ID, Authorizations.EMPTY);
     scanner.setRange(new Range(Constants.METADATA_DELETES_KEYSPACE));
     for (Entry<Key,Value> entry : scanner) {
       String row = entry.getKey().getRow().toString();
@@ -1242,11 +1242,11 @@ public class MetadataTable extends org.a
         // add the new entry first
         log.info("Moving " + filename + " marker to the root tablet");
         Mutation m = new Mutation(Constants.METADATA_DELETE_FLAG_FOR_METADATA_PREFIX + filename);
-        m.put(new byte[]{}, new byte[]{}, new byte[]{});
+        m.put(new byte[] {}, new byte[] {}, new byte[] {});
         update(creds, m);
         // remove the old entry
         m = new Mutation(entry.getKey().getRow());
-        m.putDelete(new byte[]{}, new byte[]{});
+        m.putDelete(new byte[] {}, new byte[] {});
         update(creds, m);
       } else {
         break;

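For reference, the hunks above follow one recurring pattern: metadata scans that previously passed Constants.NO_AUTHS now pass the shared Authorizations.EMPTY constant, and empty column bytes are written with the spaced array-initializer style. A minimal sketch of that call shape, assuming a Connector supplied by the caller and a placeholder row value (neither comes from this commit):

import java.util.Map.Entry;

import org.apache.accumulo.core.Constants;
import org.apache.accumulo.core.client.Connector;
import org.apache.accumulo.core.client.IsolatedScanner;
import org.apache.accumulo.core.client.Scanner;
import org.apache.accumulo.core.client.TableNotFoundException;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Mutation;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.security.Authorizations;
import org.apache.hadoop.io.Text;

public class EmptyAuthsMetadataScanSketch {

  // Scan !METADATA with the shared empty-authorizations constant; the IsolatedScanner
  // wrapper matches the clone/bulk-load helpers changed above.
  static void scanDataFiles(Connector conn) throws TableNotFoundException {
    Scanner scanner = new IsolatedScanner(conn.createScanner(Constants.METADATA_TABLE_NAME, Authorizations.EMPTY));
    scanner.fetchColumnFamily(Constants.METADATA_DATAFILE_COLUMN_FAMILY);
    for (Entry<Key,Value> entry : scanner)
      System.out.println(entry.getKey() + " -> " + entry.getValue());
  }

  // Empty column family/qualifier/value written with the "new byte[] {}" style the patch standardizes on.
  static Mutation emptyColumns() {
    Mutation m = new Mutation(new Text("placeholder-row"));
    m.put(new byte[] {}, new byte[] {}, new byte[] {});
    return m;
  }
}
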
Modified: accumulo/trunk/server/src/main/java/org/apache/accumulo/server/util/OfflineMetadataScanner.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/server/src/main/java/org/apache/accumulo/server/util/OfflineMetadataScanner.java?rev=1493250&r1=1493249&r2=1493250&view=diff
==============================================================================
--- accumulo/trunk/server/src/main/java/org/apache/accumulo/server/util/OfflineMetadataScanner.java (original)
+++ accumulo/trunk/server/src/main/java/org/apache/accumulo/server/util/OfflineMetadataScanner.java Fri Jun 14 21:19:23 2013
@@ -45,6 +45,7 @@ import org.apache.accumulo.core.iterator
 import org.apache.accumulo.core.iterators.system.MultiIterator;
 import org.apache.accumulo.core.iterators.system.VisibilityFilter;
 import org.apache.accumulo.core.iterators.user.VersioningIterator;
+import org.apache.accumulo.core.security.Authorizations;
 import org.apache.accumulo.core.util.CachedConfiguration;
 import org.apache.accumulo.core.util.LocalityGroupUtil;
 import org.apache.accumulo.core.util.TextUtil;
@@ -78,7 +79,7 @@ public class OfflineMetadataScanner exte
     DeletingIterator delIter = new DeletingIterator(multiIterator, false);
     ColumnFamilySkippingIterator cfsi = new ColumnFamilySkippingIterator(delIter);
     ColumnQualifierFilter colFilter = new ColumnQualifierFilter(cfsi, columns);
-    VisibilityFilter visFilter = new VisibilityFilter(colFilter, Constants.NO_AUTHS, new byte[0]);
+    VisibilityFilter visFilter = new VisibilityFilter(colFilter, Authorizations.EMPTY, new byte[0]);
     
     visFilter.seek(r, LocalityGroupUtil.EMPTY_CF_SET, false);
     

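In the offline scanner the same constant now feeds the server-side VisibilityFilter. A small standalone sketch of that iterator stack, assuming in-memory sample data via SortedMapIterator rather than the RFile readers the real code builds; the sample keys and values are placeholders:

import java.util.TreeMap;

import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Range;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.iterators.SortedMapIterator;
import org.apache.accumulo.core.iterators.system.VisibilityFilter;
import org.apache.accumulo.core.security.Authorizations;
import org.apache.accumulo.core.util.LocalityGroupUtil;
import org.apache.hadoop.io.Text;

public class VisibilityFilterSketch {
  public static void main(String[] args) throws Exception {
    TreeMap<Key,Value> data = new TreeMap<Key,Value>();
    data.put(new Key(new Text("row"), new Text("cf"), new Text("cq1"), new Text("")), new Value("unlabeled".getBytes()));
    data.put(new Key(new Text("row"), new Text("cf"), new Text("cq2"), new Text("secret")), new Value("labeled".getBytes()));

    // Same constructor shape as the hunk above: source iterator, scan authorizations, default visibility.
    // With Authorizations.EMPTY only the unlabeled entry should come back.
    VisibilityFilter filter = new VisibilityFilter(new SortedMapIterator(data), Authorizations.EMPTY, new byte[0]);
    filter.seek(new Range(), LocalityGroupUtil.EMPTY_CF_SET, false);

    while (filter.hasTop()) {
      System.out.println(filter.getTopKey() + " -> " + filter.getTopValue());
      filter.next();
    }
  }
}
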
Modified: accumulo/trunk/server/src/main/java/org/apache/accumulo/server/util/VerifyTabletAssignments.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/server/src/main/java/org/apache/accumulo/server/util/VerifyTabletAssignments.java?rev=1493250&r1=1493249&r2=1493250&view=diff
==============================================================================
--- accumulo/trunk/server/src/main/java/org/apache/accumulo/server/util/VerifyTabletAssignments.java (original)
+++ accumulo/trunk/server/src/main/java/org/apache/accumulo/server/util/VerifyTabletAssignments.java Fri Jun 14 21:19:23 2013
@@ -29,7 +29,6 @@ import java.util.concurrent.ExecutorServ
 import java.util.concurrent.Executors;
 import java.util.concurrent.TimeUnit;
 
-import org.apache.accumulo.core.Constants;
 import org.apache.accumulo.core.client.AccumuloException;
 import org.apache.accumulo.core.client.AccumuloSecurityException;
 import org.apache.accumulo.core.client.Connector;
@@ -45,6 +44,7 @@ import org.apache.accumulo.core.data.thr
 import org.apache.accumulo.core.data.thrift.TColumn;
 import org.apache.accumulo.core.data.thrift.TKeyExtent;
 import org.apache.accumulo.core.data.thrift.TRange;
+import org.apache.accumulo.core.security.Authorizations;
 import org.apache.accumulo.core.security.CredentialHelper;
 import org.apache.accumulo.core.security.thrift.TCredentials;
 import org.apache.accumulo.core.tabletserver.thrift.NoSuchScanIDException;
@@ -90,8 +90,7 @@ public class VerifyTabletAssignments {
     
     Connector conn = opts.getConnector();
     Instance inst = conn.getInstance();
-    MetadataTable.getEntries(conn.getInstance(), CredentialHelper.create(opts.principal, opts.getToken(), opts.instance), tableName, false,
-        locations, tablets);
+    MetadataTable.getEntries(conn.getInstance(), CredentialHelper.create(opts.principal, opts.getToken(), opts.instance), tableName, false, locations, tablets);
     
     final HashSet<KeyExtent> failures = new HashSet<KeyExtent>();
     
@@ -151,8 +150,8 @@ public class VerifyTabletAssignments {
     }
   }
   
-  private static void checkTabletServer(AccumuloConfiguration conf, TCredentials st, Entry<String,List<KeyExtent>> entry,
-      HashSet<KeyExtent> failures) throws ThriftSecurityException, TException, NoSuchScanIDException {
+  private static void checkTabletServer(AccumuloConfiguration conf, TCredentials st, Entry<String,List<KeyExtent>> entry, HashSet<KeyExtent> failures)
+      throws ThriftSecurityException, TException, NoSuchScanIDException {
     TabletClientService.Iface client = ThriftUtil.getTServerClient(entry.getKey(), conf);
     
     Map<TKeyExtent,List<TRange>> batch = new TreeMap<TKeyExtent,List<TRange>>();
@@ -187,8 +186,8 @@ public class VerifyTabletAssignments {
     Map<String,Map<String,String>> emptyMapSMapSS = Collections.emptyMap();
     List<IterInfo> emptyListIterInfo = Collections.emptyList();
     List<TColumn> emptyListColumn = Collections.emptyList();
-    InitialMultiScan is = client.startMultiScan(tinfo, st, batch, emptyListColumn, emptyListIterInfo, emptyMapSMapSS, Constants.NO_AUTHS.getAuthorizationsBB(),
-        false);
+    InitialMultiScan is = client.startMultiScan(tinfo, st, batch, emptyListColumn, emptyListIterInfo, emptyMapSMapSS,
+        Authorizations.EMPTY.getAuthorizationsBB(), false);
     if (is.result.more) {
       MultiScanResult result = client.continueMultiScan(tinfo, is.scanID);
       checkFailures(entry.getKey(), failures, result);

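On the thrift path the constant is serialized rather than passed directly: the startMultiScan call above ships it via getAuthorizationsBB() as a list of ByteBuffer tokens. A tiny sketch of that wire form:

import java.nio.ByteBuffer;
import java.util.List;

import org.apache.accumulo.core.security.Authorizations;

public class EmptyAuthsWireFormSketch {
  public static void main(String[] args) {
    // The form handed to startMultiScan above: each authorization token as a ByteBuffer.
    List<ByteBuffer> wireForm = Authorizations.EMPTY.getAuthorizationsBB();
    System.out.println("authorization tokens on the wire: " + wireForm.size()); // expected: 0 for the empty set
  }
}
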
Modified: accumulo/trunk/server/src/test/java/org/apache/accumulo/server/gc/TestConfirmDeletes.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/server/src/test/java/org/apache/accumulo/server/gc/TestConfirmDeletes.java?rev=1493250&r1=1493249&r2=1493250&view=diff
==============================================================================
--- accumulo/trunk/server/src/test/java/org/apache/accumulo/server/gc/TestConfirmDeletes.java (original)
+++ accumulo/trunk/server/src/test/java/org/apache/accumulo/server/gc/TestConfirmDeletes.java Fri Jun 14 21:19:23 2013
@@ -32,6 +32,7 @@ import org.apache.accumulo.core.client.s
 import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.security.Authorizations;
 import org.apache.accumulo.core.security.CredentialHelper;
 import org.apache.accumulo.core.security.thrift.TCredentials;
 import org.apache.accumulo.core.util.CachedConfiguration;
@@ -109,7 +110,7 @@ public class TestConfirmDeletes {
     TCredentials credential = CredentialHelper.create("root", new PasswordToken(new byte[0]), "instance");
     
     Scanner scanner = instance.getConnector(credential.getPrincipal(), CredentialHelper.extractToken(credential)).createScanner(Constants.METADATA_TABLE_NAME,
-        Constants.NO_AUTHS);
+        Authorizations.EMPTY);
     int count = 0;
     for (@SuppressWarnings("unused")
     Entry<Key,Value> entry : scanner) {

Modified: accumulo/trunk/server/src/test/java/org/apache/accumulo/server/master/TestMergeState.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/server/src/test/java/org/apache/accumulo/server/master/TestMergeState.java?rev=1493250&r1=1493249&r2=1493250&view=diff
==============================================================================
--- accumulo/trunk/server/src/test/java/org/apache/accumulo/server/master/TestMergeState.java (original)
+++ accumulo/trunk/server/src/test/java/org/apache/accumulo/server/master/TestMergeState.java Fri Jun 14 21:19:23 2013
@@ -35,6 +35,7 @@ import org.apache.accumulo.core.data.Key
 import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.data.Range;
 import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.security.Authorizations;
 import org.apache.accumulo.core.security.CredentialHelper;
 import org.apache.accumulo.core.security.thrift.TCredentials;
 import org.apache.accumulo.server.master.state.Assignment;
@@ -137,7 +138,7 @@ public class TestMergeState {
     Assert.assertEquals(MergeState.WAITING_FOR_OFFLINE, newState);
     
     // unassign the tablets
-    BatchDeleter deleter = connector.createBatchDeleter("!METADATA", Constants.NO_AUTHS, 1000, new BatchWriterConfig());
+    BatchDeleter deleter = connector.createBatchDeleter("!METADATA", Authorizations.EMPTY, 1000, new BatchWriterConfig());
     deleter.fetchColumnFamily(Constants.METADATA_CURRENT_LOCATION_COLUMN_FAMILY);
     deleter.setRanges(Collections.singletonList(new Range()));
     deleter.delete();

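The merge-state test now builds its BatchDeleter with the same constant. A minimal sketch of that call shape; the table name, full-table range, and try/finally cleanup are illustrative and not taken from the test:

import java.util.Collections;

import org.apache.accumulo.core.client.BatchDeleter;
import org.apache.accumulo.core.client.BatchWriterConfig;
import org.apache.accumulo.core.client.Connector;
import org.apache.accumulo.core.data.Range;
import org.apache.accumulo.core.security.Authorizations;

public class BatchDeleterSketch {
  // Delete everything the empty-authorization scan can see in the given table.
  static void deleteAll(Connector connector, String table) throws Exception {
    BatchDeleter deleter = connector.createBatchDeleter(table, Authorizations.EMPTY, 1000, new BatchWriterConfig());
    try {
      deleter.setRanges(Collections.singletonList(new Range()));
      deleter.delete();
    } finally {
      deleter.close();
    }
  }
}
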
Modified: accumulo/trunk/server/src/test/java/org/apache/accumulo/server/util/CloneTest.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/server/src/test/java/org/apache/accumulo/server/util/CloneTest.java?rev=1493250&r1=1493249&r2=1493250&view=diff
==============================================================================
--- accumulo/trunk/server/src/test/java/org/apache/accumulo/server/util/CloneTest.java (original)
+++ accumulo/trunk/server/src/test/java/org/apache/accumulo/server/util/CloneTest.java Fri Jun 14 21:19:23 2013
@@ -32,6 +32,7 @@ import org.apache.accumulo.core.data.Key
 import org.apache.accumulo.core.data.KeyExtent;
 import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.security.Authorizations;
 import org.apache.hadoop.io.Text;
 
 public class CloneTest extends TestCase {
@@ -100,7 +101,7 @@ public class CloneTest extends TestCase 
     
     assertEquals(0, rc);
     
-    Scanner scanner = conn.createScanner(Constants.METADATA_TABLE_NAME, Constants.NO_AUTHS);
+    Scanner scanner = conn.createScanner(Constants.METADATA_TABLE_NAME, Authorizations.EMPTY);
     scanner.setRange(new KeyExtent(new Text("1"), null, null).toMetadataRange());
     
     HashSet<String> files = new HashSet<String>();
@@ -139,7 +140,7 @@ public class CloneTest extends TestCase 
     
     assertEquals(0, rc);
     
-    Scanner scanner = conn.createScanner(Constants.METADATA_TABLE_NAME, Constants.NO_AUTHS);
+    Scanner scanner = conn.createScanner(Constants.METADATA_TABLE_NAME, Authorizations.EMPTY);
     scanner.setRange(new KeyExtent(new Text("1"), null, null).toMetadataRange());
     
     HashSet<String> files = new HashSet<String>();
@@ -187,7 +188,7 @@ public class CloneTest extends TestCase 
     
     assertEquals(0, rc);
     
-    Scanner scanner = conn.createScanner(Constants.METADATA_TABLE_NAME, Constants.NO_AUTHS);
+    Scanner scanner = conn.createScanner(Constants.METADATA_TABLE_NAME, Authorizations.EMPTY);
     scanner.setRange(new KeyExtent(new Text("1"), null, null).toMetadataRange());
     
     HashSet<String> files = new HashSet<String>();
@@ -255,7 +256,7 @@ public class CloneTest extends TestCase 
     
     assertEquals(0, rc);
     
-    Scanner scanner = conn.createScanner(Constants.METADATA_TABLE_NAME, Constants.NO_AUTHS);
+    Scanner scanner = conn.createScanner(Constants.METADATA_TABLE_NAME, Authorizations.EMPTY);
     scanner.setRange(new KeyExtent(new Text("1"), null, null).toMetadataRange());
     
     HashSet<String> files = new HashSet<String>();
@@ -319,7 +320,7 @@ public class CloneTest extends TestCase 
     
     assertEquals(0, rc);
     
-    Scanner scanner = conn.createScanner(Constants.METADATA_TABLE_NAME, Constants.NO_AUTHS);
+    Scanner scanner = conn.createScanner(Constants.METADATA_TABLE_NAME, Authorizations.EMPTY);
     scanner.setRange(new KeyExtent(new Text("1"), null, null).toMetadataRange());
     
     HashSet<String> files = new HashSet<String>();

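These test scans behave the same before and after the change, on the assumption that Constants.NO_AUTHS was simply an empty Authorizations instance and Authorizations.EMPTY is the shared equivalent. A one-off check of that assumption:

import org.apache.accumulo.core.security.Authorizations;

public class EmptyAuthsEquivalenceSketch {
  public static void main(String[] args) {
    // Both should report an empty authorization set if the assumption above holds.
    System.out.println(Authorizations.EMPTY.getAuthorizations().isEmpty()); // expected: true
    System.out.println(new Authorizations().getAuthorizations().isEmpty()); // expected: true
  }
}
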
Modified: accumulo/trunk/server/src/test/java/org/apache/accumulo/server/util/TabletIteratorTest.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/server/src/test/java/org/apache/accumulo/server/util/TabletIteratorTest.java?rev=1493250&r1=1493249&r2=1493250&view=diff
==============================================================================
--- accumulo/trunk/server/src/test/java/org/apache/accumulo/server/util/TabletIteratorTest.java (original)
+++ accumulo/trunk/server/src/test/java/org/apache/accumulo/server/util/TabletIteratorTest.java Fri Jun 14 21:19:23 2013
@@ -32,6 +32,7 @@ import org.apache.accumulo.core.data.Key
 import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.data.Range;
 import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.security.Authorizations;
 import org.apache.accumulo.server.util.TabletIterator.TabletDeletedException;
 import org.apache.hadoop.io.Text;
 
@@ -42,13 +43,13 @@ public class TabletIteratorTest extends 
     private Connector conn;
     
     public TestTabletIterator(Connector conn) throws Exception {
-      super(conn.createScanner(Constants.METADATA_TABLE_NAME, Constants.NO_AUTHS), Constants.METADATA_KEYSPACE, true, true);
+      super(conn.createScanner(Constants.METADATA_TABLE_NAME, Authorizations.EMPTY), Constants.METADATA_KEYSPACE, true, true);
       this.conn = conn;
     }
     
     protected void resetScanner() {
       try {
-        Scanner ds = conn.createScanner(Constants.METADATA_TABLE_NAME, Constants.NO_AUTHS);
+        Scanner ds = conn.createScanner(Constants.METADATA_TABLE_NAME, Authorizations.EMPTY);
         Text tablet = new KeyExtent(new Text("0"), new Text("m"), null).getMetadataEntry();
         ds.setRange(new Range(tablet, true, tablet, true));
         

Modified: accumulo/trunk/test/src/main/java/org/apache/accumulo/test/QueryMetadataTable.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/test/src/main/java/org/apache/accumulo/test/QueryMetadataTable.java?rev=1493250&r1=1493249&r2=1493250&view=diff
==============================================================================
--- accumulo/trunk/test/src/main/java/org/apache/accumulo/test/QueryMetadataTable.java (original)
+++ accumulo/trunk/test/src/main/java/org/apache/accumulo/test/QueryMetadataTable.java Fri Jun 14 21:19:23 2013
@@ -36,6 +36,7 @@ import org.apache.accumulo.core.data.Key
 import org.apache.accumulo.core.data.KeyExtent;
 import org.apache.accumulo.core.data.Range;
 import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.security.Authorizations;
 import org.apache.accumulo.server.cli.ClientOpts;
 import org.apache.accumulo.server.client.HdfsZooInstance;
 import org.apache.hadoop.io.Text;
@@ -55,12 +56,13 @@ public class QueryMetadataTable {
       this.row = row;
     }
     
+    @Override
     public void run() {
       try {
         KeyExtent extent = new KeyExtent(row, (Text) null);
         
         Connector connector = HdfsZooInstance.getInstance().getConnector(principal, token);
-        Scanner mdScanner = connector.createScanner(Constants.METADATA_TABLE_NAME, Constants.NO_AUTHS);
+        Scanner mdScanner = connector.createScanner(Constants.METADATA_TABLE_NAME, Authorizations.EMPTY);
         Text row = extent.getMetadataEntry();
         
         mdScanner.setRange(new Range(row));
@@ -84,9 +86,9 @@ public class QueryMetadataTable {
   }
   
   static class Opts extends ClientOpts {
-    @Parameter(names="--numQueries", description="number of queries to run")
+    @Parameter(names = "--numQueries", description = "number of queries to run")
     int numQueries = 1;
-    @Parameter(names="--numThreads", description="number of threads used to run the queries")
+    @Parameter(names = "--numThreads", description = "number of threads used to run the queries")
     int numThreads = 1;
   }
   

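The @Parameter annotations above were only reformatted (spaces around '='). For reference, a standalone sketch of how such an options class is parsed with JCommander; the class name and the final println are placeholders, while the option names and defaults mirror the hunk above:

import com.beust.jcommander.JCommander;
import com.beust.jcommander.Parameter;

public class OptsParseSketch {
  static class Opts {
    @Parameter(names = "--numQueries", description = "number of queries to run")
    int numQueries = 1;
    @Parameter(names = "--numThreads", description = "number of threads used to run the queries")
    int numThreads = 1;
  }

  public static void main(String[] args) {
    Opts opts = new Opts();
    JCommander jc = new JCommander(opts);
    jc.parse(args);
    System.out.println("queries=" + opts.numQueries + " threads=" + opts.numThreads);
  }
}
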
Modified: accumulo/trunk/test/src/main/java/org/apache/accumulo/test/continuous/ContinuousWalk.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/test/src/main/java/org/apache/accumulo/test/continuous/ContinuousWalk.java?rev=1493250&r1=1493249&r2=1493250&view=diff
==============================================================================
--- accumulo/trunk/test/src/main/java/org/apache/accumulo/test/continuous/ContinuousWalk.java (original)
+++ accumulo/trunk/test/src/main/java/org/apache/accumulo/test/continuous/ContinuousWalk.java Fri Jun 14 21:19:23 2013
@@ -26,15 +26,14 @@ import java.util.Map.Entry;
 import java.util.Random;
 import java.util.zip.CRC32;
 
-import org.apache.accumulo.trace.instrument.Span;
-import org.apache.accumulo.trace.instrument.Trace;
-import org.apache.accumulo.core.Constants;
 import org.apache.accumulo.core.client.Connector;
 import org.apache.accumulo.core.client.Scanner;
 import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.Range;
 import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.trace.instrument.Span;
+import org.apache.accumulo.trace.instrument.Trace;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -43,7 +42,6 @@ import org.apache.hadoop.io.Text;
 import com.beust.jcommander.IStringConverter;
 import com.beust.jcommander.Parameter;
 
-
 public class ContinuousWalk {
   
   static public class Opts extends ContinuousQuery.Opts {
@@ -57,7 +55,8 @@ public class ContinuousWalk {
         }
       }
     }
-    @Parameter(names="--authsFile", description="read the authorities to use from a file")
+    
+    @Parameter(names = "--authsFile", description = "read the authorities to use from a file")
     RandomAuths randomAuths = new RandomAuths();
   }
   
@@ -74,12 +73,12 @@ public class ContinuousWalk {
     private List<Authorizations> auths;
     
     RandomAuths() {
-      auths = Collections.singletonList(Constants.NO_AUTHS);
+      auths = Collections.singletonList(Authorizations.EMPTY);
     }
     
     RandomAuths(String file) throws IOException {
       if (file == null) {
-        auths = Collections.singletonList(Constants.NO_AUTHS);
+        auths = Collections.singletonList(Authorizations.EMPTY);
         return;
       }
       
@@ -101,7 +100,7 @@ public class ContinuousWalk {
       return auths.get(r.nextInt(auths.size()));
     }
   }
-
+  
   public static void main(String[] args) throws Exception {
     Opts opts = new Opts();
     opts.parseArgs(ContinuousWalk.class.getName(), args);

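ContinuousWalk's RandomAuths now defaults to Authorizations.EMPTY when no --authsFile is given. A simplified sketch of that default plus a hypothetical comma-separated parser; the real class reads the file line by line, so the split here is an assumption:

import java.util.Collections;
import java.util.List;
import java.util.Random;

import org.apache.accumulo.core.security.Authorizations;

public class RandomAuthsSketch {
  // Returns the empty-authorizations default when no input is given, otherwise one
  // Authorizations built from comma-separated labels (hypothetical format).
  static List<Authorizations> parse(String line) {
    if (line == null || line.isEmpty())
      return Collections.singletonList(Authorizations.EMPTY); // same default the patch adopts
    return Collections.singletonList(new Authorizations(line.split(",")));
  }

  static Authorizations pick(List<Authorizations> auths, Random r) {
    return auths.get(r.nextInt(auths.size()));
  }

  public static void main(String[] args) {
    System.out.println(pick(parse(null), new Random()));    // empty authorizations
    System.out.println(pick(parse("A,B,C"), new Random())); // hypothetical labels A, B, C
  }
}
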
Modified: accumulo/trunk/test/src/main/java/org/apache/accumulo/test/functional/AddSplitTest.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/test/src/main/java/org/apache/accumulo/test/functional/AddSplitTest.java?rev=1493250&r1=1493249&r2=1493250&view=diff
==============================================================================
--- accumulo/trunk/test/src/main/java/org/apache/accumulo/test/functional/AddSplitTest.java (original)
+++ accumulo/trunk/test/src/main/java/org/apache/accumulo/test/functional/AddSplitTest.java Fri Jun 14 21:19:23 2013
@@ -24,7 +24,6 @@ import java.util.Map;
 import java.util.Map.Entry;
 import java.util.TreeSet;
 
-import org.apache.accumulo.core.Constants;
 import org.apache.accumulo.core.client.AccumuloException;
 import org.apache.accumulo.core.client.AccumuloSecurityException;
 import org.apache.accumulo.core.client.BatchWriter;
@@ -35,6 +34,7 @@ import org.apache.accumulo.core.client.T
 import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.security.Authorizations;
 import org.apache.accumulo.core.util.UtilWaitThread;
 import org.apache.hadoop.io.Text;
 
@@ -99,7 +99,7 @@ public class AddSplitTest extends Functi
   }
   
   private void verifyData(long ts) throws Exception {
-    Scanner scanner = getConnector().createScanner("foo", Constants.NO_AUTHS);
+    Scanner scanner = getConnector().createScanner("foo", Authorizations.EMPTY);
     
     Iterator<Entry<Key,Value>> iter = scanner.iterator();
     

Modified: accumulo/trunk/test/src/main/java/org/apache/accumulo/test/functional/BadIteratorMincTest.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/test/src/main/java/org/apache/accumulo/test/functional/BadIteratorMincTest.java?rev=1493250&r1=1493249&r2=1493250&view=diff
==============================================================================
--- accumulo/trunk/test/src/main/java/org/apache/accumulo/test/functional/BadIteratorMincTest.java (original)
+++ accumulo/trunk/test/src/main/java/org/apache/accumulo/test/functional/BadIteratorMincTest.java Fri Jun 14 21:19:23 2013
@@ -21,7 +21,6 @@ import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
 
-import org.apache.accumulo.core.Constants;
 import org.apache.accumulo.core.client.BatchWriter;
 import org.apache.accumulo.core.client.BatchWriterConfig;
 import org.apache.accumulo.core.client.Scanner;
@@ -29,6 +28,7 @@ import org.apache.accumulo.core.conf.Pro
 import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.security.Authorizations;
 import org.apache.accumulo.core.util.UtilWaitThread;
 import org.apache.hadoop.io.Text;
 
@@ -69,7 +69,7 @@ public class BadIteratorMincTest extends
     checkRFiles("foo", 1, 1, 0, 0);
     
     // try to scan table
-    Scanner scanner = getConnector().createScanner("foo", Constants.NO_AUTHS);
+    Scanner scanner = getConnector().createScanner("foo", Authorizations.EMPTY);
     
     int count = 0;
     for (@SuppressWarnings("unused")
@@ -115,7 +115,7 @@ public class BadIteratorMincTest extends
     
     // this should not hang
     getConnector().tableOperations().delete("foo");
-
+    
   }
   
 }

Modified: accumulo/trunk/test/src/main/java/org/apache/accumulo/test/functional/BatchScanSplitTest.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/test/src/main/java/org/apache/accumulo/test/functional/BatchScanSplitTest.java?rev=1493250&r1=1493249&r2=1493250&view=diff
==============================================================================
--- accumulo/trunk/test/src/main/java/org/apache/accumulo/test/functional/BatchScanSplitTest.java (original)
+++ accumulo/trunk/test/src/main/java/org/apache/accumulo/test/functional/BatchScanSplitTest.java Fri Jun 14 21:19:23 2013
@@ -25,7 +25,6 @@ import java.util.Map;
 import java.util.Map.Entry;
 import java.util.Random;
 
-import org.apache.accumulo.core.Constants;
 import org.apache.accumulo.core.client.BatchScanner;
 import org.apache.accumulo.core.client.BatchWriter;
 import org.apache.accumulo.core.client.BatchWriterConfig;
@@ -34,6 +33,7 @@ import org.apache.accumulo.core.data.Key
 import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.data.Range;
 import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.security.Authorizations;
 import org.apache.accumulo.core.util.UtilWaitThread;
 import org.apache.hadoop.io.Text;
 
@@ -95,7 +95,7 @@ public class BatchScanSplitTest extends 
     
     // logger.setLevel(Level.TRACE);
     
-    BatchScanner bs = getConnector().createBatchScanner("bss", Constants.NO_AUTHS, 4);
+    BatchScanner bs = getConnector().createBatchScanner("bss", Authorizations.EMPTY, 4);
     
     HashMap<Text,Value> found = new HashMap<Text,Value>();
     

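The batch-scan tests use the same constant through createBatchScanner(table, authorizations, numQueryThreads). A minimal sketch of that usage; the connector, table, and ranges are supplied by the caller and are not part of this commit:

import java.util.List;
import java.util.Map.Entry;

import org.apache.accumulo.core.client.BatchScanner;
import org.apache.accumulo.core.client.Connector;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Range;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.security.Authorizations;

public class BatchScannerSketch {
  // Count entries across the given ranges using four query threads and no scan authorizations.
  static long countEntries(Connector conn, String table, List<Range> ranges) throws Exception {
    BatchScanner bs = conn.createBatchScanner(table, Authorizations.EMPTY, 4);
    try {
      bs.setRanges(ranges);
      long count = 0;
      for (Entry<Key,Value> entry : bs)
        count++;
      return count;
    } finally {
      bs.close();
    }
  }
}
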
Modified: accumulo/trunk/test/src/main/java/org/apache/accumulo/test/functional/BatchWriterFlushTest.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/test/src/main/java/org/apache/accumulo/test/functional/BatchWriterFlushTest.java?rev=1493250&r1=1493249&r2=1493250&view=diff
==============================================================================
--- accumulo/trunk/test/src/main/java/org/apache/accumulo/test/functional/BatchWriterFlushTest.java (original)
+++ accumulo/trunk/test/src/main/java/org/apache/accumulo/test/functional/BatchWriterFlushTest.java Fri Jun 14 21:19:23 2013
@@ -25,7 +25,6 @@ import java.util.Map.Entry;
 import java.util.concurrent.TimeUnit;
 import java.util.Random;
 
-import org.apache.accumulo.core.Constants;
 import org.apache.accumulo.core.client.AccumuloException;
 import org.apache.accumulo.core.client.AccumuloSecurityException;
 import org.apache.accumulo.core.client.BatchWriter;
@@ -37,6 +36,7 @@ import org.apache.accumulo.core.data.Key
 import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.data.Range;
 import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.security.Authorizations;
 import org.apache.accumulo.core.util.UtilWaitThread;
 import org.apache.hadoop.io.Text;
 
@@ -73,7 +73,7 @@ public class BatchWriterFlushTest extend
   private void runLatencyTest() throws Exception {
     // should automatically flush after 3 seconds
     BatchWriter bw = getConnector().createBatchWriter("bwlt", new BatchWriterConfig().setMaxLatency(2000, TimeUnit.MILLISECONDS));
-    Scanner scanner = getConnector().createScanner("bwlt", Constants.NO_AUTHS);
+    Scanner scanner = getConnector().createScanner("bwlt", Authorizations.EMPTY);
     
     Mutation m = new Mutation(new Text(String.format("r_%10d", 1)));
     m.put(new Text("cf"), new Text("cq"), new Value(("" + 1).getBytes()));
@@ -107,7 +107,7 @@ public class BatchWriterFlushTest extend
   
   private void runFlushTest() throws AccumuloException, AccumuloSecurityException, TableNotFoundException, MutationsRejectedException, Exception {
     BatchWriter bw = getConnector().createBatchWriter("bwft", new BatchWriterConfig());
-    Scanner scanner = getConnector().createScanner("bwft", Constants.NO_AUTHS);
+    Scanner scanner = getConnector().createScanner("bwft", Authorizations.EMPTY);
     Random r = new Random();
     
     for (int i = 0; i < 4; i++) {

Modified: accumulo/trunk/test/src/main/java/org/apache/accumulo/test/functional/BloomFilterTest.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/test/src/main/java/org/apache/accumulo/test/functional/BloomFilterTest.java?rev=1493250&r1=1493249&r2=1493250&view=diff
==============================================================================
--- accumulo/trunk/test/src/main/java/org/apache/accumulo/test/functional/BloomFilterTest.java (original)
+++ accumulo/trunk/test/src/main/java/org/apache/accumulo/test/functional/BloomFilterTest.java Fri Jun 14 21:19:23 2013
@@ -24,7 +24,6 @@ import java.util.Map;
 import java.util.Map.Entry;
 import java.util.Random;
 
-import org.apache.accumulo.core.Constants;
 import org.apache.accumulo.core.client.BatchScanner;
 import org.apache.accumulo.core.client.BatchWriter;
 import org.apache.accumulo.core.client.BatchWriterConfig;
@@ -35,6 +34,7 @@ import org.apache.accumulo.core.data.Mut
 import org.apache.accumulo.core.data.PartialKey;
 import org.apache.accumulo.core.data.Range;
 import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.security.Authorizations;
 import org.apache.hadoop.io.Text;
 
 public class BloomFilterTest extends FunctionalTest {
@@ -75,7 +75,7 @@ public class BloomFilterTest extends Fun
     bw.close();
     getConnector().tableOperations().flush("bt4", null, null, true);
     
-    for (String table : new String[]{"bt1", "bt2", "bt3"}) {
+    for (String table : new String[] {"bt1", "bt2", "bt3"}) {
       getConnector().tableOperations().setProperty(table, Property.TABLE_INDEXCACHE_ENABLED.getKey(), "false");
       getConnector().tableOperations().setProperty(table, Property.TABLE_BLOCKCACHE_ENABLED.getKey(), "false");
       getConnector().tableOperations().flush(table, null, null, true);
@@ -122,7 +122,7 @@ public class BloomFilterTest extends Fun
     timeCheck(t3, tb3);
     
     // test querying for empty key
-    Scanner scanner = getConnector().createScanner("bt4", Constants.NO_AUTHS);
+    Scanner scanner = getConnector().createScanner("bt4", Authorizations.EMPTY);
     scanner.setRange(new Range(new Text("")));
     
     if (!scanner.iterator().next().getValue().toString().equals("foo1")) {
@@ -172,7 +172,7 @@ public class BloomFilterTest extends Fun
       ranges.add(range);
     }
     
-    BatchScanner bs = getConnector().createBatchScanner(table, Constants.NO_AUTHS, 3);
+    BatchScanner bs = getConnector().createBatchScanner(table, Authorizations.EMPTY, 3);
     bs.setRanges(ranges);
     
     long t1 = System.currentTimeMillis();

Modified: accumulo/trunk/test/src/main/java/org/apache/accumulo/test/functional/BulkFileTest.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/test/src/main/java/org/apache/accumulo/test/functional/BulkFileTest.java?rev=1493250&r1=1493249&r2=1493250&view=diff
==============================================================================
--- accumulo/trunk/test/src/main/java/org/apache/accumulo/test/functional/BulkFileTest.java (original)
+++ accumulo/trunk/test/src/main/java/org/apache/accumulo/test/functional/BulkFileTest.java Fri Jun 14 21:19:23 2013
@@ -22,7 +22,6 @@ import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
 
-import org.apache.accumulo.core.Constants;
 import org.apache.accumulo.core.client.Scanner;
 import org.apache.accumulo.core.conf.AccumuloConfiguration;
 import org.apache.accumulo.core.data.Key;
@@ -31,6 +30,7 @@ import org.apache.accumulo.core.file.Fil
 import org.apache.accumulo.core.file.FileSKVWriter;
 import org.apache.accumulo.core.file.FileUtil;
 import org.apache.accumulo.core.file.rfile.RFile;
+import org.apache.accumulo.core.security.Authorizations;
 import org.apache.accumulo.server.conf.ServerConfiguration;
 import org.apache.accumulo.server.trace.TraceFileSystem;
 import org.apache.hadoop.conf.Configuration;
@@ -87,7 +87,7 @@ public class BulkFileTest extends Functi
   }
   
   private void verifyData(String table, int s, int e) throws Exception {
-    Scanner scanner = getConnector().createScanner(table, Constants.NO_AUTHS);
+    Scanner scanner = getConnector().createScanner(table, Authorizations.EMPTY);
     
     Iterator<Entry<Key,Value>> iter = scanner.iterator();
     

Modified: accumulo/trunk/test/src/main/java/org/apache/accumulo/test/functional/ConcurrencyTest.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/test/src/main/java/org/apache/accumulo/test/functional/ConcurrencyTest.java?rev=1493250&r1=1493249&r2=1493250&view=diff
==============================================================================
--- accumulo/trunk/test/src/main/java/org/apache/accumulo/test/functional/ConcurrencyTest.java (original)
+++ accumulo/trunk/test/src/main/java/org/apache/accumulo/test/functional/ConcurrencyTest.java Fri Jun 14 21:19:23 2013
@@ -22,7 +22,6 @@ import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
 
-import org.apache.accumulo.core.Constants;
 import org.apache.accumulo.core.client.BatchWriter;
 import org.apache.accumulo.core.client.BatchWriterConfig;
 import org.apache.accumulo.core.client.Connector;
@@ -32,6 +31,7 @@ import org.apache.accumulo.core.conf.Pro
 import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.security.Authorizations;
 import org.apache.accumulo.core.util.UtilWaitThread;
 import org.apache.hadoop.io.Text;
 
@@ -43,7 +43,7 @@ public class ConcurrencyTest extends Fun
     Scanner scanner;
     
     ScanTask(Connector conn, long time) throws Exception {
-      scanner = conn.createScanner("cct", Constants.NO_AUTHS);
+      scanner = conn.createScanner("cct", Authorizations.EMPTY);
       IteratorSetting slow = new IteratorSetting(30, "slow", SlowIterator.class);
       slow.addOption("sleepTime", "" + time);
       scanner.addScanIterator(slow);