You are viewing a plain-text version of this content; the canonical (linked) version is available in the original mailing-list archive.
Posted to commits@accumulo.apache.org by el...@apache.org on 2014/09/22 22:44:59 UTC

[01/15] git commit: branching for gradual introduction of feature set around ACCUMULO-652

Repository: accumulo
Updated Branches:
  refs/heads/ACCUMULO-652 [created] 58fcad6ec


branching for gradual introduction of feature set around ACCUMULO-652

git-svn-id: https://svn.apache.org/repos/asf/accumulo/branches/ACCUMULO-652@1354155 13f79535-47bb-0310-9956-ffa450edef68


Project: http://git-wip-us.apache.org/repos/asf/accumulo/repo
Commit: http://git-wip-us.apache.org/repos/asf/accumulo/commit/fd77a560
Tree: http://git-wip-us.apache.org/repos/asf/accumulo/tree/fd77a560
Diff: http://git-wip-us.apache.org/repos/asf/accumulo/diff/fd77a560

Branch: refs/heads/ACCUMULO-652
Commit: fd77a5601ac478bc1e20f830bce4437bc4ccdc85
Parents: 65fc814
Author: Adam Fuchs <af...@apache.org>
Authored: Tue Jun 26 18:36:39 2012 +0000
Committer: Adam Fuchs <af...@apache.org>
Committed: Tue Jun 26 18:36:39 2012 +0000

----------------------------------------------------------------------

----------------------------------------------------------------------



[15/15] git commit: ACCUMULO-652 merged changes from trunk

Posted by el...@apache.org.
ACCUMULO-652 merged changes from trunk

git-svn-id: https://svn.apache.org/repos/asf/accumulo/branches/ACCUMULO-652@1438749 13f79535-47bb-0310-9956-ffa450edef68


Project: http://git-wip-us.apache.org/repos/asf/accumulo/repo
Commit: http://git-wip-us.apache.org/repos/asf/accumulo/commit/58fcad6e
Tree: http://git-wip-us.apache.org/repos/asf/accumulo/tree/58fcad6e
Diff: http://git-wip-us.apache.org/repos/asf/accumulo/diff/58fcad6e

Branch: refs/heads/ACCUMULO-652
Commit: 58fcad6ec19e4c41ab733584c584bf5fe64fbc4f
Parents: 7bfa823 2983a1f
Author: Adam Fuchs <af...@apache.org>
Authored: Fri Jan 25 22:50:58 2013 +0000
Committer: Adam Fuchs <af...@apache.org>
Committed: Fri Jan 25 22:50:58 2013 +0000

----------------------------------------------------------------------
 .gitignore                                      |   148 +-
 LICENSE                                         |   240 +-
 README                                          |    36 +-
 assemble/build.sh                               |     2 +-
 assemble/dist.xml                               |   184 +-
 assemble/pom.xml                                |    76 +-
 assemble/scripts/gc-only-init.sh                |    47 +
 assemble/scripts/init.d/accumulo-gc             |   159 +
 assemble/scripts/init.d/accumulo-master         |   160 +
 assemble/scripts/init.d/accumulo-monitor        |   159 +
 assemble/scripts/init.d/accumulo-slave          |   164 +
 assemble/scripts/init.d/accumulo-tracer         |   159 +
 assemble/scripts/init.d/accumulo-tserver        |   160 +
 assemble/scripts/master-only-init.sh            |    46 +
 assemble/scripts/monitor-only-init.sh           |    45 +
 assemble/scripts/slave-only-init.sh             |    46 +
 assemble/scripts/stand-alone-init.sh            |    26 +
 assemble/scripts/tracer-only-init.sh            |    45 +
 assemble/scripts/tserver-only-init.sh           |    46 +
 bin/accumulo                                    |    73 +-
 bin/bootstrap_hdfs.sh                           |    76 +
 bin/config.sh                                   |    21 +-
 bin/start-all.sh                                |     3 +-
 bin/start-server.sh                             |    41 +-
 bin/stop-all.sh                                 |     4 +-
 bin/stop-server.sh                              |    26 +-
 bin/tool.sh                                     |    19 +-
 .../1GB/native-standalone/accumulo-env.sh       |    15 +-
 .../1GB/native-standalone/accumulo-site.xml     |   191 +-
 .../1GB/native-standalone/generic_logger.xml    |     8 +-
 .../1GB/native-standalone/log4j.properties      |     6 +-
 .../1GB/native-standalone/monitor_logger.xml    |     4 +-
 conf/examples/1GB/standalone/accumulo-env.sh    |    15 +-
 conf/examples/1GB/standalone/accumulo-site.xml  |   181 +-
 conf/examples/1GB/standalone/generic_logger.xml |     8 +-
 conf/examples/1GB/standalone/log4j.properties   |     6 +-
 conf/examples/1GB/standalone/monitor_logger.xml |     4 +-
 .../2GB/native-standalone/accumulo-env.sh       |    14 +-
 .../2GB/native-standalone/accumulo-site.xml     |   171 +-
 .../2GB/native-standalone/generic_logger.xml    |     8 +-
 .../2GB/native-standalone/log4j.properties      |     6 +-
 .../2GB/native-standalone/monitor_logger.xml    |     4 +-
 conf/examples/2GB/standalone/accumulo-env.sh    |    15 +-
 conf/examples/2GB/standalone/accumulo-site.xml  |    23 +-
 conf/examples/2GB/standalone/generic_logger.xml |     8 +-
 conf/examples/2GB/standalone/log4j.properties   |     6 +-
 conf/examples/2GB/standalone/monitor_logger.xml |     4 +-
 .../3GB/native-standalone/accumulo-env.sh       |    14 +-
 .../3GB/native-standalone/accumulo-site.xml     |   161 +-
 .../3GB/native-standalone/generic_logger.xml    |     8 +-
 .../3GB/native-standalone/log4j.properties      |     6 +-
 .../3GB/native-standalone/monitor_logger.xml    |     4 +-
 conf/examples/3GB/standalone/accumulo-env.sh    |    14 +-
 conf/examples/3GB/standalone/accumulo-site.xml  |   161 +-
 conf/examples/3GB/standalone/generic_logger.xml |     8 +-
 conf/examples/3GB/standalone/log4j.properties   |     6 +-
 conf/examples/3GB/standalone/monitor_logger.xml |     4 +-
 .../512MB/native-standalone/accumulo-env.sh     |    14 +-
 .../512MB/native-standalone/accumulo-site.xml   |   181 +-
 .../512MB/native-standalone/generic_logger.xml  |     8 +-
 .../512MB/native-standalone/log4j.properties    |     6 +-
 .../512MB/native-standalone/monitor_logger.xml  |     4 +-
 conf/examples/512MB/standalone/accumulo-env.sh  |    14 +-
 .../examples/512MB/standalone/accumulo-site.xml |   191 +-
 .../512MB/standalone/generic_logger.xml         |     8 +-
 conf/examples/512MB/standalone/log4j.properties |     6 +-
 .../512MB/standalone/monitor_logger.xml         |     4 +-
 conf/examples/vfs-classloader/accumulo-site.xml |   116 +
 contrib/Eclipse-Accumulo-Codestyle.xml          |     2 +-
 contrib/Eclipse-Accumulo-Template.xml           |     8 +-
 contrib/findbugs_build.xml                      |    80 +
 contrib/run_findbugs.sh                         |     3 +
 core/pom.xml                                    |    60 +-
 .../org/apache/accumulo/core/Constants.java     |    30 +-
 .../accumulo/core/bloomfilter/BloomFilter.java  |    17 +-
 .../core/bloomfilter/DynamicBloomFilter.java    |    16 +-
 .../accumulo/core/bloomfilter/Filter.java       |    12 +-
 .../accumulo/core/cli/BatchScannerOpts.java     |    30 +
 .../accumulo/core/cli/BatchWriterOpts.java      |    50 +
 .../accumulo/core/cli/ClientOnDefaultTable.java |    52 +
 .../core/cli/ClientOnRequiredTable.java         |    40 +
 .../apache/accumulo/core/cli/ClientOpts.java    |   227 +
 .../java/org/apache/accumulo/core/cli/Help.java |    45 +
 .../apache/accumulo/core/cli/ScannerOpts.java   |    24 +
 .../accumulo/core/client/AccumuloException.java |     6 +-
 .../core/client/AccumuloSecurityException.java  |    24 +-
 .../accumulo/core/client/BatchScanner.java      |    16 +
 .../accumulo/core/client/BatchWriter.java       |     8 +
 .../accumulo/core/client/BatchWriterConfig.java |   224 +
 .../core/client/ClientSideIteratorScanner.java  |    47 +-
 .../apache/accumulo/core/client/Connector.java  |    57 +
 .../apache/accumulo/core/client/Instance.java   |    38 +
 .../accumulo/core/client/IsolatedScanner.java   |    27 +-
 .../accumulo/core/client/IteratorSetting.java   |    37 +-
 .../core/client/MutationsRejectedException.java |    19 +-
 .../apache/accumulo/core/client/Scanner.java    |     4 +
 .../accumulo/core/client/ScannerBase.java       |    19 +
 .../accumulo/core/client/TimedOutException.java |    54 +
 .../accumulo/core/client/ZooKeeperInstance.java |    67 +-
 .../core/client/admin/ActiveCompaction.java     |   184 +
 .../accumulo/core/client/admin/ActiveScan.java  |    17 +
 .../accumulo/core/client/admin/FindMax.java     |     2 +-
 .../core/client/admin/InstanceOperations.java   |    21 +
 .../client/admin/InstanceOperationsImpl.java    |   139 +-
 .../core/client/admin/SecurityOperations.java   |    56 +
 .../client/admin/SecurityOperationsImpl.java    |   182 +-
 .../core/client/admin/TableOperations.java      |    31 +
 .../client/admin/TableOperationsHelper.java     |     6 +-
 .../core/client/admin/TableOperationsImpl.java  |   149 +-
 .../client/impl/AccumuloServerException.java    |     4 +-
 .../core/client/impl/BatchWriterImpl.java       |     9 +-
 .../core/client/impl/ConnectorImpl.java         |    95 +-
 .../accumulo/core/client/impl/MasterClient.java |    16 +-
 .../client/impl/MetadataLocationObtainer.java   |    26 +-
 .../client/impl/MultiTableBatchWriterImpl.java  |     7 +-
 .../core/client/impl/OfflineScanner.java        |    51 +-
 .../accumulo/core/client/impl/ScannerImpl.java  |    46 +-
 .../core/client/impl/ScannerIterator.java       |     6 +-
 .../core/client/impl/ScannerOptions.java        |    20 +
 .../accumulo/core/client/impl/ServerClient.java |    44 +-
 .../core/client/impl/TabletLocator.java         |    30 +-
 .../core/client/impl/TabletLocatorImpl.java     |    25 +-
 .../client/impl/TabletServerBatchDeleter.java   |    19 +-
 .../client/impl/TabletServerBatchReader.java    |     8 +-
 .../impl/TabletServerBatchReaderIterator.java   |   158 +-
 .../client/impl/TabletServerBatchWriter.java    |   177 +-
 .../accumulo/core/client/impl/TabletType.java   |     1 -
 .../core/client/impl/ThriftScanner.java         |    78 +-
 .../core/client/impl/ThriftTransportPool.java   |     4 +-
 .../core/client/impl/TimeoutTabletLocator.java  |   136 +
 .../accumulo/core/client/impl/Writer.java       |    21 +-
 .../core/client/impl/thrift/ClientService.java  | 11281 ++-
 .../client/impl/thrift/ConfigurationType.java   |    21 +-
 .../core/client/impl/thrift/TableOperation.java |    29 +-
 .../thrift/TableOperationExceptionType.java     |    21 +-
 .../thrift/ThriftTableOperationException.java   |   311 +-
 .../core/client/impl/thrift/ThriftTest.java     |  2255 +
 .../client/mapred/AccumuloFileOutputFormat.java |   178 +
 .../core/client/mapred/AccumuloInputFormat.java |    82 +
 .../client/mapred/AccumuloOutputFormat.java     |   477 +
 .../client/mapred/AccumuloRowInputFormat.java   |    85 +
 .../core/client/mapred/InputFormatBase.java     |   805 +
 .../mapreduce/AccumuloFileOutputFormat.java     |   209 +-
 .../client/mapreduce/AccumuloInputFormat.java   |    20 +-
 .../client/mapreduce/AccumuloOutputFormat.java  |   551 +-
 .../mapreduce/AccumuloRowInputFormat.java       |    19 +
 .../core/client/mapreduce/InputFormatBase.java  |  1185 +-
 .../lib/partition/KeyRangePartitioner.java      |     6 +-
 .../lib/partition/RangePartitioner.java         |     7 +-
 .../mapreduce/lib/util/ConfiguratorBase.java    |   228 +
 .../lib/util/FileOutputConfigurator.java        |   187 +
 .../mapreduce/lib/util/InputConfigurator.java   |   529 +
 .../mapreduce/lib/util/OutputConfigurator.java  |   204 +
 .../client/mapreduce/lib/util/package-info.java |    34 +
 .../accumulo/core/client/mock/MockAccumulo.java |    10 +
 .../core/client/mock/MockBatchScanner.java      |    22 +-
 .../core/client/mock/MockConnector.java         |    33 +-
 .../accumulo/core/client/mock/MockInstance.java |    81 +-
 .../client/mock/MockInstanceOperations.java     |    26 +-
 .../accumulo/core/client/mock/MockScanner.java  |    14 +-
 .../client/mock/MockSecurityOperations.java     |    67 +-
 .../accumulo/core/client/mock/MockTable.java    |    19 +-
 .../core/client/mock/MockTableOperations.java   |   146 +-
 .../core/conf/AccumuloConfiguration.java        |     1 +
 .../core/conf/DefaultConfiguration.java         |     2 +-
 .../org/apache/accumulo/core/conf/Property.java |   124 +-
 .../apache/accumulo/core/conf/PropertyType.java |    12 +-
 .../accumulo/core/conf/SiteConfiguration.java   |     4 +-
 .../apache/accumulo/core/data/ColumnUpdate.java |    43 +-
 .../apache/accumulo/core/data/KeyExtent.java    |     5 +-
 .../org/apache/accumulo/core/data/Mutation.java |   317 +-
 .../apache/accumulo/core/data/PartialKey.java   |     7 +-
 .../org/apache/accumulo/core/data/Range.java    |     2 +-
 .../org/apache/accumulo/core/data/Value.java    |     5 +-
 .../core/data/thrift/InitialMultiScan.java      |   216 +-
 .../accumulo/core/data/thrift/InitialScan.java  |   216 +-
 .../accumulo/core/data/thrift/IterInfo.java     |   247 +-
 .../accumulo/core/data/thrift/MapFileInfo.java  |   173 +-
 .../core/data/thrift/MultiScanResult.java       |   629 +-
 .../accumulo/core/data/thrift/ScanResult.java   |   262 +-
 .../accumulo/core/data/thrift/TColumn.java      |   235 +-
 .../thrift/TConstraintViolationSummary.java     |   289 +-
 .../apache/accumulo/core/data/thrift/TKey.java  |   321 +-
 .../accumulo/core/data/thrift/TKeyExtent.java   |   235 +-
 .../accumulo/core/data/thrift/TKeyValue.java    |   204 +-
 .../accumulo/core/data/thrift/TMutation.java    |   333 +-
 .../accumulo/core/data/thrift/TRange.java       |   385 +-
 .../accumulo/core/data/thrift/UpdateErrors.java |   445 +-
 .../accumulo/core/file/BloomFilterLayer.java    |    18 +-
 .../org/apache/accumulo/core/file/FileUtil.java |     2 +-
 .../core/file/blockfile/ABlockReader.java       |    16 +
 .../core/file/blockfile/cache/BlockCache.java   |    13 +-
 .../core/file/blockfile/cache/CacheEntry.java   |    37 +-
 .../core/file/blockfile/cache/CachedBlock.java  |    15 +-
 .../file/blockfile/cache/LruBlockCache.java     |    13 +-
 .../file/blockfile/cache/SimpleBlockCache.java  |    54 +-
 .../file/blockfile/impl/CachableBlockFile.java  |   140 +-
 .../accumulo/core/file/rfile/BlockIndex.java    |   181 +
 .../core/file/rfile/MultiLevelIndex.java        |     1 -
 .../accumulo/core/file/rfile/PrintInfo.java     |    44 +-
 .../apache/accumulo/core/file/rfile/RFile.java  |   176 +-
 .../accumulo/core/file/rfile/RelativeKey.java   |   463 +-
 .../accumulo/core/file/rfile/SplitLarge.java    |    37 +-
 .../accumulo/core/file/rfile/bcfile/BCFile.java |     1 -
 .../bcfile/BoundedRangeFileInputStream.java     |     7 +-
 .../core/file/rfile/bcfile/PrintInfo.java       |    12 +-
 .../accumulo/core/file/rfile/bcfile/TFile.java  |     3 +-
 .../core/file/rfile/bcfile/TFileDumper.java     |     8 +-
 .../core/gc/thrift/GCMonitorService.java        |   541 +-
 .../accumulo/core/gc/thrift/GCStatus.java       |   296 +-
 .../accumulo/core/gc/thrift/GcCycleStats.java   |   383 +-
 .../core/iterators/AggregatingIterator.java     |     7 +-
 .../accumulo/core/iterators/Combiner.java       |    21 +-
 .../iterators/FamilyIntersectingIterator.java   |     1 +
 .../apache/accumulo/core/iterators/Filter.java  |     6 +-
 .../core/iterators/FirstEntryInRowIterator.java |    23 +-
 .../accumulo/core/iterators/GrepIterator.java   |     1 +
 .../core/iterators/IntersectingIterator.java    |     1 +
 .../accumulo/core/iterators/IteratorUtil.java   |    30 +-
 .../accumulo/core/iterators/LargeRowFilter.java |     1 +
 .../accumulo/core/iterators/LongCombiner.java   |     9 +-
 .../core/iterators/OptionDescriber.java         |     4 +-
 .../accumulo/core/iterators/OrIterator.java     |    13 +-
 .../core/iterators/RowDeletingIterator.java     |     1 +
 .../core/iterators/SortedKeyIterator.java       |     2 +-
 .../core/iterators/SortedKeyValueIterator.java  |     7 +
 .../core/iterators/TypedValueCombiner.java      |    13 +-
 .../core/iterators/VersioningIterator.java      |     1 +
 .../core/iterators/WholeRowIterator.java        |     1 +
 .../core/iterators/WrappingIterator.java        |     9 +
 .../core/iterators/aggregation/Aggregator.java  |     1 +
 .../iterators/aggregation/LongSummation.java    |     1 +
 .../aggregation/NumArraySummation.java          |     1 +
 .../iterators/aggregation/NumSummation.java     |     1 +
 .../core/iterators/aggregation/StringMax.java   |     1 +
 .../core/iterators/aggregation/StringMin.java   |     1 +
 .../iterators/aggregation/StringSummation.java  |     1 +
 .../conf/AggregatorConfiguration.java           |     1 +
 .../aggregation/conf/AggregatorSet.java         |     1 +
 .../accumulo/core/iterators/conf/ColumnSet.java |     2 +-
 .../iterators/conf/ColumnToClassMapping.java    |     4 +-
 .../iterators/conf/PerColumnIteratorConfig.java |     1 +
 .../core/iterators/system/GenericFilterer.java  |     1 +
 .../core/iterators/system/HeapIterator.java     |     3 -
 .../system/SourceSwitchingIterator.java         |     2 +-
 .../core/iterators/system/VisibilityFilter.java |    18 +-
 .../core/iterators/user/AgeOffFilter.java       |     7 +-
 .../core/iterators/user/ColumnAgeOffFilter.java |     9 +-
 .../core/iterators/user/IndexedDocIterator.java |    10 +-
 .../iterators/user/IntersectingIterator.java    |    42 +-
 .../core/iterators/user/LargeRowFilter.java     |    19 +-
 .../core/iterators/user/RegExFilter.java        |    91 +-
 .../iterators/user/SummingArrayCombiner.java    |     9 +-
 .../core/iterators/user/SummingCombiner.java    |     3 +-
 .../core/iterators/user/TimestampFilter.java    |    78 +-
 .../iterators/user/TransformingIterator.java    |   656 +
 .../core/iterators/user/VersioningIterator.java |     9 +-
 .../accumulo/core/master/thrift/Compacting.java |   215 +-
 .../accumulo/core/master/thrift/DeadServer.java |   247 +-
 .../core/master/thrift/MasterClientService.java |  9060 +-
 .../core/master/thrift/MasterGoalState.java     |    21 +-
 .../core/master/thrift/MasterMonitorInfo.java   |   704 +-
 .../core/master/thrift/MasterState.java         |    21 +-
 .../core/master/thrift/RecoveryException.java   |   163 +-
 .../core/master/thrift/RecoveryStatus.java      |   252 +-
 .../accumulo/core/master/thrift/TableInfo.java  |   794 +-
 .../core/master/thrift/TableOperation.java      |    29 +-
 .../core/master/thrift/TabletLoadState.java     |    21 +-
 .../core/master/thrift/TabletServerStatus.java  |   691 +-
 .../core/master/thrift/TabletSplit.java         |   256 +-
 .../accumulo/core/security/Authorizations.java  |    87 +-
 .../core/security/ColumnVisibility.java         |   186 +-
 .../accumulo/core/security/SecurityUtil.java    |    84 +
 .../accumulo/core/security/thrift/AuthInfo.java |   260 +-
 .../core/security/thrift/SecurityErrorCode.java |    41 +-
 .../thrift/ThriftInstanceTokenWrapper.java      |   613 +
 .../security/thrift/ThriftKerberosToken.java    |   513 +
 .../thrift/ThriftSecurityException.java         |   200 +-
 .../security/thrift/ThriftUserPassToken.java    |   513 +
 .../core/security/tokens/AccumuloToken.java     |    14 +
 .../security/tokens/InstanceTokenWrapper.java   |    82 +
 .../core/security/tokens/KerberosToken.java     |   160 +
 .../security/tokens/LoginCallbackHandler.java   |    53 +
 .../core/security/tokens/PasswordUpdatable.java |     7 +
 .../core/security/tokens/TokenHelper.java       |   110 +
 .../core/security/tokens/UserPassToken.java     |    75 +
 .../core/tabletserver/thrift/ActionStats.java   |   467 +-
 .../tabletserver/thrift/ActiveCompaction.java   |  1645 +
 .../core/tabletserver/thrift/ActiveScan.java    |   932 +-
 .../tabletserver/thrift/CompactionReason.java   |    70 +
 .../tabletserver/thrift/CompactionType.java     |    67 +
 .../thrift/ConstraintViolationException.java    |   215 +-
 .../tabletserver/thrift/IteratorConfig.java     |   213 +-
 .../thrift/NoSuchScanIDException.java           |   122 +-
 .../thrift/NotServingTabletException.java       |   169 +-
 .../core/tabletserver/thrift/ScanState.java     |    21 +-
 .../core/tabletserver/thrift/ScanType.java      |    21 +-
 .../tabletserver/thrift/TIteratorSetting.java   |   342 +-
 .../thrift/TabletClientService.java             | 15075 +--
 .../tabletserver/thrift/TabletMutations.java    |   530 -
 .../core/tabletserver/thrift/TabletStats.java   |   705 +-
 .../thrift/TooManyFilesException.java           |   169 +-
 .../apache/accumulo/core/trace/TraceDump.java   |    87 +-
 .../accumulo/core/trace/TraceFormatter.java     |    18 +-
 .../org/apache/accumulo/core/util/ColumnFQ.java |    26 +-
 .../accumulo/core/util/ContextFactory.java      |   174 -
 .../accumulo/core/util/LocalityGroupUtil.java   |     2 +-
 .../accumulo/core/util/LoggingRunnable.java     |    16 -
 .../org/apache/accumulo/core/util/Merge.java    |   101 +-
 .../accumulo/core/util/MetadataTable.java       |    30 +-
 .../accumulo/core/util/PeekingIterator.java     |    31 +
 .../accumulo/core/util/ServerServices.java      |     4 +-
 .../accumulo/core/util/SimpleThreadPool.java    |     2 +-
 .../apache/accumulo/core/util/StopWatch.java    |     2 +-
 .../accumulo/core/util/TTimeoutTransport.java   |     7 +-
 .../accumulo/core/util/TableDiskUsage.java      |     2 +-
 .../apache/accumulo/core/util/ThriftUtil.java   |    66 +-
 .../core/util/format/DeleterFormatter.java      |     2 +-
 .../accumulo/core/util/format/HexFormatter.java |   136 +
 .../util/interpret/DefaultScanInterpreter.java  |    51 +
 .../core/util/interpret/HexScanInterpreter.java |    27 +
 .../core/util/interpret/ScanInterpreter.java    |    35 +
 .../apache/accumulo/core/util/shell/Shell.java  |   146 +-
 .../core/util/shell/commands/AboutCommand.java  |     7 +-
 .../commands/ActiveCompactionIterator.java      |   136 +
 .../util/shell/commands/ActiveScanIterator.java |    23 +-
 .../util/shell/commands/AddAuthsCommand.java    |    80 +
 .../util/shell/commands/AddSplitsCommand.java   |    22 +-
 .../shell/commands/AuthenticateCommand.java     |    12 +-
 .../util/shell/commands/ClasspathCommand.java   |     6 +-
 .../core/util/shell/commands/ClearCommand.java  |     6 +-
 .../util/shell/commands/CloneTableCommand.java  |    13 +-
 .../util/shell/commands/CompactCommand.java     |    36 +-
 .../core/util/shell/commands/ConfigCommand.java |   120 +-
 .../util/shell/commands/ConstraintCommand.java  |    12 +-
 .../util/shell/commands/CreateTableCommand.java |    52 +-
 .../util/shell/commands/CreateUserCommand.java  |    41 +-
 .../core/util/shell/commands/DUCommand.java     |    14 +-
 .../core/util/shell/commands/DebugCommand.java  |    13 +-
 .../core/util/shell/commands/DeleteCommand.java |    46 +-
 .../util/shell/commands/DeleteIterCommand.java  |    22 +-
 .../util/shell/commands/DeleteManyCommand.java  |    31 +-
 .../util/shell/commands/DeleteRowsCommand.java  |    10 +-
 .../shell/commands/DeleteScanIterCommand.java   |    24 +-
 .../shell/commands/DeleteShellterCommand.java   |   100 +
 .../util/shell/commands/DeleteTableCommand.java |    14 +-
 .../util/shell/commands/DropUserCommand.java    |     9 +-
 .../core/util/shell/commands/EGrepCommand.java  |    24 +-
 .../util/shell/commands/EscapeTokenizer.java    |    21 +-
 .../util/shell/commands/ExecfileCommand.java    |    10 +-
 .../core/util/shell/commands/ExitCommand.java   |     2 +-
 .../util/shell/commands/ExportTableCommand.java |    78 +
 .../core/util/shell/commands/FlushCommand.java  |     6 +-
 .../util/shell/commands/FormatterCommand.java   |   112 +-
 .../util/shell/commands/GetAuthsCommand.java    |     6 +-
 .../util/shell/commands/GetGroupsCommand.java   |    12 +-
 .../util/shell/commands/GetSplitsCommand.java   |    18 +-
 .../core/util/shell/commands/GrantCommand.java  |    12 +-
 .../core/util/shell/commands/GrepCommand.java   |    43 +-
 .../core/util/shell/commands/HelpCommand.java   |    31 +-
 .../core/util/shell/commands/HiddenCommand.java |     8 +-
 .../util/shell/commands/HistoryCommand.java     |    12 +-
 .../shell/commands/ImportDirectoryCommand.java  |    11 +-
 .../util/shell/commands/ImportTableCommand.java |    51 +
 .../core/util/shell/commands/InsertCommand.java |    60 +-
 .../util/shell/commands/InterpreterCommand.java |    40 +
 .../shell/commands/ListCompactionsCommand.java  |    78 +
 .../util/shell/commands/ListIterCommand.java    |    20 +-
 .../util/shell/commands/ListScansCommand.java   |     8 +-
 .../shell/commands/ListShellIterCommand.java    |   105 +
 .../core/util/shell/commands/MaxRowCommand.java |    28 +-
 .../core/util/shell/commands/MergeCommand.java  |    34 +-
 .../util/shell/commands/NoTableCommand.java     |     2 +-
 .../util/shell/commands/OfflineCommand.java     |     2 +-
 .../core/util/shell/commands/OnlineCommand.java |     2 +-
 .../core/util/shell/commands/OptUtil.java       |    39 +-
 .../core/util/shell/commands/PasswdCommand.java |    24 +-
 .../core/util/shell/commands/PingCommand.java   |    82 +
 .../core/util/shell/commands/PingIterator.java  |    58 +
 .../shell/commands/QuotedStringTokenizer.java   |    38 +-
 .../util/shell/commands/RenameTableCommand.java |     4 +-
 .../core/util/shell/commands/RevokeCommand.java |    12 +-
 .../core/util/shell/commands/ScanCommand.java   |   214 +-
 .../util/shell/commands/SetAuthsCommand.java    |    14 +-
 .../util/shell/commands/SetGroupsCommand.java   |    17 +-
 .../util/shell/commands/SetIterCommand.java     |    83 +-
 .../util/shell/commands/SetScanIterCommand.java |    36 +-
 .../shell/commands/SetShellIterCommand.java     |   122 +
 .../ShellPluginConfigurationCommand.java        |   146 +
 .../core/util/shell/commands/SleepCommand.java  |     4 +-
 .../commands/SystemPermissionsCommand.java      |     5 +-
 .../core/util/shell/commands/TableCommand.java  |    10 +-
 .../util/shell/commands/TableOperation.java     |    30 +-
 .../shell/commands/TablePermissionsCommand.java |     5 +-
 .../core/util/shell/commands/TablesCommand.java |    14 +-
 .../core/util/shell/commands/TraceCommand.java  |    18 +-
 .../core/util/shell/commands/UserCommand.java   |    10 +-
 .../shell/commands/UserPermissionsCommand.java  |     6 +-
 .../core/util/shell/commands/UsersCommand.java  |     5 +-
 .../core/util/shell/commands/WhoAmICommand.java |     2 +-
 .../accumulo/core/zookeeper/ZooCache.java       |    40 +
 .../apache/accumulo/core/zookeeper/ZooUtil.java |     4 +-
 core/src/main/scripts/generate-thrift.sh        |   110 +
 core/src/main/thrift/client.thrift              |    40 +-
 core/src/main/thrift/data.thrift                |     5 +-
 core/src/main/thrift/gc.thrift                  |     3 +-
 core/src/main/thrift/master.thrift              |    41 +-
 core/src/main/thrift/security.thrift            |    24 +-
 core/src/main/thrift/tabletserver.thrift        |    82 +-
 core/src/main/thrift/thrift.sh                  |    58 -
 .../accumulo/core/cli/TestClientOpts.java       |    91 +
 .../core/client/BatchWriterConfigTest.java      |   179 +
 .../core/client/ClientSideIteratorTest.java     |    22 +-
 .../accumulo/core/client/RowIteratorTest.java   |    22 +-
 .../accumulo/core/client/TestThrift1474.java    |    98 +
 .../accumulo/core/client/admin/FindMaxTest.java |     6 +-
 .../client/admin/TableOperationsHelperTest.java |    13 +-
 .../core/client/impl/TabletLocatorImplTest.java |    97 +-
 .../mapreduce/AccumuloFileOutputFormatTest.java |   236 +-
 .../mapreduce/AccumuloInputFormatTest.java      |   295 +-
 .../mapreduce/AccumuloOutputFormatTest.java     |   175 +-
 .../mapreduce/AccumuloRowInputFormatTest.java   |   198 +-
 .../lib/partition/RangePartitionerTest.java     |    17 +-
 .../core/client/mock/MockConnectorTest.java     |    58 +-
 .../client/mock/MockTableOperationsTest.java    |   205 +-
 .../core/client/mock/TestBatchScanner821.java   |    65 +
 .../core/client/mock/TransformIterator.java     |    32 +
 .../apache/accumulo/core/conf/PropertyTest.java |     6 +
 .../org/apache/accumulo/core/data/KeyTest.java  |     4 +-
 .../apache/accumulo/core/data/MutationTest.java |   136 +-
 .../apache/accumulo/core/data/OldMutation.java  |   492 +
 .../core/file/BloomFilterLayerLookupTest.java   |   140 +
 .../file/blockfile/cache/TestLruBlockCache.java |    26 +-
 .../file/rfile/AuthorizationFilterTest.java     |     4 +-
 .../core/file/rfile/BlockIndexTest.java         |   176 +
 .../core/file/rfile/MultiLevelIndexTest.java    |     2 +-
 .../accumulo/core/file/rfile/RFileTest.java     |   104 +-
 .../core/file/rfile/RelativeKeyTest.java        |   262 +
 .../core/file/rfile/TimestampFilterTest.java    |     4 +-
 .../core/iterators/AggregatingIteratorTest.java |     1 +
 .../iterators/FirstEntryInRowIteratorTest.java  |   110 +
 .../iterators/aggregation/NumSummationTest.java |     1 +
 .../conf/AggregatorConfigurationTest.java       |     1 +
 .../iterators/system/VisibilityFilterTest.java  |    52 +
 .../core/iterators/user/CombinerTest.java       |    36 +
 .../core/iterators/user/FilterTest.java         |    30 +
 .../iterators/user/IndexedDocIteratorTest.java  |    30 +-
 .../user/IntersectingIteratorTest.java          |    47 +-
 .../core/iterators/user/RegExFilterTest.java    |    21 +-
 .../core/iterators/user/RowFilterTest.java      |     6 +-
 .../user/TransformingIteratorTest.java          |   690 +
 .../core/security/AuthorizationsTest.java       |     8 +-
 .../core/security/ColumnVisibilityTest.java     |    33 +
 .../core/security/VisibilityEvaluatorTest.java  |    48 +-
 .../core/util/LocalityGroupUtilTest.java        |    20 +-
 .../shell/command/FormatterCommandTest.java     |    37 +-
 .../resources/disabled/conf/accumulo-site.xml   |   108 +
 .../apache/accumulo/core/file/rfile/ver_6.rf    |   Bin 0 -> 26167 bytes
 docs/README_UBUNTU                              |    44 +
 docs/examples/README.batch                      |     4 +-
 docs/examples/README.bloom                      |    28 +-
 docs/examples/README.bulkIngest                 |     8 +-
 docs/examples/README.client                     |    73 +
 docs/examples/README.dirlist                    |    16 +-
 docs/examples/README.export                     |    89 +
 docs/examples/README.filedata                   |     4 +-
 docs/examples/README.helloworld                 |     9 +-
 docs/examples/README.isolation                  |     4 +-
 docs/examples/README.mapred                     |     2 +-
 docs/examples/README.maxmutation                |     8 +-
 docs/examples/README.regex                      |    58 +
 docs/examples/README.rowhash                    |    59 +
 docs/examples/README.shard                      |    11 +-
 docs/examples/README.tabletofile                |    59 +
 docs/examples/README.terasort                   |    50 +
 docs/examples/README.visibility                 |     2 +-
 .../accumulo_developer_manual.tex               |   194 +
 docs/src/developer_manual/build.sh              |    48 +
 docs/src/developer_manual/developer_manual.tex  |   201 -
 docs/src/user_manual/build.sh                   |     7 +-
 docs/src/user_manual/chapters/analytics.tex     |     2 +-
 docs/src/user_manual/chapters/shell.tex         |     1 -
 .../chapters/table_configuration.tex            |    10 +-
 examples/instamo/README.md                      |    27 +
 examples/instamo/pom.xml                        |   117 +
 .../apache/accumulo/instamo/AccumuloApp.java    |    57 +
 .../accumulo/instamo/MapReduceExample.java      |    68 +
 .../instamo/src/main/resources/log4j.properties |     9 +
 .../instamo/ExampleAccumuloUnitTest.java        |    58 +
 examples/pom.xml                                |     2 +-
 examples/simple/pom.xml                         |    50 +-
 .../accumulo/examples/simple/client/Flush.java  |    20 +-
 .../simple/client/RandomBatchScanner.java       |    76 +-
 .../simple/client/RandomBatchWriter.java        |    91 +-
 .../simple/client/ReadWriteExample.java         |   201 +-
 .../examples/simple/client/RowOperations.java   |    48 +-
 .../simple/client/SequentialBatchWriter.java    |    57 +-
 .../examples/simple/combiner/StatsCombiner.java |     2 +-
 .../examples/simple/dirlist/FileCount.java      |    75 +-
 .../examples/simple/dirlist/Ingest.java         |    80 +-
 .../examples/simple/dirlist/QueryUtil.java      |    39 +-
 .../examples/simple/dirlist/Viewer.java         |    28 +-
 .../simple/filedata/CharacterHistogram.java     |    22 +-
 .../simple/filedata/FileDataIngest.java         |    92 +-
 .../examples/simple/filedata/FileDataQuery.java |     5 +-
 .../helloworld/InsertWithBatchWriter.java       |    32 +-
 .../helloworld/InsertWithOutputFormat.java      |    78 -
 .../examples/simple/helloworld/ReadData.java    |    42 +-
 .../simple/isolation/InterferenceTest.java      |    51 +-
 .../examples/simple/mapreduce/RegexExample.java |    43 +-
 .../examples/simple/mapreduce/RowHash.java      |    25 +-
 .../examples/simple/mapreduce/TableToFile.java  |    24 +-
 .../simple/mapreduce/TeraSortIngest.java        |    53 +-
 .../simple/mapreduce/UniqueColumns.java         |    63 +-
 .../examples/simple/mapreduce/WordCount.java    |    51 +-
 .../mapreduce/bulk/BulkIngestExample.java       |    49 +-
 .../simple/mapreduce/bulk/GenerateTestData.java |    28 +-
 .../simple/mapreduce/bulk/SetupTable.java       |    43 +-
 .../simple/mapreduce/bulk/VerifyIngest.java     |    37 +-
 .../examples/simple/shard/ContinuousQuery.java  |    49 +-
 .../accumulo/examples/simple/shard/Index.java   |    42 +-
 .../accumulo/examples/simple/shard/Query.java   |    42 +-
 .../accumulo/examples/simple/shard/Reverse.java |    39 +-
 .../examples/simple/dirlist/CountTest.java      |    23 +-
 .../simple/filedata/ChunkInputFormatTest.java   |   296 +-
 .../simple/filedata/ChunkInputStreamTest.java   |     7 +-
 examples/wikisearch/README                      |    68 -
 examples/wikisearch/README.parallel             |    65 -
 examples/wikisearch/ingest/bin/ingest.sh        |    46 -
 .../wikisearch/ingest/bin/ingest_parallel.sh    |    46 -
 .../ingest/conf/wikipedia.xml.example           |    43 -
 .../ingest/conf/wikipedia_parallel.xml.example  |    75 -
 examples/wikisearch/ingest/pom.xml              |   101 -
 .../wikisearch/ingest/src/assembly/dist.xml     |    38 -
 .../wikisearch/ingest/ArticleExtractor.java     |   207 -
 .../wikisearch/ingest/LRUOutputCombiner.java    |    75 -
 .../ingest/WikipediaConfiguration.java          |   198 -
 .../wikisearch/ingest/WikipediaIngester.java    |   208 -
 .../wikisearch/ingest/WikipediaInputFormat.java |   136 -
 .../wikisearch/ingest/WikipediaMapper.java      |   245 -
 .../ingest/WikipediaPartitionedIngester.java    |   309 -
 .../ingest/WikipediaPartitionedMapper.java      |   310 -
 .../wikisearch/ingest/WikipediaPartitioner.java |    89 -
 .../iterator/GlobalIndexUidCombiner.java        |    94 -
 .../wikisearch/iterator/TextIndexCombiner.java  |   102 -
 .../normalizer/LcNoDiacriticsNormalizer.java    |    49 -
 .../wikisearch/normalizer/NoOpNormalizer.java   |    23 -
 .../wikisearch/normalizer/Normalizer.java       |    32 -
 .../wikisearch/normalizer/NumberNormalizer.java |    42 -
 .../output/BufferingRFileRecordWriter.java      |   140 -
 .../output/SortingRFileOutputFormat.java        |   121 -
 .../wikisearch/protobuf/TermWeight.java         |   424 -
 .../examples/wikisearch/protobuf/Uid.java       |   470 -
 .../reader/AggregatingRecordReader.java         |   171 -
 .../wikisearch/reader/LfLineReader.java         |   173 -
 .../wikisearch/reader/LongLineRecordReader.java |   136 -
 .../examples/wikisearch/util/TextUtil.java      |   109 -
 .../ingest/src/main/protobuf/TermWeight.proto   |    28 -
 .../ingest/src/main/protobuf/Uid.proto          |    29 -
 .../ingest/src/main/protobuf/compile_protos.sh  |    19 -
 .../ingest/WikipediaInputSplitTest.java         |    69 -
 .../wikisearch/iterator/GlobalIndexUidTest.java |   192 -
 .../wikisearch/iterator/TextIndexTest.java      |   185 -
 .../normalizer/testNumberNormalizer.java        |    90 -
 .../reader/AggregatingRecordReaderTest.java     |   287 -
 .../src/test/resources/enwiki-20110901-001.xml  |   153 -
 examples/wikisearch/pom.xml                     |   237 -
 examples/wikisearch/query-war/pom.xml           |    51 -
 .../src/main/webapp/WEB-INF/jboss-web.xml       |    20 -
 .../query-war/src/main/webapp/WEB-INF/web.xml   |    57 -
 .../query-war/src/main/webapp/style.xsl         |    47 -
 .../wikisearch/query-war/src/main/webapp/ui.jsp |   131 -
 .../query-war/src/test/resources/test.xml       |  1651 -
 examples/wikisearch/query/pom.xml               |   139 -
 examples/wikisearch/query/src/assembly/dist.xml |    40 -
 .../wikisearch/function/QueryFunctions.java     |    68 -
 .../iterator/AbstractEvaluatingIterator.java    |   323 -
 .../wikisearch/iterator/AndIterator.java        |   934 -
 .../iterator/BooleanLogicIterator.java          |  1949 -
 .../iterator/BooleanLogicTreeNode.java          |   523 -
 .../iterator/DefaultIteratorEnvironment.java    |    74 -
 .../wikisearch/iterator/EvaluatingIterator.java |   115 -
 .../wikisearch/iterator/FieldIndexIterator.java |   736 -
 .../iterator/OptimizedQueryIterator.java        |   205 -
 .../wikisearch/iterator/OrIterator.java         |   822 -
 .../wikisearch/iterator/ReadAheadIterator.java  |   297 -
 .../iterator/UniqFieldNameValueIterator.java    |   342 -
 .../examples/wikisearch/jexl/Arithmetic.java    |   126 -
 .../wikisearch/logic/AbstractQueryLogic.java    |   883 -
 .../examples/wikisearch/logic/ContentLogic.java |   109 -
 .../examples/wikisearch/logic/QueryLogic.java   |   195 -
 .../examples/wikisearch/parser/EventFields.java |   227 -
 .../parser/FieldIndexQueryReWriter.java         |  1139 -
 .../parser/JexlOperatorConstants.java           |   105 -
 .../wikisearch/parser/QueryEvaluator.java       |   291 -
 .../examples/wikisearch/parser/QueryParser.java |   845 -
 .../wikisearch/parser/RangeCalculator.java      |  1199 -
 .../examples/wikisearch/parser/TreeBuilder.java |   675 -
 .../examples/wikisearch/parser/TreeNode.java    |   235 -
 .../examples/wikisearch/query/IQuery.java       |    66 -
 .../examples/wikisearch/query/Query.java        |   238 -
 .../examples/wikisearch/sample/Document.java    |    61 -
 .../examples/wikisearch/sample/Field.java       |    58 -
 .../examples/wikisearch/sample/Results.java     |    53 -
 .../examples/wikisearch/util/BaseKeyParser.java |    77 -
 .../wikisearch/util/FieldIndexKeyParser.java    |    71 -
 .../examples/wikisearch/util/KeyParser.java     |    70 -
 .../src/main/resources/META-INF/MANIFEST.MF     |     2 -
 .../main/resources/META-INF/ejb-jar.xml.example |    62 -
 .../logic/StandaloneStatusReporter.java         |    74 -
 .../wikisearch/logic/TestQueryLogic.java        |   195 -
 .../src/test/resources/enwiki-20110901-001.xml  |   153 -
 fate/pom.xml                                    |     2 +-
 .../org/apache/accumulo/fate/AdminUtil.java     |    28 +-
 .../org/apache/accumulo/fate/AgeOffStore.java   |   232 +
 .../java/org/apache/accumulo/fate/TStore.java   |    12 +-
 .../java/org/apache/accumulo/fate/ZooStore.java |    19 +-
 .../accumulo/fate/util/LoggingRunnable.java     |    17 +-
 .../accumulo/fate/zookeeper/IZooReader.java     |     2 +
 .../fate/zookeeper/IZooReaderWriter.java        |     2 +
 .../apache/accumulo/fate/zookeeper/ZooLock.java |     4 +-
 .../accumulo/fate/zookeeper/ZooQueueLock.java   |     5 +-
 .../accumulo/fate/zookeeper/ZooReader.java      |    33 +-
 .../fate/zookeeper/ZooReaderWriter.java         |    21 +-
 .../accumulo/fate/zookeeper/ZooSession.java     |    24 +-
 .../apache/accumulo/fate/zookeeper/ZooUtil.java |    38 +-
 .../apache/accumulo/fate/AgeOffStoreTest.java   |   161 +
 .../org/apache/accumulo/fate/SimpleStore.java   |   126 +
 .../zookeeper/DistributedReadWriteLockTest.java |    14 +-
 .../fate/zookeeper/TransactionWatcherTest.java  |     2 +-
 fate/src/test/resources/log4j.properties        |     9 +
 packages/deb/accumulo-native/postinst           |    12 +-
 packages/deb/accumulo/control                   |     2 +-
 packages/deb/accumulo/postinst                  |     4 +-
 pom.xml                                         |   201 +-
 proxy/README                                    |    48 +
 proxy/examples/python/README                    |    24 +
 proxy/examples/python/TestClient.py             |    46 +
 proxy/examples/ruby/README                      |    26 +
 proxy/examples/ruby/test_client.rb              |    49 +
 proxy/pom.xml                                   |   150 +
 proxy/proxy.properties                          |    11 +
 .../java/org/apache/accumulo/proxy/Proxy.java   |   117 +
 .../org/apache/accumulo/proxy/ProxyServer.java  |  1161 +
 .../apache/accumulo/proxy/TestProxyClient.java  |   166 +
 .../java/org/apache/accumulo/proxy/Util.java    |    62 +
 .../proxy/thrift/AccumuloException.java         |   402 +
 .../accumulo/proxy/thrift/AccumuloProxy.java    | 82065 +++++++++++++++++
 .../proxy/thrift/AccumuloSecurityException.java |   402 +
 .../accumulo/proxy/thrift/ActiveCompaction.java |  1431 +
 .../accumulo/proxy/thrift/ActiveScan.java       |  1593 +
 .../accumulo/proxy/thrift/BatchScanOptions.java |   865 +
 .../apache/accumulo/proxy/thrift/Column.java    |   635 +
 .../accumulo/proxy/thrift/ColumnUpdate.java     |   945 +
 .../accumulo/proxy/thrift/CompactionReason.java |    70 +
 .../accumulo/proxy/thrift/CompactionType.java   |    67 +
 .../accumulo/proxy/thrift/IOException.java      |   402 +
 .../accumulo/proxy/thrift/IteratorScope.java    |    64 +
 .../accumulo/proxy/thrift/IteratorSetting.java  |   763 +
 .../org/apache/accumulo/proxy/thrift/Key.java   |   846 +
 .../apache/accumulo/proxy/thrift/KeyExtent.java |   624 +
 .../apache/accumulo/proxy/thrift/KeyValue.java  |   518 +
 .../accumulo/proxy/thrift/KeyValueAndPeek.java  |   505 +
 .../proxy/thrift/NoMoreEntriesException.java    |   402 +
 .../apache/accumulo/proxy/thrift/PColumn.java   |   737 +
 .../accumulo/proxy/thrift/PColumnUpdate.java    |   848 +
 .../accumulo/proxy/thrift/PIteratorSetting.java |   763 +
 .../org/apache/accumulo/proxy/thrift/PKey.java  |   846 +
 .../apache/accumulo/proxy/thrift/PKeyValue.java |   518 +
 .../apache/accumulo/proxy/thrift/PRange.java    |   512 +
 .../accumulo/proxy/thrift/PScanResult.java      |   554 +
 .../proxy/thrift/PSystemPermission.java         |    79 +
 .../accumulo/proxy/thrift/PTablePermission.java |    73 +
 .../accumulo/proxy/thrift/PartialKey.java       |    73 +
 .../org/apache/accumulo/proxy/thrift/Range.java |   704 +
 .../accumulo/proxy/thrift/ScanColumn.java       |   527 +
 .../accumulo/proxy/thrift/ScanOptions.java      |   972 +
 .../accumulo/proxy/thrift/ScanResult.java       |   554 +
 .../apache/accumulo/proxy/thrift/ScanState.java |    64 +
 .../apache/accumulo/proxy/thrift/ScanType.java  |    61 +
 .../accumulo/proxy/thrift/SystemPermission.java |    79 +
 .../proxy/thrift/TableExistsException.java      |   402 +
 .../proxy/thrift/TableNotFoundException.java    |   402 +
 .../accumulo/proxy/thrift/TablePermission.java  |    73 +
 .../apache/accumulo/proxy/thrift/TimeType.java  |    61 +
 .../accumulo/proxy/thrift/UnknownScanner.java   |   402 +
 .../accumulo/proxy/thrift/UnknownWriter.java    |   402 +
 .../apache/accumulo/proxy/thrift/UserPass.java  |   513 +
 .../accumulo/proxy/thrift/WriterOptions.java    |   682 +
 proxy/src/main/scripts/generate-thrift.sh       |    24 +
 proxy/src/main/thrift/proxy.thrift              |   337 +
 .../accumulo/TestProxyInstanceOperations.java   |    82 +
 .../org/apache/accumulo/TestProxyReadWrite.java |   388 +
 .../accumulo/TestProxySecurityOperations.java   |   142 +
 .../accumulo/TestProxyTableOperations.java      |   219 +
 .../org/apache/accumulo/proxy/SimpleTest.java   |   487 +
 .../proxy/TestProxyInstanceOperations.java      |    81 +
 .../accumulo/proxy/TestProxyReadWrite.java      |   400 +
 .../proxy/TestProxySecurityOperations.java      |   142 +
 .../proxy/TestProxyTableOperations.java         |   214 +
 server/pom.xml                                  |    80 +-
 server/src/main/c++/Makefile                    |     8 +-
 server/src/main/c++/mlock/Makefile              |    56 -
 ...apache_accumulo_server_tabletserver_MLock.cc |    31 -
 server/src/main/c++/nativeMap/Makefile          |    28 +-
 .../org/apache/accumulo/server/Accumulo.java    |     9 +-
 .../server/cli/ClientOnDefaultTable.java        |    44 +
 .../server/cli/ClientOnRequiredTable.java       |    41 +
 .../apache/accumulo/server/cli/ClientOpts.java  |    23 +
 .../accumulo/server/client/BulkImporter.java    |    46 +-
 .../server/client/ClientServiceHandler.java     |   154 +-
 .../accumulo/server/client/HdfsZooInstance.java |    61 +-
 .../server/conf/ServerConfiguration.java        |     2 +-
 .../server/conf/TableConfiguration.java         |     1 +
 .../accumulo/server/conf/ZooConfiguration.java  |     2 +-
 .../server/constraints/ConstraintChecker.java   |    68 +-
 .../server/constraints/ConstraintLoader.java    |    56 -
 .../server/data/ServerColumnUpdate.java         |    36 +
 .../accumulo/server/data/ServerMutation.java    |    88 +
 .../org/apache/accumulo/server/fate/Admin.java  |    53 +-
 .../server/gc/GarbageCollectWriteAheadLogs.java |     7 +-
 .../server/gc/SimpleGarbageCollector.java       |   122 +-
 .../accumulo/server/logger/LogFileValue.java    |    31 +-
 .../accumulo/server/logger/LogReader.java       |    77 +-
 .../accumulo/server/master/LiveTServerSet.java  |   233 +-
 .../apache/accumulo/server/master/Master.java   |   441 +-
 .../master/balancer/DefaultLoadBalancer.java    |    10 +-
 .../master/balancer/TableLoadBalancer.java      |    13 +-
 .../server/master/balancer/TabletBalancer.java  |     7 +-
 .../master/recovery/MapRRecoverLease.java       |    55 +
 .../server/master/recovery/RecoverLease.java    |    18 +-
 .../master/recovery/SubmitFileForRecovery.java  |     7 +
 .../server/master/state/MergeStats.java         |    13 +-
 .../server/master/state/MetaDataStateStore.java |    11 +-
 .../master/state/MetaDataTableScanner.java      |    10 +-
 .../master/state/RootTabletStateStore.java      |     7 +-
 .../server/master/state/SetGoalState.java       |     2 +-
 .../master/state/TabletStateChangeIterator.java |     2 +-
 .../master/state/tables/TableManager.java       |     2 +-
 .../server/master/tableOps/BulkImport.java      |    21 +-
 .../server/master/tableOps/CloneTable.java      |    12 +-
 .../server/master/tableOps/CompactRange.java    |    19 +-
 .../server/master/tableOps/CreateTable.java     |    30 +-
 .../server/master/tableOps/DeleteTable.java     |    31 +-
 .../server/master/tableOps/ExportTable.java     |   320 +
 .../server/master/tableOps/ImportTable.java     |   595 +
 .../server/master/tableOps/RenameTable.java     |     5 +-
 .../server/master/tableOps/TableRangeOp.java    |    20 +-
 .../master/tserverOps/ShutdownTServer.java      |    16 +-
 .../accumulo/server/metanalysis/FindTablet.java |    67 +-
 .../accumulo/server/metanalysis/IndexMeta.java  |    33 +-
 .../server/metanalysis/PrintEvents.java         |    39 +-
 .../server/metanalysis/package-info.java        |    18 +-
 .../server/metrics/MetricsConfiguration.java    |     4 +-
 .../apache/accumulo/server/monitor/Monitor.java |    72 +-
 .../server/monitor/ZooKeeperStatus.java         |    30 +-
 .../server/monitor/servlets/DefaultServlet.java |    99 +-
 .../server/monitor/servlets/JSONServlet.java    |    53 +-
 .../monitor/servlets/TServersServlet.java       |    73 +-
 .../server/monitor/servlets/VisServlet.java     |    17 +-
 .../server/monitor/servlets/XMLServlet.java     |     6 +-
 .../server/monitor/servlets/trace/Basic.java    |     3 +-
 .../monitor/servlets/trace/NullScanner.java     |    13 +-
 .../monitor/servlets/trace/ShowTrace.java       |     6 +-
 .../monitor/util/celltypes/CompactionsType.java |    12 +-
 .../monitor/util/celltypes/TableStateType.java  |     2 +-
 .../server/problems/ProblemReports.java         |     4 +-
 .../security/AuditedSecurityOperation.java      |   333 +
 .../accumulo/server/security/Auditor.java       |   234 -
 .../accumulo/server/security/Authenticator.java |    64 -
 .../server/security/SecurityConstants.java      |    39 +-
 .../server/security/SecurityOperation.java      |   852 +
 .../accumulo/server/security/SecurityUtil.java  |    87 -
 .../server/security/ZKAuthenticator.java        |   721 -
 .../server/security/handler/Authenticator.java  |    52 +
 .../server/security/handler/Authorizor.java     |    85 +
 .../security/handler/InsecureAuthenticator.java |   109 +
 .../security/handler/InsecurePermHandler.java   |   146 +
 .../security/handler/PermissionHandler.java     |   171 +
 .../security/handler/ZKAuthenticator.java       |   207 +
 .../server/security/handler/ZKAuthorizor.java   |   157 +
 .../server/security/handler/ZKPermHandler.java  |   343 +
 .../server/security/handler/ZKSecurityTool.java |   159 +
 .../accumulo/server/tabletserver/Compactor.java |   181 +-
 .../tabletserver/EncodedBinaryTreePath.java     |   195 -
 .../server/tabletserver/FileManager.java        |     7 +-
 .../server/tabletserver/InMemoryMap.java        |     2 +-
 .../tabletserver/LargestFirstMemoryManager.java |     2 +-
 .../server/tabletserver/MinorCompactor.java     |    94 +-
 .../server/tabletserver/MutationLog.java        |     3 +-
 .../accumulo/server/tabletserver/NativeMap.java |     6 +-
 .../accumulo/server/tabletserver/Tablet.java    |   105 +-
 .../server/tabletserver/TabletMutations.java    |    47 +
 .../server/tabletserver/TabletServer.java       |   573 +-
 .../TabletServerResourceManager.java            |    20 +-
 .../server/tabletserver/TabletStatsKeeper.java  |     2 +-
 .../server/tabletserver/TabletTime.java         |    12 +-
 .../server/tabletserver/log/DfsLogger.java      |    17 +-
 .../server/tabletserver/log/LogSorter.java      |     2 +-
 .../server/tabletserver/log/MultiReader.java    |     4 +-
 .../tabletserver/log/SortedLogRecovery.java     |    24 +-
 .../tabletserver/log/TabletServerLogger.java    |    12 +-
 .../mastermessage/MasterMessage.java            |     4 +-
 .../mastermessage/SplitReportMessage.java       |     7 +-
 .../mastermessage/TabletStatusMessage.java      |     7 +-
 .../server/test/BulkImportDirectory.java        |    45 +-
 .../accumulo/server/test/CreateRFiles.java      |    43 +-
 .../accumulo/server/test/CreateRandomRFile.java |     4 +
 .../accumulo/server/test/CreateTestTable.java   |    97 +-
 .../server/test/EstimateInMemMapOverhead.java   |     2 +-
 .../server/test/GCLotsOfCandidatesTest.java     |    17 +-
 .../accumulo/server/test/GetMasterStats.java    |    11 +-
 .../apache/accumulo/server/test/ListTables.java |     8 +-
 .../server/test/NativeMapConcurrencyTest.java   |    37 +-
 .../server/test/NativeMapPerformanceTest.java   |     2 +-
 .../accumulo/server/test/NullBatchWriter.java   |     2 +-
 .../server/test/QueryMetadataTable.java         |    73 +-
 .../accumulo/server/test/TestBinaryRows.java    |   108 +-
 .../apache/accumulo/server/test/TestIngest.java |   309 +-
 .../server/test/TestMultiTableIngest.java       |    87 +-
 .../accumulo/server/test/TestRandomDeletes.java |    56 +-
 .../accumulo/server/test/VerifyIngest.java      |    70 +-
 .../accumulo/server/test/WrongTabletTest.java   |    25 +-
 .../test/continuous/ContinuousBatchWalker.java  |    91 +-
 .../test/continuous/ContinuousIngest.java       |   181 +-
 .../server/test/continuous/ContinuousMoru.java  |    77 +-
 .../server/test/continuous/ContinuousQuery.java |    47 +-
 .../test/continuous/ContinuousScanner.java      |    86 +-
 .../continuous/ContinuousStatsCollector.java    |    37 +-
 .../test/continuous/ContinuousVerify.java       |    81 +-
 .../server/test/continuous/ContinuousWalk.java  |   134 +-
 .../server/test/continuous/GenSplits.java       |    45 +-
 .../server/test/continuous/Histogram.java       |     4 +-
 .../server/test/continuous/TimeBinner.java      |    44 +-
 .../test/continuous/UndefinedAnalyzer.java      |    45 +-
 .../server/test/functional/AddSplitTest.java    |     5 +-
 .../test/functional/BadIteratorMincTest.java    |     5 +-
 .../test/functional/BatchScanSplitTest.java     |     7 +-
 .../test/functional/BatchWriterFlushTest.java   |     8 +-
 .../server/test/functional/BloomFilterTest.java |    80 +-
 .../functional/BulkSplitOptimizationTest.java   |     4 +-
 .../server/test/functional/CacheTestClean.java  |     3 +-
 .../server/test/functional/ConcurrencyTest.java |     3 +-
 .../server/test/functional/ConstraintTest.java  |    15 +-
 .../test/functional/CreateAndUseTest.java       |     5 +-
 .../test/functional/DeleteEverythingTest.java   |     3 +-
 .../test/functional/DeleteRowsSplitTest.java    |     3 +-
 .../server/test/functional/DeleteRowsTest.java  |     3 +-
 .../test/functional/FateStarvationTest.java     |    77 +
 .../server/test/functional/FunctionalTest.java  |   129 +-
 .../server/test/functional/LargeRowTest.java    |    16 +-
 .../server/test/functional/LogicalTimeTest.java |     3 +-
 .../server/test/functional/MaxOpenTest.java     |     6 +-
 .../server/test/functional/MergeTest.java       |     3 +-
 .../server/test/functional/PermissionsTest.java |    38 +-
 .../server/test/functional/RowDeleteTest.java   |     3 +-
 .../server/test/functional/RunTests.java        |    20 +-
 .../test/functional/ScanIteratorTest.java       |     3 +-
 .../server/test/functional/ScanRangeTest.java   |     5 +-
 .../test/functional/ScanSessionTimeOutTest.java |     3 +-
 .../test/functional/ServerSideErrorTest.java    |     3 +-
 .../server/test/functional/SlowConstraint.java  |    41 +
 .../test/functional/SparseColumnFamilyTest.java |     3 +-
 .../server/test/functional/TimeoutTest.java     |   132 +
 .../server/test/functional/VisibilityTest.java  |    13 +-
 .../server/test/functional/ZombieTServer.java   |    18 +-
 .../metadata/MetadataBatchScanTest.java         |    12 +-
 .../performance/scan/CollectTabletStats.java    |   145 +-
 .../test/performance/thrift/NullTserver.java    |    88 +-
 .../server/test/randomwalk/Framework.java       |    36 +-
 .../accumulo/server/test/randomwalk/Module.java |    13 +-
 .../accumulo/server/test/randomwalk/State.java  |    33 +-
 .../test/randomwalk/concurrent/BatchWrite.java  |     3 +-
 .../test/randomwalk/concurrent/CreateUser.java  |     4 +-
 .../test/randomwalk/concurrent/StartAll.java    |    33 +
 .../randomwalk/concurrent/StopTabletServer.java |    82 +
 .../server/test/randomwalk/image/Commit.java    |     2 +-
 .../test/randomwalk/image/ImageFixture.java     |     2 +-
 .../server/test/randomwalk/image/Write.java     |     4 +-
 .../test/randomwalk/multitable/Commit.java      |     4 +-
 .../test/randomwalk/multitable/CopyTool.java    |    17 +-
 .../test/randomwalk/multitable/Write.java       |     2 +-
 .../randomwalk/security/AlterSystemPerm.java    |    10 +-
 .../test/randomwalk/security/AlterTable.java    |    18 +-
 .../randomwalk/security/AlterTablePerm.java     |    58 +-
 .../test/randomwalk/security/Authenticate.java  |    33 +-
 .../test/randomwalk/security/ChangePass.java    |    63 +-
 .../test/randomwalk/security/CreateTable.java   |    17 +-
 .../test/randomwalk/security/CreateUser.java    |    24 +-
 .../test/randomwalk/security/DropTable.java     |    34 +-
 .../test/randomwalk/security/DropUser.java      |    25 +-
 .../randomwalk/security/SecurityFixture.java    |    59 +-
 .../randomwalk/security/SecurityHelper.java     |    29 +
 .../test/randomwalk/security/SetAuths.java      |    27 +-
 .../test/randomwalk/security/TableOp.java       |    93 +-
 .../test/randomwalk/security/Validate.java      |    37 +-
 .../randomwalk/security/WalkingSecurity.java    |   369 +
 .../test/randomwalk/sequential/BatchVerify.java |   134 +-
 .../test/randomwalk/sequential/Commit.java      |     2 +-
 .../randomwalk/sequential/MapRedVerifyTool.java |    15 +-
 .../test/randomwalk/sequential/Write.java       |     4 +-
 .../test/randomwalk/shard/DeleteSomeDocs.java   |     7 +-
 .../test/randomwalk/shard/ExportIndex.java      |   115 +
 .../server/test/randomwalk/shard/Reindex.java   |     3 +-
 .../test/randomwalk/shard/VerifyIndex.java      |     2 +-
 .../server/test/scalability/Ingest.java         |    10 +-
 .../accumulo/server/test/scalability/Run.java   |    34 +-
 .../server/test/scalability/ScaleTest.java      |     2 +-
 .../accumulo/server/trace/TraceFileSystem.java  |     7 +-
 .../accumulo/server/trace/TraceServer.java      |    30 +-
 .../server/util/AddFilesWithMissingEntries.java |    43 +-
 .../org/apache/accumulo/server/util/Admin.java  |   156 +-
 .../accumulo/server/util/ChangeSecret.java      |    36 +-
 .../server/util/CheckForMetadataProblems.java   |    89 +-
 .../accumulo/server/util/CleanZookeeper.java    |    21 +-
 .../accumulo/server/util/DeleteZooInstance.java |    55 +-
 .../accumulo/server/util/DumpZookeeper.java     |    20 +-
 .../accumulo/server/util/EmbeddedWebServer.java |   126 +-
 .../server/util/FindOfflineTablets.java         |    65 +
 .../apache/accumulo/server/util/Initialize.java |   139 +-
 .../accumulo/server/util/ListInstances.java     |    62 +-
 .../accumulo/server/util/LocalityCheck.java     |    12 +-
 .../accumulo/server/util/MetadataTable.java     |   128 +-
 .../server/util/OfflineMetadataScanner.java     |     2 +
 .../accumulo/server/util/RandomWriter.java      |    48 +-
 .../util/RemoveEntriesForMissingFiles.java      |    45 +-
 .../accumulo/server/util/RestoreZookeeper.java  |    28 +-
 .../accumulo/server/util/SendLogToChainsaw.java |   194 +-
 .../accumulo/server/util/TServerUtils.java      |    60 +-
 .../accumulo/server/util/TableDiskUsage.java    |    25 +-
 .../accumulo/server/util/TabletIterator.java    |     7 +-
 .../accumulo/server/util/TabletServerLocks.java |    17 +-
 .../server/util/VerifyTabletAssignments.java    |   123 +-
 .../org/apache/accumulo/server/util/ZooZap.java |    53 +-
 .../accumulo/server/util/time/SimpleTimer.java  |    29 +-
 .../server/zookeeper/DistributedWorkQueue.java  |     3 +-
 .../server/zookeeper/TransactionWatcher.java    |     4 +-
 .../accumulo/server/zookeeper/ZooQueueLock.java |     8 +-
 .../server/zookeeper/ZooReaderWriter.java       |     6 +-
 server/src/main/resources/web/vis.js            |   402 +
 server/src/main/resources/web/vis.xml           |   404 -
 .../server/client/BulkImporterTest.java         |     2 +-
 .../constraints/MetadataConstraintsTest.java    |    16 +-
 .../server/data/ServerMutationTest.java         |    79 +
 .../accumulo/server/gc/TestConfirmDeletes.java  |    14 +-
 .../accumulo/server/logger/LogFileTest.java     |    36 +-
 .../accumulo/server/master/TestMergeState.java  |    26 +-
 .../balancer/DefaultLoadBalancerTest.java       |    37 +-
 .../master/balancer/TableLoadBalancerTest.java  |     2 +-
 .../server/monitor/ZooKeeperStatusTest.java     |    56 +
 .../server/security/ZKAuthenticatorTest.java    |    87 -
 .../security/handler/ZKAuthenticatorTest.java   |    87 +
 .../tabletserver/log/SortedLogRecoveryTest.java |   112 +-
 .../server/test/iterator/RegExTest.java         |     3 +-
 .../apache/accumulo/server/util/CloneTest.java  |    50 +-
 .../server/util/TabletIteratorTest.java         |    11 +-
 start/pom.xml                                   |   102 +-
 .../java/org/apache/accumulo/start/Main.java    |    40 +-
 .../org/apache/accumulo/start/TestMain.java     |     8 +
 .../start/classloader/AccumuloClassLoader.java  |   428 +-
 .../AccumuloFilesystemAlterationMonitor.java    |   134 -
 .../vfs/AccumuloReloadingVFSClassLoader.java    |   155 +
 .../classloader/vfs/AccumuloVFSClassLoader.java |   320 +
 .../start/classloader/vfs/ContextManager.java   |   170 +
 .../vfs/PostDelegatingVFSClassLoader.java       |    52 +
 .../classloader/vfs/ReloadingClassLoader.java   |    24 +
 .../vfs/providers/HdfsFileAttributes.java       |    60 +
 .../providers/HdfsFileContentInfoFactory.java   |    50 +
 .../vfs/providers/HdfsFileObject.java           |   354 +
 .../vfs/providers/HdfsFileProvider.java         |    89 +
 .../vfs/providers/HdfsFileSystem.java           |   165 +
 .../providers/HdfsFileSystemConfigBuilder.java  |    48 +
 .../vfs/providers/HdfsRandomAccessContent.java  |   362 +
 .../classloader/vfs/providers/package.html      |    19 +
 .../java/org/apache/accumulo/start/Test.java    |   244 -
 .../AccumuloReloadingVFSClassLoaderTest.java    |   128 +
 .../vfs/AccumuloVFSClassLoaderTest.java         |   130 +
 .../classloader/vfs/ContextManagerTest.java     |   207 +
 .../providers/ReadOnlyHdfsFileProviderTest.java |   246 +
 .../vfs/providers/VfsClassLoaderTest.java       |   145 +
 .../apache/accumulo/test/AccumuloDFSBase.java   |   147 +
 start/src/test/resources/HelloWorld.jar         |   Bin 0 -> 1008 bytes
 test/compat/diffAPI.pl                          |    62 +-
 test/compat/japi-compliance/README              |    14 +
 .../japi-compliance/japi-accumulo-1.4.xml       |    18 +
 .../japi-compliance/japi-accumulo-1.5.xml       |    19 +
 test/pom.xml                                    |   112 +
 .../accumulo/test/MiniAccumuloCluster.java      |   364 +
 .../accumulo/test/MiniAccumuloConfig.java       |    88 +
 .../accumulo/test/MiniAccumuloClusterTest.java  |   192 +
 test/src/test/resources/FooFilter.jar           |   Bin 0 -> 1645 bytes
 test/src/test/resources/conf/accumulo-site.xml  |   109 +
 test/system/auto/JavaTest.py                    |     2 +-
 test/system/auto/TestUtils.py                   |    74 +-
 test/system/auto/config.py                      |     0
 test/system/auto/run.py                         |     3 +-
 test/system/auto/simple/baditerminc.py          |     0
 test/system/auto/simple/binary.py               |     8 +-
 test/system/auto/simple/bulk.py                 |     8 +-
 test/system/auto/simple/combiner.py             |     0
 test/system/auto/simple/compaction.py           |     2 +-
 test/system/auto/simple/delete.py               |     4 +-
 test/system/auto/simple/deleterows.py           |     2 +-
 test/system/auto/simple/dynamic.py              |    54 +-
 test/system/auto/simple/dynamicThreadPools.py   |    14 +-
 test/system/auto/simple/examples.py             |   265 +-
 test/system/auto/simple/fateStartvation.py      |    30 +
 test/system/auto/simple/gc.py                   |    17 +-
 test/system/auto/simple/mapreduce.py            |    27 +-
 test/system/auto/simple/merge.py                |     1 +
 test/system/auto/simple/readwrite.py            |     6 +-
 test/system/auto/simple/shell.py                |     8 +
 test/system/auto/simple/shutdown.py             |    20 +-
 test/system/auto/simple/split.py                |    13 +-
 test/system/auto/simple/timeout.py              |    29 +
 test/system/auto/simple/wal.py                  |    57 -
 test/system/auto/simple/zoo.py                  |     2 +-
 test/system/auto/simple/zooCacheTest.py         |     2 +-
 test/system/auto/stress/batchWrite.py           |     4 +-
 test/system/auto/stress/halfDead.py             |     9 +-
 test/system/auto/stress/migrations.py           |     0
 test/system/auto/stress/restart.py              |    23 +-
 test/system/bench/README                        |     9 +-
 test/system/bench/cloudstone1/cloudstone1.py    |     1 +
 test/system/bench/lib/Benchmark.py              |    10 +-
 test/system/bench/lib/CreateTablesBenchmark.py  |    15 +-
 test/system/bench/lib/IngestBenchmark.py        |    21 +-
 test/system/bench/lib/RowHashBenchmark.py       |    61 +-
 test/system/bench/lib/TableSplitsBenchmark.py   |    26 +-
 test/system/bench/lib/TeraSortBenchmark.py      |    19 +-
 test/system/bench/lib/cloudshell.py             |     2 +-
 test/system/bench/lib/path.py                   |     4 +-
 test/system/bench/run.py                        |     3 +
 test/system/continuous/agitator.pl              |     5 +
 .../system/continuous/continuous-env.sh.example |    12 +
 test/system/continuous/report.pl                |     2 +-
 test/system/continuous/run-moru.sh              |     2 +-
 test/system/continuous/run-verify.sh            |    11 +-
 test/system/continuous/start-agitator.sh        |     2 +
 test/system/continuous/start-batchwalkers.sh    |     8 +-
 test/system/continuous/start-ingest.sh          |    11 +-
 test/system/continuous/start-scanners.sh        |     8 +-
 test/system/continuous/start-stats.sh           |     2 +-
 test/system/continuous/start-walkers.sh         |     8 +-
 test/system/randomwalk/bin/reset-cluster.sh     |     0
 test/system/randomwalk/bin/start-local.sh       |     9 +-
 .../randomwalk/conf/modules/Concurrent.xml      |    10 +
 .../system/randomwalk/conf/modules/Security.xml |   446 +-
 test/system/randomwalk/conf/modules/Shard.xml   |     7 +-
 test/system/test1/ingest_test.sh                |    10 +-
 test/system/test1/ingest_test_2.sh              |    10 +-
 test/system/test1/verify_test.sh                |    10 +-
 test/system/test1/verify_test_2.sh              |    10 +-
 test/system/test2/concurrent.sh                 |    90 +-
 test/system/test3/bigrow.sh                     |    12 +-
 test/system/test4/bulk_import_test.sh           |    44 +-
 test/system/test5/insert_test.sh                |    18 -
 test/system/upgrade_test.sh                     |    25 +-
 trace/pom.xml                                   |    32 +-
 .../cloudtrace/instrument/impl/MilliSpan.java   |     2 +
 .../instrument/receivers/AsyncSpanReceiver.java |     2 +-
 .../cloudtrace/instrument/thrift/TraceWrap.java |     6 +-
 .../accumulo/cloudtrace/thrift/RemoteSpan.java  |   591 +-
 .../cloudtrace/thrift/SpanReceiver.java         |   304 +-
 .../accumulo/cloudtrace/thrift/TInfo.java       |   243 +-
 .../accumulo/cloudtrace/thrift/TestService.java |   520 +-
 trace/src/main/scripts/generate-thrift.sh       |    24 +
 trace/src/main/thrift/cloudtrace.thrift         |     1 +
 .../cloudtrace/instrument/CountSamplerTest.java |     2 +-
 .../cloudtrace/instrument/TracerTest.java       |     4 +-
 trace/thrift.sh                                 |    39 -
 1071 files changed, 183860 insertions(+), 52974 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/accumulo/blob/58fcad6e/README
----------------------------------------------------------------------
diff --cc README
index 41f6efc,9b8edae..7cf2c69
--- a/README
+++ b/README
@@@ -62,8 -70,24 +70,24 @@@ the machines in the cluster and that ha
  found in the same location on every machine in the cluster.  You will need to
  have password-less ssh set up as described in the hadoop documentation. 
  
- You will need to have hadoop installed and configured on your system.
- Accumulo ACCUMULO-652-SNAPSHOT has been tested with hadoop version 0.20.2.
+ You will need to have hadoop installed and configured on your system.  Accumulo
 -1.5.0-SNAPSHOT has been tested with hadoop version 0.20.2.  To avoid data loss,
 -you must enable HDFS durable sync.  How you enable this depends on your version
 -of Hadoop. Please consult the table below for information regarding your version.
 -If you need to set the coniguration, please be sure to restart HDFS. See 
 -ACCUMULO-623 for more information.
++ACCUMULO-652-SNAPSHOT has been tested with hadoop version 0.20.2.  To avoid
++data loss, you must enable HDFS durable sync.  How you enable this depends on
++your version of Hadoop. Please consult the table below for information
++regarding your version.  If you need to set the configuration, please be sure to
++restart HDFS. See ACCUMULO-623 for more information.
+ 
+ HADOOP RELEASE          VERSION           SYNC NAME             DEFAULT
+ Apache Hadoop           0.20.205          dfs.support.append    false
+ Apache Hadoop            0.23.x           dfs.support.append    true
+ Apache Hadoop             1.0.x           dfs.support.append    false
+ Apache Hadoop             1.1.x           dfs.durable.sync      true
+ Apache Hadoop          2.0.0-2.0.2        dfs.support.append    true
+ Cloudera CDH             3u0-3u3             ????               true
+ Cloudera CDH               3u4            dfs.support.append    true
+ Hortonworks HDP            1.0            dfs.support.append    false
+ Hortonworks HDP            1.1            dfs.support.append    false
+ 
  
  The example accumulo configuration files are placed in directories based on the 
  memory footprint for the accumulo processes.  If you are using native libraries

http://git-wip-us.apache.org/repos/asf/accumulo/blob/58fcad6e/assemble/pom.xml
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/accumulo/blob/58fcad6e/bin/config.sh
----------------------------------------------------------------------
diff --cc bin/config.sh
index ca5e575,d657929..482f567
--- a/bin/config.sh
+++ b/bin/config.sh
@@@ -46,26 -46,26 +46,26 @@@ mkdir -p $ACCUMULO_LOG_DIR 2>/dev/nul
  export ACCUMULO_LOG_DIR
  
  if [ -z ${ACCUMULO_VERSION} ]; then
 -        ACCUMULO_VERSION=1.5.0-SNAPSHOT
 +        ACCUMULO_VERSION=ACCUMULO-652-SNAPSHOT
  fi
  
- if [ -z "$HADOOP_HOME" ]
+ if [ -z "$HADOOP_PREFIX" ]
  then
-    HADOOP_HOME="`which hadoop`"
-    if [ -z "$HADOOP_HOME" ]
+    HADOOP_PREFIX="`which hadoop`"
+    if [ -z "$HADOOP_PREFIX" ]
     then
-       echo "You must set HADOOP_HOME"
+       echo "You must set HADOOP_PREFIX"
        exit 1
     fi
-    HADOOP_HOME=`dirname $HADOOP_HOME`
-    HADOOP_HOME=`dirname $HADOOP_HOME`
+    HADOOP_PREFIX=`dirname $HADOOP_PREFIX`
+    HADOOP_PREFIX=`dirname $HADOOP_PREFIX`
  fi
- if [ ! -d "$HADOOP_HOME" ]
+ if [ ! -d "$HADOOP_PREFIX" ]
  then
-     echo "$HADOOP_HOME is not a directory"
+     echo "$HADOOP_PREFIX is not a directory"
      exit 1
  fi
- export HADOOP_HOME
+ export HADOOP_PREFIX
  
  if [ ! -f "$ACCUMULO_HOME/conf/masters" -o ! -f "$ACCUMULO_HOME/conf/slaves" ]
  then

http://git-wip-us.apache.org/repos/asf/accumulo/blob/58fcad6e/core/pom.xml
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/accumulo/blob/58fcad6e/core/src/main/java/org/apache/accumulo/core/Constants.java
----------------------------------------------------------------------
diff --cc core/src/main/java/org/apache/accumulo/core/Constants.java
index ccd496f,99726d9..cecde9c
--- a/core/src/main/java/org/apache/accumulo/core/Constants.java
+++ b/core/src/main/java/org/apache/accumulo/core/Constants.java
@@@ -28,7 -30,11 +30,11 @@@ import org.apache.hadoop.fs.Path
  import org.apache.hadoop.io.Text;
  
  public class Constants {
+   public static final Charset UTF8 = Charset.forName("UTF-8");
 -  public static final String VERSION = "1.5.0-SNAPSHOT";
 +  public static final String VERSION = "ACCUMULO-652-SNAPSHOT";
+   
+   // versions should never be negative
+   public static final Integer WIRE_VERSION = 2;
    public static final int DATA_VERSION = 4;
    public static final int PREV_DATA_VERSION = 3;
    

http://git-wip-us.apache.org/repos/asf/accumulo/blob/58fcad6e/core/src/main/java/org/apache/accumulo/core/file/rfile/MultiLevelIndex.java
----------------------------------------------------------------------
diff --cc core/src/main/java/org/apache/accumulo/core/file/rfile/MultiLevelIndex.java
index 9a78dad,15918ec..502d237
--- a/core/src/main/java/org/apache/accumulo/core/file/rfile/MultiLevelIndex.java
+++ b/core/src/main/java/org/apache/accumulo/core/file/rfile/MultiLevelIndex.java
@@@ -39,9 -38,6 +39,8 @@@ import org.apache.accumulo.core.file.bl
  import org.apache.accumulo.core.file.blockfile.BlockFileReader;
  import org.apache.accumulo.core.file.blockfile.BlockFileWriter;
  import org.apache.accumulo.core.file.rfile.bcfile.Utils;
 +import org.apache.accumulo.core.iterators.predicates.ColumnVisibilityPredicate;
 +import org.apache.accumulo.core.iterators.predicates.TimestampRangePredicate;
- import org.apache.accumulo.core.security.ColumnVisibility;
  import org.apache.hadoop.io.WritableComparable;
  
  public class MultiLevelIndex {

http://git-wip-us.apache.org/repos/asf/accumulo/blob/58fcad6e/core/src/main/java/org/apache/accumulo/core/file/rfile/RFile.java
----------------------------------------------------------------------
diff --cc core/src/main/java/org/apache/accumulo/core/file/rfile/RFile.java
index 2cb717e,fe21f02..77813de
--- a/core/src/main/java/org/apache/accumulo/core/file/rfile/RFile.java
+++ b/core/src/main/java/org/apache/accumulo/core/file/rfile/RFile.java
@@@ -55,21 -54,12 +54,16 @@@ import org.apache.accumulo.core.file.rf
  import org.apache.accumulo.core.file.rfile.MultiLevelIndex.IndexEntry;
  import org.apache.accumulo.core.file.rfile.MultiLevelIndex.Reader.IndexIterator;
  import org.apache.accumulo.core.file.rfile.RelativeKey.MByteSequence;
+ import org.apache.accumulo.core.file.rfile.RelativeKey.SkippR;
  import org.apache.accumulo.core.file.rfile.bcfile.MetaBlockDoesNotExist;
 +import org.apache.accumulo.core.iterators.Filterer;
  import org.apache.accumulo.core.iterators.IterationInterruptedException;
  import org.apache.accumulo.core.iterators.IteratorEnvironment;
 +import org.apache.accumulo.core.iterators.Predicate;
  import org.apache.accumulo.core.iterators.SortedKeyValueIterator;
 +import org.apache.accumulo.core.iterators.predicates.ColumnVisibilityPredicate;
 +import org.apache.accumulo.core.iterators.predicates.TimestampRangePredicate;
  import org.apache.accumulo.core.iterators.system.HeapIterator;
- import org.apache.accumulo.core.security.ColumnVisibility;
- import org.apache.hadoop.conf.Configuration;
- import org.apache.hadoop.fs.FSDataInputStream;
- import org.apache.hadoop.fs.FileSystem;
- import org.apache.hadoop.fs.Path;
- import org.apache.hadoop.io.Text;
  import org.apache.hadoop.io.Writable;
  import org.apache.log4j.Logger;
  
@@@ -376,8 -366,9 +372,8 @@@ public class RFile 
        }
      }
      
- 
+     @Override
      public void append(Key key, Value value) throws IOException {
 -      
        if (dataClosed) {
          throw new IllegalStateException("Cannont append, data closed");
        }
@@@ -1086,114 -1086,5 +1122,22 @@@
          lgr.setInterruptFlag(interruptFlag);
        }
      }
 +    
 +    TimestampRangePredicate timestampFilter = null;
 +    ColumnVisibilityPredicate columnVisibilityPredicate = null;
 +    
 +    /* (non-Javadoc)
 +     * @see org.apache.accumulo.core.iterators.Filterer#applyFilter(org.apache.accumulo.core.iterators.Predicate)
 +     */
 +    @Override
 +    public void applyFilter(Predicate<Key,Value> filter, boolean required) {
 +      if(required)
 +        throw new IllegalArgumentException("RFile cannot guarantee filtering");
 +      // the HeapIterator will pass this filter on to its children, a collection of LocalityGroupReaders
 +      if(filter instanceof TimestampRangePredicate)
 +        this.timestampFilter = (TimestampRangePredicate)filter;
 +      if(filter instanceof ColumnVisibilityPredicate)
 +        this.columnVisibilityPredicate = (ColumnVisibilityPredicate)filter;
 +    }
    }
-   
-   public static void main(String[] args) throws Exception {
-     Configuration conf = new Configuration();
-     FileSystem fs = FileSystem.get(conf);
-     
-     int max_row = 10000;
-     int max_cf = 10;
-     int max_cq = 10;
-     
-     CachableBlockFile.Writer _cbw = new CachableBlockFile.Writer(fs, new Path("/tmp/test.rf"), "gz", conf);
-     RFile.Writer w = new RFile.Writer(_cbw, 100000);
-     
-     w.startDefaultLocalityGroup();
-     
-     int c = 0;
-     
-     for (int i = 0; i < max_row; i++) {
-       Text row = new Text(String.format("R%06d", i));
-       for (int j = 0; j < max_cf; j++) {
-         Text cf = new Text(String.format("CF%06d", j));
-         for (int k = 0; k < max_cq; k++) {
-           Text cq = new Text(String.format("CQ%06d", k));
-           w.append(new Key(row, cf, cq), new Value((c++ + "").getBytes()));
-         }
-       }
-     }
-     
-     w.close();
- 
-     long t1 = System.currentTimeMillis();
-     FSDataInputStream fsin = fs.open(new Path("/tmp/test.rf"));
-     long t2 = System.currentTimeMillis();
-     CachableBlockFile.Reader _cbr = new CachableBlockFile.Reader(fs, new Path("/tmp/test.rf"), conf, null, null);
-     RFile.Reader r = new RFile.Reader(_cbr);
-     long t3 = System.currentTimeMillis();
-     
-     System.out.println("Open time " + (t2 - t1) + " " + (t3 - t2));
-     
-     SortedKeyValueIterator<Key,Value> rd = r.deepCopy(null);
-     SortedKeyValueIterator<Key,Value> rd2 = r.deepCopy(null);
-     
-     Random rand = new Random(10);
-     
-     seekRandomly(100, max_row, max_cf, max_cq, r, rand);
-     
-     rand = new Random(10);
-     seekRandomly(100, max_row, max_cf, max_cq, rd, rand);
-     
-     rand = new Random(10);
-     seekRandomly(100, max_row, max_cf, max_cq, rd2, rand);
-     
-     r.closeDeepCopies();
-     
-     seekRandomly(100, max_row, max_cf, max_cq, r, rand);
-     
-     rd = r.deepCopy(null);
-     
-     seekRandomly(100, max_row, max_cf, max_cq, rd, rand);
-     
-     r.close();
-     fsin.close();
-     
-     seekRandomly(100, max_row, max_cf, max_cq, r, rand);
-   }
-   
-   private static void seekRandomly(int num, int max_row, int max_cf, int max_cq, SortedKeyValueIterator<Key,Value> rd, Random rand) throws Exception {
-     long t1 = System.currentTimeMillis();
-     
-     for (int i = 0; i < num; i++) {
-       Text row = new Text(String.format("R%06d", rand.nextInt(max_row)));
-       Text cf = new Text(String.format("CF%06d", rand.nextInt(max_cf)));
-       Text cq = new Text(String.format("CQ%06d", rand.nextInt(max_cq)));
-       
-       Key sk = new Key(row, cf, cq);
-       rd.seek(new Range(sk, null), new ArrayList<ByteSequence>(), false);
-       if (!rd.hasTop() || !rd.getTopKey().equals(sk)) {
-         System.err.println(sk + " != " + rd.getTopKey());
-       }
-       
-     }
-     
-     long t2 = System.currentTimeMillis();
-     
-     double delta = ((t2 - t1) / 1000.0);
-     System.out.println("" + delta + " " + num / delta);
-     
-     System.gc();
-     System.gc();
-     System.gc();
-     
-     Thread.sleep(3000);
-   }
  }

http://git-wip-us.apache.org/repos/asf/accumulo/blob/58fcad6e/core/src/main/java/org/apache/accumulo/core/iterators/Filter.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/accumulo/blob/58fcad6e/core/src/main/java/org/apache/accumulo/core/iterators/system/GenericFilterer.java
----------------------------------------------------------------------
diff --cc core/src/main/java/org/apache/accumulo/core/iterators/system/GenericFilterer.java
index d531d52,0000000..7d619f4
mode 100644,000000..100644
--- a/core/src/main/java/org/apache/accumulo/core/iterators/system/GenericFilterer.java
+++ b/core/src/main/java/org/apache/accumulo/core/iterators/system/GenericFilterer.java
@@@ -1,104 -1,0 +1,105 @@@
 +package org.apache.accumulo.core.iterators.system;
 +
 +import java.io.IOException;
 +import java.util.ArrayList;
 +import java.util.Collection;
 +
 +import org.apache.accumulo.core.data.ByteSequence;
 +import org.apache.accumulo.core.data.Key;
 +import org.apache.accumulo.core.data.Range;
 +import org.apache.accumulo.core.data.Value;
 +import org.apache.accumulo.core.iterators.Filterer;
 +import org.apache.accumulo.core.iterators.Predicate;
 +import org.apache.accumulo.core.iterators.SortedKeyValueIterator;
 +import org.apache.accumulo.core.iterators.WrappingIterator;
 +
 +public class GenericFilterer extends WrappingIterator implements Filterer<Key,Value> {
 +  
 +  private ArrayList<Predicate<Key,Value>> filters = new ArrayList<Predicate<Key,Value>>();
 +  
 +  private Key topKey;
 +  private Value topValue;
 +  
 +  public GenericFilterer(SortedKeyValueIterator<Key,Value> source) {
 +    setSource(source);
 +  }
 +
 +  public GenericFilterer() {
 +  }
 +  
 +  @Override
 +  public void seek(Range range, Collection<ByteSequence> columnFamilies, boolean inclusive) throws IOException {
 +    topKey = null;
 +    topValue = null;
 +    super.seek(range, columnFamilies, inclusive);
 +  }
 +
 +  @Override
 +  public void next() throws IOException {
 +    topKey = null;
 +    topValue = null;
 +    super.next();
 +  }
 +
 +  @Override
 +  public Key getTopKey() {
 +    if(topKey == null)
 +      hasTop();
 +    return topKey;
 +  }
 +  
 +  @Override
 +  public Value getTopValue() {
 +    if(topValue == null)
 +      hasTop();
 +    return topValue;
 +  }
 +  
 +  @Override
 +  public boolean hasTop() {
 +    if(topKey == null)
 +    {
 +      while(super.hasTop())
 +      {
 +        topKey = super.getTopKey();
 +        topValue = super.getTopValue();
 +        // check all the filters to see if we found a valid key/value pair
 +        boolean keep = true;
 +        for(Predicate<Key,Value> filter: filters)
 +        {
 +          if(!filter.evaluate(topKey, topValue))
 +          {
 +            keep = false;
 +            try {
 +              super.next();
 +            } catch (IOException e) {
 +              throw new RuntimeException(e);
 +            }
 +            break;
 +          }
 +        }
 +        if(keep == true)
 +          return true;
 +      }
 +      // ran out of key/value pairs
 +      topKey = null;
 +      topValue = null;
 +      return false;
 +    }
 +    else
 +    {
 +      return true;
 +    }
 +  }
 +  
 +  @Override
 +  public void applyFilter(Predicate<Key,Value> filter, boolean required) {
 +    filters.add(filter);
 +    if(getSource() instanceof Filterer)
 +    {
++      @SuppressWarnings("unchecked")
 +      Filterer<Key,Value> source = (Filterer<Key,Value>)getSource();
 +      source.applyFilter(filter, false);
 +    }
 +  }
 +}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/58fcad6e/core/src/main/java/org/apache/accumulo/core/iterators/system/HeapIterator.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/accumulo/blob/58fcad6e/core/src/main/java/org/apache/accumulo/core/iterators/system/SourceSwitchingIterator.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/accumulo/blob/58fcad6e/core/src/main/java/org/apache/accumulo/core/iterators/system/VisibilityFilter.java
----------------------------------------------------------------------
diff --cc core/src/main/java/org/apache/accumulo/core/iterators/system/VisibilityFilter.java
index 5deed97,4902e61..cbe6a12
--- a/core/src/main/java/org/apache/accumulo/core/iterators/system/VisibilityFilter.java
+++ b/core/src/main/java/org/apache/accumulo/core/iterators/system/VisibilityFilter.java
@@@ -22,45 -19,39 +22,47 @@@ import java.util.Map
  import org.apache.accumulo.core.data.Key;
  import org.apache.accumulo.core.data.Value;
  import org.apache.accumulo.core.iterators.Filter;
 +import org.apache.accumulo.core.iterators.Filterer;
  import org.apache.accumulo.core.iterators.IteratorEnvironment;
  import org.apache.accumulo.core.iterators.SortedKeyValueIterator;
 +import org.apache.accumulo.core.iterators.predicates.ColumnVisibilityPredicate;
  import org.apache.accumulo.core.security.Authorizations;
  import org.apache.accumulo.core.security.ColumnVisibility;
 -import org.apache.accumulo.core.security.VisibilityEvaluator;
 -import org.apache.accumulo.core.security.VisibilityParseException;
+ import org.apache.accumulo.core.util.BadArgumentException;
  import org.apache.accumulo.core.util.TextUtil;
  import org.apache.commons.collections.map.LRUMap;
  import org.apache.hadoop.io.Text;
  import org.apache.log4j.Logger;
  
++
++
  public class VisibilityFilter extends Filter {
 -  private VisibilityEvaluator ve;
++  private static final Logger log = Logger.getLogger(VisibilityFilter.class);
++
 +  private Authorizations auths;
    private Text defaultVisibility;
    private LRUMap cache;
    private Text tmpVis;
    
--  private static final Logger log = Logger.getLogger(VisibilityFilter.class);
--  
 -  public VisibilityFilter() {}
 -  
++  @SuppressWarnings("unchecked")
    public VisibilityFilter(SortedKeyValueIterator<Key,Value> iterator, Authorizations authorizations, byte[] defaultVisibility) {
 -    setSource(iterator);
 -    this.ve = new VisibilityEvaluator(authorizations);
 +    this.auths = authorizations;
      this.defaultVisibility = new Text(defaultVisibility);
      this.cache = new LRUMap(1000);
      this.tmpVis = new Text();
 +    if(iterator instanceof Filterer)
 +      ((Filterer<Key,Value>)iterator).applyFilter(new ColumnVisibilityPredicate(auths), false);
-     else
-       throw new IllegalArgumentException("expected to get a "+Filterer.class.getSimpleName());
 +    setSource(iterator);
    }
 -  
 +
 +  @Override
 +  public void init(SortedKeyValueIterator<Key,Value> source, Map<String,String> options, IteratorEnvironment env) throws IOException {
 +    throw new UnsupportedOperationException();
 +  }
 +
    @Override
    public SortedKeyValueIterator<Key,Value> deepCopy(IteratorEnvironment env) {
 -    return new VisibilityFilter(getSource().deepCopy(env), ve.getAuthorizations(), TextUtil.getBytes(defaultVisibility));
 +    return new VisibilityFilter(getSource().deepCopy(env), auths, TextUtil.getBytes(defaultVisibility));
    }
    
    @Override
@@@ -76,8 -67,16 +78,14 @@@
      if (b != null)
        return b;
      
-     Boolean bb = new ColumnVisibility(testVis).evaluate(auths);
++    Boolean bb;
+     try {
 -      Boolean bb = ve.evaluate(new ColumnVisibility(testVis));
 -      cache.put(new Text(testVis), bb);
 -      return bb;
 -    } catch (VisibilityParseException e) {
 -      log.error("Parse Error", e);
 -      return false;
++      bb = new ColumnVisibility(testVis).evaluate(auths);
+     } catch (BadArgumentException e) {
 -      log.error("Parse Error", e);
 -      return false;
++      log.warn(e);
++      bb = false;
+     }
 +    cache.put(new Text(testVis), bb);
 +    return bb;
    }
  }


[09/15] git commit: ACCUMULO-652 fixed bug in initial seek to index with filtering turned on, added more test cases, changed predicate objects to be grabbed through accessors, removed Filterer interface on WrappingIterator and added Filterer interface to

Posted by el...@apache.org.
ACCUMULO-652 fixed bug in initial seek to index with filtering turned on, added more test cases, changed predicate objects to be grabbed through accessors, removed Filterer interface on WrappingIterator and added Filterer interface to Filter, removed generic filtering capability from RFile and moved it to GenericFilterer

git-svn-id: https://svn.apache.org/repos/asf/accumulo/branches/ACCUMULO-652@1357868 13f79535-47bb-0310-9956-ffa450edef68


Project: http://git-wip-us.apache.org/repos/asf/accumulo/repo
Commit: http://git-wip-us.apache.org/repos/asf/accumulo/commit/665887f4
Tree: http://git-wip-us.apache.org/repos/asf/accumulo/tree/665887f4
Diff: http://git-wip-us.apache.org/repos/asf/accumulo/diff/665887f4

Branch: refs/heads/ACCUMULO-652
Commit: 665887f46c84239efcee3d9017eb781889f6d889
Parents: af8cefa
Author: Adam Fuchs <af...@apache.org>
Authored: Thu Jul 5 20:18:46 2012 +0000
Committer: Adam Fuchs <af...@apache.org>
Committed: Thu Jul 5 20:18:46 2012 +0000

----------------------------------------------------------------------
 .../accumulo/core/file/rfile/BlockStats.java    |   1 -
 .../core/file/rfile/MultiLevelIndex.java        |  18 ++--
 .../apache/accumulo/core/file/rfile/RFile.java  |  89 +---------------
 .../apache/accumulo/core/iterators/Filter.java  |  11 +-
 .../core/iterators/WrappingIterator.java        |  11 +-
 .../predicates/ColumnVisibilityPredicate.java   |  38 ++++---
 .../predicates/TimestampRangePredicate.java     |  26 +++--
 .../core/iterators/system/GenericFilterer.java  | 104 +++++++++++++++++++
 .../core/iterators/system/VisibilityFilter.java |  10 +-
 .../file/rfile/AuthorizationFilterTest.java     |  11 +-
 .../core/file/rfile/MultiLevelIndexTest.java    |  71 ++++++++++++-
 .../core/file/rfile/TimestampFilterTest.java    |  12 ++-
 .../core/iterators/user/FilterTest.java         |   4 +-
 13 files changed, 254 insertions(+), 152 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/accumulo/blob/665887f4/core/src/main/java/org/apache/accumulo/core/file/rfile/BlockStats.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/file/rfile/BlockStats.java b/core/src/main/java/org/apache/accumulo/core/file/rfile/BlockStats.java
index d1b1eac..d885106 100644
--- a/core/src/main/java/org/apache/accumulo/core/file/rfile/BlockStats.java
+++ b/core/src/main/java/org/apache/accumulo/core/file/rfile/BlockStats.java
@@ -99,7 +99,6 @@ public class BlockStats implements Writable {
       else {
         byte[] visibility = minimumVisibility.getExpression();
         if (visibility.length > maxVisibilityLength) {
-          System.out.println("expression too large: "+toString());
           out.writeInt(0);
         } else {
           out.writeInt(visibility.length);

http://git-wip-us.apache.org/repos/asf/accumulo/blob/665887f4/core/src/main/java/org/apache/accumulo/core/file/rfile/MultiLevelIndex.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/file/rfile/MultiLevelIndex.java b/core/src/main/java/org/apache/accumulo/core/file/rfile/MultiLevelIndex.java
index d6d7cd1..9a78dad 100644
--- a/core/src/main/java/org/apache/accumulo/core/file/rfile/MultiLevelIndex.java
+++ b/core/src/main/java/org/apache/accumulo/core/file/rfile/MultiLevelIndex.java
@@ -514,9 +514,9 @@ public class MultiLevelIndex {
       }
       
       private final boolean checkFilterIndexEntry(IndexEntry ie) {
-        if(timestampFilter != null && (ie.blockStats.maxTimestamp < timestampFilter.startTimestamp || ie.blockStats.minTimestamp > timestampFilter.endTimestamp))
+        if(timestampFilter != null && (ie.blockStats.maxTimestamp < timestampFilter.getStartTimestamp() || ie.blockStats.minTimestamp > timestampFilter.getEndTimestamp()))
           return false;
-        if(columnVisibilityPredicate != null && ie.blockStats.minimumVisibility != null && ie.blockStats.minimumVisibility.evaluate(columnVisibilityPredicate.auths) == false)
+        if(columnVisibilityPredicate != null && ie.blockStats.minimumVisibility != null && ie.blockStats.minimumVisibility.evaluate(columnVisibilityPredicate.getAuthorizations()) == false)
           return false;
         return true;
       }
@@ -568,7 +568,7 @@ public class MultiLevelIndex {
             return;
           } else {
             if (top.block.level == 0) {
-              // found a matching index entry
+              // found a matching index entry -- set the pointer to be just before this location
               top.offset = pos - 1;
               return;
             } else {
@@ -596,12 +596,17 @@ public class MultiLevelIndex {
           } else {
             if (top.block.level == 0) {
               // success!
-              return;
+              break;
             }
             // go down
             position.add(new StackEntry(getIndexBlock(index.get(top.offset)), -1));
           }
         }
+        if (position.isEmpty())
+          return;
+        StackEntry e = position.peek();
+        nextEntry = e.block.getIndex().get(e.offset);
+        nextIndex = e.block.getOffset() + e.offset;
       }
       
       IndexEntry nextEntry = null;
@@ -616,11 +621,6 @@ public class MultiLevelIndex {
           } catch (IOException e) {
             throw new RuntimeException(e);
           }
-          if (position.isEmpty())
-            return;
-          StackEntry e = position.peek();
-          nextEntry = e.block.getIndex().get(e.offset);
-          nextIndex = e.block.getOffset() + e.offset;
         }
       }
       

http://git-wip-us.apache.org/repos/asf/accumulo/blob/665887f4/core/src/main/java/org/apache/accumulo/core/file/rfile/RFile.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/file/rfile/RFile.java b/core/src/main/java/org/apache/accumulo/core/file/rfile/RFile.java
index d250155..2cb717e 100644
--- a/core/src/main/java/org/apache/accumulo/core/file/rfile/RFile.java
+++ b/core/src/main/java/org/apache/accumulo/core/file/rfile/RFile.java
@@ -794,7 +794,7 @@ public class RFile {
         TimestampRangePredicate p = (TimestampRangePredicate)filter;
         // intersect with previous timestampRange
         if(timestampRange != null)
-          timestampRange = new TimestampRangePredicate(Math.max(p.startTimestamp, timestampRange.startTimestamp), Math.min(p.endTimestamp, timestampRange.endTimestamp));
+          timestampRange = new TimestampRangePredicate(Math.max(p.getStartTimestamp(), timestampRange.getStartTimestamp()), Math.min(p.getEndTimestamp(), timestampRange.getEndTimestamp()));
         else
           timestampRange = p;
         index.setTimestampRange(timestampRange);
@@ -982,9 +982,6 @@ public class RFile {
     @Override
     public void seek(Range range, Collection<ByteSequence> columnFamilies, boolean inclusive) throws IOException {
       
-      topKey = null;
-      topValue = null;
-      
       clear();
       
       numLGSeeked = 0;
@@ -1090,91 +1087,16 @@ public class RFile {
       }
     }
     
-    ArrayList<Predicate<Key,Value>> filters = new ArrayList<Predicate<Key,Value>>();
-    
     TimestampRangePredicate timestampFilter = null;
     ColumnVisibilityPredicate columnVisibilityPredicate = null;
     
-    Key topKey;
-    Value topValue;
-    
-    /* (non-Javadoc)
-     * @see org.apache.accumulo.core.iterators.system.HeapIterator#hasTop()
-     */
-    @Override
-    public boolean hasTop() {
-      if(topKey == null)
-      {
-        while(super.hasTop())
-        {
-          topKey = super.getTopKey();
-          topValue = super.getTopValue();
-          // check all the filters to see if we found a valid key/value pair
-          boolean keep = true;
-          for(Predicate<Key,Value> filter: filters)
-          {
-            if(!filter.evaluate(topKey, topValue))
-            {
-              keep = false;
-              try {
-                super.next();
-              } catch (IOException e) {
-                throw new RuntimeException(e);
-              }
-              break;
-            }
-          }
-          if(keep == true)
-            return true;
-        }
-        // ran out of key/value pairs
-        topKey = null;
-        topValue = null;
-        return false;
-      }
-      else
-      {
-        return true;
-      }
-    }
-
-    /* (non-Javadoc)
-     * @see org.apache.accumulo.core.iterators.system.HeapIterator#next()
-     */
-    @Override
-    public void next() throws IOException {
-      topKey = null;
-      topValue = null;
-      super.next();
-    }
-
-    /* (non-Javadoc)
-     * @see org.apache.accumulo.core.iterators.system.HeapIterator#getTopKey()
-     */
-    @Override
-    public Key getTopKey() {
-      if(topKey == null)
-        hasTop();
-      return topKey;
-    }
-    
-    /* (non-Javadoc)
-     * @see org.apache.accumulo.core.iterators.system.HeapIterator#getTopValue()
-     */
-    @Override
-    public Value getTopValue() {
-      if(topValue == null)
-        hasTop();
-      return topValue;
-    }
-    
     /* (non-Javadoc)
      * @see org.apache.accumulo.core.iterators.Filterer#applyFilter(org.apache.accumulo.core.iterators.Predicate)
      */
     @Override
     public void applyFilter(Predicate<Key,Value> filter, boolean required) {
       if(required)
-        filters.add(filter);
+        throw new IllegalArgumentException("RFile cannot guarantee filtering");
       // the HeapIterator will pass this filter on to its children, a collection of LocalityGroupReaders
       if(filter instanceof TimestampRangePredicate)
         this.timestampFilter = (TimestampRangePredicate)filter;
@@ -1191,9 +1113,6 @@ public class RFile {
     int max_cf = 10;
     int max_cq = 10;
     
-    // FSDataOutputStream fsout = fs.create(new Path("/tmp/test.rf"));
-    
-    // RFile.Writer w = new RFile.Writer(fsout, 1000, "gz", conf);
     CachableBlockFile.Writer _cbw = new CachableBlockFile.Writer(fs, new Path("/tmp/test.rf"), "gz", conf);
     RFile.Writer w = new RFile.Writer(_cbw, 100000);
     
@@ -1213,9 +1132,7 @@ public class RFile {
     }
     
     w.close();
-    // fsout.close();
-    
-    // Logger.getLogger("accumulo.core.file.rfile").setLevel(Level.DEBUG);
+
     long t1 = System.currentTimeMillis();
     FSDataInputStream fsin = fs.open(new Path("/tmp/test.rf"));
     long t2 = System.currentTimeMillis();

http://git-wip-us.apache.org/repos/asf/accumulo/blob/665887f4/core/src/main/java/org/apache/accumulo/core/iterators/Filter.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/iterators/Filter.java b/core/src/main/java/org/apache/accumulo/core/iterators/Filter.java
index a9ed76c..82b3fd8 100644
--- a/core/src/main/java/org/apache/accumulo/core/iterators/Filter.java
+++ b/core/src/main/java/org/apache/accumulo/core/iterators/Filter.java
@@ -36,7 +36,7 @@ import org.apache.accumulo.core.data.Value;
  * This iterator takes an optional "negate" boolean parameter that defaults to false. If negate is set to true, this class instead omits entries that match its
  * filter, thus iterating over entries that do not match its filter.
  */
-public abstract class Filter extends WrappingIterator implements OptionDescriber {
+public abstract class Filter extends WrappingIterator implements OptionDescriber, Filterer<Key,Value> {
   @Override
   public SortedKeyValueIterator<Key,Value> deepCopy(IteratorEnvironment env) {
     Filter newInstance;
@@ -117,4 +117,13 @@ public abstract class Filter extends WrappingIterator implements OptionDescriber
   public static void setNegate(IteratorSetting is, boolean negate) {
     is.addOption(NEGATE, Boolean.toString(negate));
   }
+  
+  @SuppressWarnings("unchecked")
+  @Override
+  public void applyFilter(Predicate<Key,Value> filter, boolean required) {
+    if(getSource() instanceof Filterer)
+      ((Filterer<Key,Value>)getSource()).applyFilter(filter, required);
+    else if(required)
+      throw new IllegalArgumentException("Cannot require filter of underlying iterator");
+  }
 }

http://git-wip-us.apache.org/repos/asf/accumulo/blob/665887f4/core/src/main/java/org/apache/accumulo/core/iterators/WrappingIterator.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/iterators/WrappingIterator.java b/core/src/main/java/org/apache/accumulo/core/iterators/WrappingIterator.java
index 84ffb7c..a9c7f2d 100644
--- a/core/src/main/java/org/apache/accumulo/core/iterators/WrappingIterator.java
+++ b/core/src/main/java/org/apache/accumulo/core/iterators/WrappingIterator.java
@@ -25,7 +25,7 @@ import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.Range;
 import org.apache.accumulo.core.data.Value;
 
-public abstract class WrappingIterator implements SortedKeyValueIterator<Key,Value>, Filterer<Key,Value> {
+public abstract class WrappingIterator implements SortedKeyValueIterator<Key,Value> {
   
   private SortedKeyValueIterator<Key,Value> source = null;
   boolean seenSeek = false;
@@ -93,13 +93,4 @@ public abstract class WrappingIterator implements SortedKeyValueIterator<Key,Val
     seenSeek = true;
   }
   
-  @SuppressWarnings("unchecked")
-  @Override
-  public void applyFilter(Predicate<Key,Value> filter, boolean required) {
-    if(source instanceof Filterer)
-      ((Filterer<Key,Value>)source).applyFilter(filter, required);
-    else if(required)
-      throw new IllegalArgumentException("Cannot require filter of underlying iterator");
-  }
-
 }

http://git-wip-us.apache.org/repos/asf/accumulo/blob/665887f4/core/src/main/java/org/apache/accumulo/core/iterators/predicates/ColumnVisibilityPredicate.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/iterators/predicates/ColumnVisibilityPredicate.java b/core/src/main/java/org/apache/accumulo/core/iterators/predicates/ColumnVisibilityPredicate.java
index cb1b521..30f1dc5 100644
--- a/core/src/main/java/org/apache/accumulo/core/iterators/predicates/ColumnVisibilityPredicate.java
+++ b/core/src/main/java/org/apache/accumulo/core/iterators/predicates/ColumnVisibilityPredicate.java
@@ -6,21 +6,25 @@ import org.apache.accumulo.core.iterators.Predicate;
 import org.apache.accumulo.core.security.Authorizations;
 import org.apache.accumulo.core.security.ColumnVisibility;
 
-public final class ColumnVisibilityPredicate implements Predicate<Key, Value> {
-	public final Authorizations auths;
-
-	public ColumnVisibilityPredicate(Authorizations auths)
-	{
-		this.auths = auths;
-	}
-	
-	@Override
-	public boolean evaluate(Key k, Value v) {
-		return new ColumnVisibility(k.getColumnVisibility()).evaluate(auths);
-	}
-	
-	@Override
-	public String toString() {
-	  return "{"+auths+"}";
-	}
+public final class ColumnVisibilityPredicate implements Predicate<Key,Value> {
+  
+  private final Authorizations auths;
+  
+  public Authorizations getAuthorizations() {
+    return auths;
+  }
+  
+  public ColumnVisibilityPredicate(Authorizations auths) {
+    this.auths = auths;
+  }
+  
+  @Override
+  public boolean evaluate(Key k, Value v) {
+    return new ColumnVisibility(k.getColumnVisibility()).evaluate(auths);
+  }
+  
+  @Override
+  public String toString() {
+    return "{" + auths + "}";
+  }
 }

http://git-wip-us.apache.org/repos/asf/accumulo/blob/665887f4/core/src/main/java/org/apache/accumulo/core/iterators/predicates/TimestampRangePredicate.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/iterators/predicates/TimestampRangePredicate.java b/core/src/main/java/org/apache/accumulo/core/iterators/predicates/TimestampRangePredicate.java
index 25f68a1..3678085 100644
--- a/core/src/main/java/org/apache/accumulo/core/iterators/predicates/TimestampRangePredicate.java
+++ b/core/src/main/java/org/apache/accumulo/core/iterators/predicates/TimestampRangePredicate.java
@@ -24,24 +24,30 @@ import org.apache.accumulo.core.iterators.Predicate;
  * TimestampRangeFilter is used to determine whether a Key/Value pair falls within a timestamp range
  */
 public class TimestampRangePredicate implements Predicate<Key,Value> {
-
-  public final long startTimestamp;
-  public final long endTimestamp;
   
+  private final long startTimestamp;
+  private final long endTimestamp;
+  
+  public long getStartTimestamp() {
+    return startTimestamp;
+  }
+  
+  public long getEndTimestamp() {
+    return endTimestamp;
+  }
   
   /**
-   * @param startTimestamp - inclusive first allowable timestamp
-   * @param endTimestamp - inclusive last allowable timestamp
+   * @param startTimestamp
+   *          - inclusive first allowable timestamp
+   * @param endTimestamp
+   *          - inclusive last allowable timestamp
    */
   public TimestampRangePredicate(long startTimestamp, long endTimestamp) {
     super();
     this.startTimestamp = startTimestamp;
     this.endTimestamp = endTimestamp;
   }
-
-  /* (non-Javadoc)
-   * @see org.apache.accumulo.core.iterators.Predicate#evaluate(java.lang.Object, java.lang.Object)
-   */
+  
   /**
    * return true IFF the key falls within the timestamp range
    */
@@ -53,6 +59,6 @@ public class TimestampRangePredicate implements Predicate<Key,Value> {
   
   @Override
   public String toString() {
-    return "{"+startTimestamp+"-"+endTimestamp+"}";
+    return "{" + startTimestamp + "-" + endTimestamp + "}";
   }
 }

http://git-wip-us.apache.org/repos/asf/accumulo/blob/665887f4/core/src/main/java/org/apache/accumulo/core/iterators/system/GenericFilterer.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/iterators/system/GenericFilterer.java b/core/src/main/java/org/apache/accumulo/core/iterators/system/GenericFilterer.java
new file mode 100644
index 0000000..d531d52
--- /dev/null
+++ b/core/src/main/java/org/apache/accumulo/core/iterators/system/GenericFilterer.java
@@ -0,0 +1,104 @@
+package org.apache.accumulo.core.iterators.system;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collection;
+
+import org.apache.accumulo.core.data.ByteSequence;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.Range;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.iterators.Filterer;
+import org.apache.accumulo.core.iterators.Predicate;
+import org.apache.accumulo.core.iterators.SortedKeyValueIterator;
+import org.apache.accumulo.core.iterators.WrappingIterator;
+
+public class GenericFilterer extends WrappingIterator implements Filterer<Key,Value> {
+  
+  private ArrayList<Predicate<Key,Value>> filters = new ArrayList<Predicate<Key,Value>>();
+  
+  private Key topKey;
+  private Value topValue;
+  
+  public GenericFilterer(SortedKeyValueIterator<Key,Value> source) {
+    setSource(source);
+  }
+
+  public GenericFilterer() {
+  }
+  
+  @Override
+  public void seek(Range range, Collection<ByteSequence> columnFamilies, boolean inclusive) throws IOException {
+    topKey = null;
+    topValue = null;
+    super.seek(range, columnFamilies, inclusive);
+  }
+
+  @Override
+  public void next() throws IOException {
+    topKey = null;
+    topValue = null;
+    super.next();
+  }
+
+  @Override
+  public Key getTopKey() {
+    if(topKey == null)
+      hasTop();
+    return topKey;
+  }
+  
+  @Override
+  public Value getTopValue() {
+    if(topValue == null)
+      hasTop();
+    return topValue;
+  }
+  
+  @Override
+  public boolean hasTop() {
+    if(topKey == null)
+    {
+      while(super.hasTop())
+      {
+        topKey = super.getTopKey();
+        topValue = super.getTopValue();
+        // check all the filters to see if we found a valid key/value pair
+        boolean keep = true;
+        for(Predicate<Key,Value> filter: filters)
+        {
+          if(!filter.evaluate(topKey, topValue))
+          {
+            keep = false;
+            try {
+              super.next();
+            } catch (IOException e) {
+              throw new RuntimeException(e);
+            }
+            break;
+          }
+        }
+        if(keep == true)
+          return true;
+      }
+      // ran out of key/value pairs
+      topKey = null;
+      topValue = null;
+      return false;
+    }
+    else
+    {
+      return true;
+    }
+  }
+  
+  @Override
+  public void applyFilter(Predicate<Key,Value> filter, boolean required) {
+    filters.add(filter);
+    if(getSource() instanceof Filterer)
+    {
+      Filterer<Key,Value> source = (Filterer<Key,Value>)getSource();
+      source.applyFilter(filter, false);
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/665887f4/core/src/main/java/org/apache/accumulo/core/iterators/system/VisibilityFilter.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/iterators/system/VisibilityFilter.java b/core/src/main/java/org/apache/accumulo/core/iterators/system/VisibilityFilter.java
index 2c05a03..5deed97 100644
--- a/core/src/main/java/org/apache/accumulo/core/iterators/system/VisibilityFilter.java
+++ b/core/src/main/java/org/apache/accumulo/core/iterators/system/VisibilityFilter.java
@@ -42,18 +42,20 @@ public class VisibilityFilter extends Filter {
   private static final Logger log = Logger.getLogger(VisibilityFilter.class);
   
   public VisibilityFilter(SortedKeyValueIterator<Key,Value> iterator, Authorizations authorizations, byte[] defaultVisibility) {
-    setSource(iterator);
     this.auths = authorizations;
     this.defaultVisibility = new Text(defaultVisibility);
     this.cache = new LRUMap(1000);
     this.tmpVis = new Text();
+    if(iterator instanceof Filterer)
+      ((Filterer<Key,Value>)iterator).applyFilter(new ColumnVisibilityPredicate(auths), false);
+    else
+      throw new IllegalArgumentException("expected to get a "+Filterer.class.getSimpleName());
+    setSource(iterator);
   }
 
   @Override
   public void init(SortedKeyValueIterator<Key,Value> source, Map<String,String> options, IteratorEnvironment env) throws IOException {
-    super.init(source, options, env);
-    if(source instanceof Filterer)
-      ((Filterer<Key,Value>)source).applyFilter(new ColumnVisibilityPredicate(auths), false);
+    throw new UnsupportedOperationException();
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/accumulo/blob/665887f4/core/src/test/java/org/apache/accumulo/core/file/rfile/AuthorizationFilterTest.java
----------------------------------------------------------------------
diff --git a/core/src/test/java/org/apache/accumulo/core/file/rfile/AuthorizationFilterTest.java b/core/src/test/java/org/apache/accumulo/core/file/rfile/AuthorizationFilterTest.java
index 7dac68b..080b6fb 100644
--- a/core/src/test/java/org/apache/accumulo/core/file/rfile/AuthorizationFilterTest.java
+++ b/core/src/test/java/org/apache/accumulo/core/file/rfile/AuthorizationFilterTest.java
@@ -35,6 +35,7 @@ import org.apache.accumulo.core.iterators.Predicate;
 import org.apache.accumulo.core.iterators.predicates.ColumnVisibilityPredicate;
 import org.apache.accumulo.core.iterators.predicates.TimestampRangePredicate;
 import org.apache.accumulo.core.iterators.system.ColumnFamilySkippingIterator;
+import org.apache.accumulo.core.iterators.system.VisibilityFilter;
 import org.apache.accumulo.core.security.Authorizations;
 import org.apache.accumulo.core.security.ColumnVisibility;
 import org.apache.hadoop.conf.Configuration;
@@ -97,12 +98,12 @@ public class AuthorizationFilterTest {
     CachableBlockFile.Reader _cbr = new CachableBlockFile.Reader(in, data.length, conf);
     RFile.Reader reader = new RFile.Reader(_cbr);
     int count = 0;
-    reader.applyFilter(columnVisibilityPredicate,true);
-    reader.seek(new Range(), Collections.EMPTY_SET, false);
-    while (reader.hasTop()) {
+    VisibilityFilter vf = new VisibilityFilter(reader, auths, new byte[0]);
+    vf.seek(new Range(), Collections.EMPTY_SET, false);
+    while (vf.hasTop()) {
       count++;
-      assertTrue(columnVisibilityPredicate.evaluate(reader.getTopKey(), reader.getTopValue()));
-      reader.next();
+      assertTrue(columnVisibilityPredicate.evaluate(vf.getTopKey(), vf.getTopValue()));
+      vf.next();
     }
     assertEquals(expected, count);
   }

http://git-wip-us.apache.org/repos/asf/accumulo/blob/665887f4/core/src/test/java/org/apache/accumulo/core/file/rfile/MultiLevelIndexTest.java
----------------------------------------------------------------------
diff --git a/core/src/test/java/org/apache/accumulo/core/file/rfile/MultiLevelIndexTest.java b/core/src/test/java/org/apache/accumulo/core/file/rfile/MultiLevelIndexTest.java
index b84f277..25bea00 100644
--- a/core/src/test/java/org/apache/accumulo/core/file/rfile/MultiLevelIndexTest.java
+++ b/core/src/test/java/org/apache/accumulo/core/file/rfile/MultiLevelIndexTest.java
@@ -20,8 +20,6 @@ import java.io.ByteArrayOutputStream;
 import java.io.IOException;
 import java.util.Random;
 
-import junit.framework.TestCase;
-
 import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.file.blockfile.ABlockWriter;
 import org.apache.accumulo.core.file.blockfile.impl.CachableBlockFile;
@@ -32,14 +30,20 @@ import org.apache.accumulo.core.file.rfile.MultiLevelIndex.Reader;
 import org.apache.accumulo.core.file.rfile.MultiLevelIndex.Reader.IndexIterator;
 import org.apache.accumulo.core.file.rfile.MultiLevelIndex.Writer;
 import org.apache.accumulo.core.file.rfile.RFileTest.SeekableByteArrayInputStream;
+import org.apache.accumulo.core.iterators.predicates.ColumnVisibilityPredicate;
+import org.apache.accumulo.core.security.Authorizations;
 import org.apache.accumulo.core.security.ColumnVisibility;
 import org.apache.accumulo.core.util.CachedConfiguration;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
+import org.junit.Test;
+
+import static junit.framework.Assert.*;
 
-public class MultiLevelIndexTest extends TestCase {
+public class MultiLevelIndexTest {
   
+  @Test
   public void test1() throws Exception {
     
     runTest(500, 1);
@@ -111,4 +115,65 @@ public class MultiLevelIndexTest extends TestCase {
     
   }
   
+  /**
+   * Test the behavior of seeking to a spot that the high-level index blocks
+   * say passes a filter, but the low level index blocks do not agree, forcing
+   * an index scan beyond the end of the first low-level block.
+   * @throws IOException
+   */
+  @Test
+  public void testIndexScanWithFilter() throws IOException
+  {
+    ByteArrayOutputStream baos = new ByteArrayOutputStream();
+    FSDataOutputStream dos = new FSDataOutputStream(baos, new FileSystem.Statistics("a"));
+    CachableBlockFile.Writer _cbw = new CachableBlockFile.Writer(dos, "gz", CachedConfiguration.getInstance());
+    
+    BufferedWriter mliw = new BufferedWriter(new Writer(_cbw, 1));
+    
+    // throw in a block stat with a visibility that is too big to serialize
+    mliw.add(new Key(String.format("%05d000", 0),"cf","cq","a",0), new BlockStats(0,0,new ColumnVisibility("abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz"),0), 0, 0, 0, RFile.RINDEX_VER_7);
+    for (int i = 1; i < 100; i++)
+    {
+      mliw.add(new Key(String.format("%05d000", i),"cf","cq","a",0), new BlockStats(0,0,new ColumnVisibility("a"),i), 0, 0, 0, RFile.RINDEX_VER_7);
+    }
+    mliw.add(new Key(String.format("%05d000", 100),"cf","cq","",0), new BlockStats(0,0,new ColumnVisibility(""),100), 0, 0, 0, RFile.RINDEX_VER_7);
+    for (int i = 101; i < 200; i++)
+    {
+      mliw.add(new Key(String.format("%05d000", i),"cf","cq","a",0), new BlockStats(0,0,new ColumnVisibility("a"),i), 0, 0, 0, RFile.RINDEX_VER_7);
+    }
+    mliw.addLast(new Key(String.format("%05d000", 200),"cf","cq","a",0), new BlockStats(0,0,new ColumnVisibility("a"),200), 0, 0, 0, RFile.RINDEX_VER_7);
+
+    ABlockWriter root = _cbw.prepareMetaBlock("root");
+    mliw.close(root);
+    root.close();
+    
+    _cbw.close();
+    dos.close();
+    baos.close();
+    
+    byte[] data = baos.toByteArray();
+    SeekableByteArrayInputStream bais = new SeekableByteArrayInputStream(data);
+    FSDataInputStream in = new FSDataInputStream(bais);
+    CachableBlockFile.Reader _cbr = new CachableBlockFile.Reader(in, data.length, CachedConfiguration.getInstance());
+    
+    Reader reader = new Reader(_cbr, RFile.RINDEX_VER_7);
+    BlockRead rootIn = _cbr.getMetaBlock("root");
+    reader.readFields(rootIn);
+    rootIn.close();
+    reader.setColumnVisibilityPredicate(new ColumnVisibilityPredicate(new Authorizations()));
+    // seek past the block stat with the visibility that is too big to serialize, but not past the block that can be seen
+    IndexIterator liter = reader.lookup(new Key("000010"));
+    int count = 0;
+    while (liter.hasNext()) {
+      assertEquals(100, liter.nextIndex());
+      assertEquals(100, liter.peek().getNumEntries());
+      assertEquals(100, liter.next().getNumEntries());
+      count++;
+    }
+    
+    assertEquals(1, count);
+    
+    _cbr.close();
+  }
+  
 }

http://git-wip-us.apache.org/repos/asf/accumulo/blob/665887f4/core/src/test/java/org/apache/accumulo/core/file/rfile/TimestampFilterTest.java
----------------------------------------------------------------------
diff --git a/core/src/test/java/org/apache/accumulo/core/file/rfile/TimestampFilterTest.java b/core/src/test/java/org/apache/accumulo/core/file/rfile/TimestampFilterTest.java
index 160d7bd..f72a42b 100644
--- a/core/src/test/java/org/apache/accumulo/core/file/rfile/TimestampFilterTest.java
+++ b/core/src/test/java/org/apache/accumulo/core/file/rfile/TimestampFilterTest.java
@@ -34,6 +34,7 @@ import org.apache.accumulo.core.file.rfile.RFileTest.SeekableByteArrayInputStrea
 import org.apache.accumulo.core.iterators.Predicate;
 import org.apache.accumulo.core.iterators.predicates.TimestampRangePredicate;
 import org.apache.accumulo.core.iterators.system.ColumnFamilySkippingIterator;
+import org.apache.accumulo.core.iterators.system.GenericFilterer;
 import org.apache.accumulo.core.security.ColumnVisibility;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataInputStream;
@@ -82,14 +83,15 @@ public class TimestampFilterTest {
     FSDataInputStream in = new FSDataInputStream(bais);
     CachableBlockFile.Reader _cbr = new CachableBlockFile.Reader(in, data.length, conf);
     RFile.Reader reader = new RFile.Reader(_cbr);
+    GenericFilterer filterer = new GenericFilterer(reader);
     int count = 0;
-    reader.applyFilter(timeRange,true);
-    reader.seek(new Range(), Collections.EMPTY_SET, false);
-    while(reader.hasTop())
+    filterer.applyFilter(timeRange,true);
+    filterer.seek(new Range(), Collections.EMPTY_SET, false);
+    while(filterer.hasTop())
     {
       count++;
-      assertTrue(timeRange.evaluate(reader.getTopKey(),reader.getTopValue()));
-      reader.next();
+      assertTrue(timeRange.evaluate(filterer.getTopKey(),filterer.getTopValue()));
+      filterer.next();
     }
     assertEquals(expected, count);
   }

http://git-wip-us.apache.org/repos/asf/accumulo/blob/665887f4/core/src/test/java/org/apache/accumulo/core/iterators/user/FilterTest.java
----------------------------------------------------------------------
diff --git a/core/src/test/java/org/apache/accumulo/core/iterators/user/FilterTest.java b/core/src/test/java/org/apache/accumulo/core/iterators/user/FilterTest.java
index d6319fb..56b0fc3 100644
--- a/core/src/test/java/org/apache/accumulo/core/iterators/user/FilterTest.java
+++ b/core/src/test/java/org/apache/accumulo/core/iterators/user/FilterTest.java
@@ -40,6 +40,7 @@ import org.apache.accumulo.core.iterators.Filter;
 import org.apache.accumulo.core.iterators.SortedKeyValueIterator;
 import org.apache.accumulo.core.iterators.SortedMapIterator;
 import org.apache.accumulo.core.iterators.system.ColumnQualifierFilter;
+import org.apache.accumulo.core.iterators.system.GenericFilterer;
 import org.apache.accumulo.core.iterators.system.VisibilityFilter;
 import org.apache.accumulo.core.security.Authorizations;
 import org.apache.accumulo.core.security.ColumnVisibility;
@@ -305,7 +306,8 @@ public class FilterTest {
     }
     assertTrue(tm.size() == 1000);
     
-    VisibilityFilter a = new VisibilityFilter(new SortedMapIterator(tm), auths, le2.getExpression());
+    SortedKeyValueIterator<Key,Value> iter = new GenericFilterer(new SortedMapIterator(tm));
+    VisibilityFilter a = new VisibilityFilter(iter, auths, le2.getExpression());
     a.seek(new Range(), EMPTY_COL_FAMS, false);
     int size = size(a);
     assertTrue("size was " + size, size == 750);


[07/15] git commit: ACCUMULO-652 fixed stat aggregation at higher levels, fixed some formatting, and fixed timestamp filtering of index blocks in the IndexIterator

Posted by el...@apache.org.
ACCUMULO-652 fixed stat aggregation at higher levels, fixed some formatting, and fixed timestamp filtering of index blocks in the IndexIterator

git-svn-id: https://svn.apache.org/repos/asf/accumulo/branches/ACCUMULO-652@1355044 13f79535-47bb-0310-9956-ffa450edef68


Project: http://git-wip-us.apache.org/repos/asf/accumulo/repo
Commit: http://git-wip-us.apache.org/repos/asf/accumulo/commit/727e61ec
Tree: http://git-wip-us.apache.org/repos/asf/accumulo/tree/727e61ec
Diff: http://git-wip-us.apache.org/repos/asf/accumulo/diff/727e61ec

Branch: refs/heads/ACCUMULO-652
Commit: 727e61eca65008cca5de89ce0d849dae7ec5b559
Parents: 8a3ddeb
Author: Adam Fuchs <af...@apache.org>
Authored: Thu Jun 28 15:30:29 2012 +0000
Committer: Adam Fuchs <af...@apache.org>
Committed: Thu Jun 28 15:30:29 2012 +0000

----------------------------------------------------------------------
 .../core/file/rfile/MultiLevelIndex.java        | 43 ++++++++++----------
 .../apache/accumulo/core/file/rfile/RFile.java  |  8 ++++
 .../predicates/TimestampRangePredicate.java     |  4 ++
 .../core/file/rfile/TimestampFilterTest.java    |  2 +-
 4 files changed, 35 insertions(+), 22 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/accumulo/blob/727e61ec/core/src/main/java/org/apache/accumulo/core/file/rfile/MultiLevelIndex.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/file/rfile/MultiLevelIndex.java b/core/src/main/java/org/apache/accumulo/core/file/rfile/MultiLevelIndex.java
index 4163894..d6d7cd1 100644
--- a/core/src/main/java/org/apache/accumulo/core/file/rfile/MultiLevelIndex.java
+++ b/core/src/main/java/org/apache/accumulo/core/file/rfile/MultiLevelIndex.java
@@ -39,6 +39,7 @@ import org.apache.accumulo.core.file.blockfile.ABlockWriter;
 import org.apache.accumulo.core.file.blockfile.BlockFileReader;
 import org.apache.accumulo.core.file.blockfile.BlockFileWriter;
 import org.apache.accumulo.core.file.rfile.bcfile.Utils;
+import org.apache.accumulo.core.iterators.predicates.ColumnVisibilityPredicate;
 import org.apache.accumulo.core.iterators.predicates.TimestampRangePredicate;
 import org.apache.accumulo.core.security.ColumnVisibility;
 import org.apache.hadoop.io.WritableComparable;
@@ -406,7 +407,7 @@ public class MultiLevelIndex {
       writer.close(out);
     }
   }
-
+  
   public static class Writer {
     private int threshold;
     
@@ -424,8 +425,7 @@ public class MultiLevelIndex {
       levels = new ArrayList<IndexBlock>();
     }
     
-    private void add(int level, Key key, BlockStats blockStats, long offset, long compressedSize, long rawSize, boolean last, int version)
-        throws IOException {
+    private void add(int level, Key key, BlockStats blockStats, long offset, long compressedSize, long rawSize, boolean last, int version) throws IOException {
       if (level == levels.size()) {
         levels.add(new IndexBlock(level, 0));
       }
@@ -443,7 +443,7 @@ public class MultiLevelIndex {
         iblock.write(out);
         out.close();
         
-        add(level + 1, key, blockStats, out.getStartPos(), out.getCompressedSize(), out.getRawSize(), last, version);
+        add(level + 1, key, iblock.blockStats, out.getStartPos(), out.getCompressedSize(), out.getRawSize(), last, version);
         
         if (last)
           levels.set(level, null);
@@ -501,9 +501,11 @@ public class MultiLevelIndex {
     class IndexIterator implements Iterator<IndexEntry> {
       private Stack<StackEntry> position = new Stack<StackEntry>();
       private final TimestampRangePredicate timestampFilter;
+      private final ColumnVisibilityPredicate columnVisibilityPredicate;
       
-      private IndexIterator(TimestampRangePredicate timestampFilter, Key lookupKey) {
+      private IndexIterator(TimestampRangePredicate timestampFilter, ColumnVisibilityPredicate columnVisibilityPredicate, Key lookupKey) {
         this.timestampFilter = timestampFilter;
+        this.columnVisibilityPredicate = columnVisibilityPredicate;
         try {
           seek(lookupKey);
         } catch (IOException e) {
@@ -512,10 +514,10 @@ public class MultiLevelIndex {
       }
       
       private final boolean checkFilterIndexEntry(IndexEntry ie) {
-        if(timestampFilter == null)
-        if (timestampFilter != null && (ie.blockStats.maxTimestamp < timestampFilter.startTimestamp || ie.blockStats.minTimestamp > timestampFilter.endTimestamp)) {
+        if(timestampFilter != null && (ie.blockStats.maxTimestamp < timestampFilter.startTimestamp || ie.blockStats.minTimestamp > timestampFilter.endTimestamp))
+          return false;
+        if(columnVisibilityPredicate != null && ie.blockStats.minimumVisibility != null && ie.blockStats.minimumVisibility.evaluate(columnVisibilityPredicate.auths) == false)
           return false;
-        }
         return true;
       }
       
@@ -532,7 +534,6 @@ public class MultiLevelIndex {
             }
           });
           
-          
           if (pos < 0) {
             pos = (pos * -1) - 1;
           } else if (pos < top.block.getKeyIndex().size()) {
@@ -542,14 +543,13 @@ public class MultiLevelIndex {
             }
           }
           
-
           IndexEntry ie = null;
           List<IndexEntry> index = top.block.getIndex();
           
-          if(pos > 0)
-          {
-            // look backwards to find any initial previousEntry that might match the timestamp range such that no entry within the given timestamp range is between the seeked key and the previousKey
-            previousEntry = index.get(pos-1);
+          if (pos > 0) {
+            // look backwards to find any initial previousEntry that might match the timestamp range such that no entry within the given timestamp range is
+            // between the seeked key and the previousKey
+            previousEntry = index.get(pos - 1);
             // TODO: find the offset for this block
             previousIndex = Integer.MIN_VALUE;
           }
@@ -562,7 +562,6 @@ public class MultiLevelIndex {
             pos++;
           }
           
-          
           if (pos == index.size()) {
             position.pop();
             goToNext();
@@ -581,7 +580,6 @@ public class MultiLevelIndex {
       }
       
       private void goToNext() throws IOException {
-        int numSkippedBlocks = 0;
         // traverse the index tree forwards
         while (position.isEmpty() == false) {
           StackEntry top = position.peek();
@@ -590,7 +588,6 @@ public class MultiLevelIndex {
           while (top.offset < index.size()) {
             if (checkFilterIndexEntry(index.get(top.offset)))
               break;
-            numSkippedBlocks++;
             top.offset++;
           }
           if (top.offset == index.size()) {
@@ -651,8 +648,6 @@ public class MultiLevelIndex {
         return nextEntry;
       }
       
-      private int blocksReturned = 0;
-      
       public IndexEntry next() {
         prepNext();
         previousEntry = nextEntry;
@@ -697,7 +692,7 @@ public class MultiLevelIndex {
     }
     
     IndexIterator lookup(Key key) throws IOException {
-      return new IndexIterator(timestampRange, key);
+      return new IndexIterator(timestampRange, columnVisibilityPredicate, key);
     }
     
     public void readFields(DataInput in) throws IOException {
@@ -751,7 +746,7 @@ public class MultiLevelIndex {
       return rootBlock.getIndex().get(rootBlock.getIndex().size() - 1).getKey();
     }
     
-    TimestampRangePredicate timestampRange;
+    TimestampRangePredicate timestampRange = null;
     
     /**
      * @param r
@@ -759,6 +754,12 @@ public class MultiLevelIndex {
     public void setTimestampRange(TimestampRangePredicate r) {
       this.timestampRange = r;
     }
+    
+    ColumnVisibilityPredicate columnVisibilityPredicate = null;
+    
+    public void setColumnVisibilityPredicate(ColumnVisibilityPredicate columnVisibilityPredicate) {
+      this.columnVisibilityPredicate = columnVisibilityPredicate;
+    }
   }
   
 }

http://git-wip-us.apache.org/repos/asf/accumulo/blob/727e61ec/core/src/main/java/org/apache/accumulo/core/file/rfile/RFile.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/file/rfile/RFile.java b/core/src/main/java/org/apache/accumulo/core/file/rfile/RFile.java
index c4bdb7f..5e1e8a3 100644
--- a/core/src/main/java/org/apache/accumulo/core/file/rfile/RFile.java
+++ b/core/src/main/java/org/apache/accumulo/core/file/rfile/RFile.java
@@ -61,6 +61,7 @@ import org.apache.accumulo.core.iterators.IterationInterruptedException;
 import org.apache.accumulo.core.iterators.IteratorEnvironment;
 import org.apache.accumulo.core.iterators.Predicate;
 import org.apache.accumulo.core.iterators.SortedKeyValueIterator;
+import org.apache.accumulo.core.iterators.predicates.ColumnVisibilityPredicate;
 import org.apache.accumulo.core.iterators.predicates.TimestampRangePredicate;
 import org.apache.accumulo.core.iterators.system.HeapIterator;
 import org.apache.accumulo.core.security.ColumnVisibility;
@@ -776,6 +777,7 @@ public class RFile {
     }
     
     private TimestampRangePredicate timestampRange;
+    private ColumnVisibilityPredicate columnVisibilityPredicate;
     private boolean filterChanged = false;
 
     /* (non-Javadoc)
@@ -795,6 +797,12 @@ public class RFile {
           timestampRange = p;
         index.setTimestampRange(timestampRange);
       }
+      else if(filter instanceof ColumnVisibilityPredicate)
+      {
+    	  filterChanged = true;
+    	  columnVisibilityPredicate = (ColumnVisibilityPredicate)filter;
+    	  index.setColumnVisibilityPredicate(columnVisibilityPredicate);
+      }
       else
       {
         throw new RuntimeException("yikes, not yet implemented");

http://git-wip-us.apache.org/repos/asf/accumulo/blob/727e61ec/core/src/main/java/org/apache/accumulo/core/iterators/predicates/TimestampRangePredicate.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/iterators/predicates/TimestampRangePredicate.java b/core/src/main/java/org/apache/accumulo/core/iterators/predicates/TimestampRangePredicate.java
index eb5080b..25f68a1 100644
--- a/core/src/main/java/org/apache/accumulo/core/iterators/predicates/TimestampRangePredicate.java
+++ b/core/src/main/java/org/apache/accumulo/core/iterators/predicates/TimestampRangePredicate.java
@@ -51,4 +51,8 @@ public class TimestampRangePredicate implements Predicate<Key,Value> {
     return timestamp >= startTimestamp && timestamp <= endTimestamp;
   }
   
+  @Override
+  public String toString() {
+    return "{"+startTimestamp+"-"+endTimestamp+"}";
+  }
 }

http://git-wip-us.apache.org/repos/asf/accumulo/blob/727e61ec/core/src/test/java/org/apache/accumulo/core/file/rfile/TimestampFilterTest.java
----------------------------------------------------------------------
diff --git a/core/src/test/java/org/apache/accumulo/core/file/rfile/TimestampFilterTest.java b/core/src/test/java/org/apache/accumulo/core/file/rfile/TimestampFilterTest.java
index c58f924..463c779 100644
--- a/core/src/test/java/org/apache/accumulo/core/file/rfile/TimestampFilterTest.java
+++ b/core/src/test/java/org/apache/accumulo/core/file/rfile/TimestampFilterTest.java
@@ -46,7 +46,7 @@ public class TimestampFilterTest {
   @Test
   public void testRFileTimestampFiltering() throws Exception {
     // TODO create an RFile with increasing timestamp and random key order
-    Predicate<Key,Value> timeRange = new TimestampRangePredicate(100, 110);
+    Predicate<Key,Value> timeRange = new TimestampRangePredicate(73, 117);
     int expected = 0;
     Random r = new Random();
     Configuration conf = new Configuration();


[04/15] git commit: ACCUMULO-652 removed unused (and now broken) classes

Posted by el...@apache.org.
ACCUMULO-652 removed unused (and now broken) classes

git-svn-id: https://svn.apache.org/repos/asf/accumulo/branches/ACCUMULO-652@1354476 13f79535-47bb-0310-9956-ffa450edef68


Project: http://git-wip-us.apache.org/repos/asf/accumulo/repo
Commit: http://git-wip-us.apache.org/repos/asf/accumulo/commit/80ee809a
Tree: http://git-wip-us.apache.org/repos/asf/accumulo/tree/80ee809a
Diff: http://git-wip-us.apache.org/repos/asf/accumulo/diff/80ee809a

Branch: refs/heads/ACCUMULO-652
Commit: 80ee809a8d3e47a4fd20b4059cbd37a9db0b5579
Parents: 3fcd07d
Author: Adam Fuchs <af...@apache.org>
Authored: Wed Jun 27 12:49:32 2012 +0000
Committer: Adam Fuchs <af...@apache.org>
Committed: Wed Jun 27 12:49:32 2012 +0000

----------------------------------------------------------------------
 .../core/security/VisibilityEvaluator.java      | 71 --------------------
 .../core/security/VisibilityInterpreter.java    | 34 ----------
 .../security/VisibilityInterpreterFactory.java  | 40 -----------
 .../core/security/VisibilityParseException.java | 34 ----------
 4 files changed, 179 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/accumulo/blob/80ee809a/core/src/main/java/org/apache/accumulo/core/security/VisibilityEvaluator.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/security/VisibilityEvaluator.java b/core/src/main/java/org/apache/accumulo/core/security/VisibilityEvaluator.java
deleted file mode 100644
index 62bb167..0000000
--- a/core/src/main/java/org/apache/accumulo/core/security/VisibilityEvaluator.java
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.core.security;
-
-import java.util.Collection;
-
-import org.apache.accumulo.core.data.ArrayByteSequence;
-import org.apache.accumulo.core.security.ColumnVisibility.Node;
-
-public class VisibilityEvaluator {
-  private Authorizations auths;
-  
-  VisibilityEvaluator(Collection<byte[]> authorizations) {
-    this.auths = new Authorizations(authorizations);
-  }
-  
-  /**
-   * The VisibilityEvaluator computes a trie from the given Authorizations, that ColumnVisibility expressions can be evaluated against.
-   */
-  public VisibilityEvaluator(Authorizations authorizations) {
-    this.auths = authorizations;
-  }
-  
-  public Authorizations getAuthorizations() {
-    return new Authorizations(auths.getAuthorizations());
-  }
-  
-  public boolean evaluate(ColumnVisibility visibility) throws VisibilityParseException {
-    return evaluate(visibility.getExpression(), visibility.getParseTree());
-  }
-  
-  private final boolean evaluate(final byte[] expression, final Node root) throws VisibilityParseException {
-    switch (root.type) {
-      case TERM:
-        int len = root.getTermEnd() - root.getTermStart();
-        return auths.contains(new ArrayByteSequence(expression, root.getTermStart(), len));
-      case AND:
-        if (root.children == null || root.children.size() < 2)
-          throw new VisibilityParseException("AND has less than 2 children", expression, root.start);
-        for (Node child : root.children) {
-          if (!evaluate(expression, child))
-            return false;
-        }
-        return true;
-      case OR:
-        if (root.children == null || root.children.size() < 2)
-          throw new VisibilityParseException("OR has less than 2 children", expression, root.start);
-        for (Node child : root.children) {
-          if (evaluate(expression, child))
-            return true;
-        }
-        return false;
-      default:
-        throw new VisibilityParseException("No such node type", expression, root.start);
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/80ee809a/core/src/main/java/org/apache/accumulo/core/security/VisibilityInterpreter.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/security/VisibilityInterpreter.java b/core/src/main/java/org/apache/accumulo/core/security/VisibilityInterpreter.java
deleted file mode 100644
index 68de07f..0000000
--- a/core/src/main/java/org/apache/accumulo/core/security/VisibilityInterpreter.java
+++ /dev/null
@@ -1,34 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.core.security;
-
-import java.io.Serializable;
-
-public interface VisibilityInterpreter extends Serializable {
-  public abstract String getAbbreviatedValue();
-  
-  public abstract String getFullValue();
-  
-  public abstract void merge(ColumnVisibility other, Authorizations authorizations);
-  
-  public abstract void merge(VisibilityInterpreter other);
-  
-  // Factory type method that can be used from an instance
-  public abstract VisibilityInterpreter create();
-  
-  public abstract VisibilityInterpreter create(ColumnVisibility visibility, Authorizations authorizations);
-}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/80ee809a/core/src/main/java/org/apache/accumulo/core/security/VisibilityInterpreterFactory.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/security/VisibilityInterpreterFactory.java b/core/src/main/java/org/apache/accumulo/core/security/VisibilityInterpreterFactory.java
deleted file mode 100644
index 994087e..0000000
--- a/core/src/main/java/org/apache/accumulo/core/security/VisibilityInterpreterFactory.java
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.core.security;
-
-
-public class VisibilityInterpreterFactory {
-  private static VisibilityInterpreter interpreter = null;
-  
-  public static VisibilityInterpreter create() {
-    if (interpreter == null) {
-      throw new IllegalStateException("ColumnVisibilityInterpreterFactory is not configured:  Interpreter is null");
-    }
-    return interpreter.create();
-  }
-  
-  public static VisibilityInterpreter create(ColumnVisibility cv, Authorizations authorizations) {
-    if (interpreter == null) {
-      throw new IllegalStateException("ColumnVisibilityInterpreterFactory is not configured:  Interpreter is null");
-    }
-    return interpreter.create(cv, authorizations);
-  }
-  
-  public static void setInterpreter(VisibilityInterpreter interpreter) {
-    VisibilityInterpreterFactory.interpreter = interpreter;
-  }
-}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/80ee809a/core/src/main/java/org/apache/accumulo/core/security/VisibilityParseException.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/security/VisibilityParseException.java b/core/src/main/java/org/apache/accumulo/core/security/VisibilityParseException.java
deleted file mode 100644
index 2f46dc9..0000000
--- a/core/src/main/java/org/apache/accumulo/core/security/VisibilityParseException.java
+++ /dev/null
@@ -1,34 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.core.security;
-
-import java.text.ParseException;
-
-public class VisibilityParseException extends ParseException {
-  private static final long serialVersionUID = 1L;
-  private String visibility;
-  
-  public VisibilityParseException(String reason, byte[] visibility, int errorOffset) {
-    super(reason, errorOffset);
-    this.visibility = new String(visibility);
-  }
-  
-  @Override
-  public String getMessage() {
-    return super.getMessage() + " in string '" + visibility + "' at position " + super.getErrorOffset();
-  }
-}


[02/15] ACCUMULO-652 initial mods to RFile to keep track of extra block statistics

Posted by el...@apache.org.
http://git-wip-us.apache.org/repos/asf/accumulo/blob/3fcd07de/examples/wikisearch/query/src/main/java/org/apache/accumulo/examples/wikisearch/parser/EventFields.java
----------------------------------------------------------------------
diff --git a/examples/wikisearch/query/src/main/java/org/apache/accumulo/examples/wikisearch/parser/EventFields.java b/examples/wikisearch/query/src/main/java/org/apache/accumulo/examples/wikisearch/parser/EventFields.java
index bd43088..ca2d22d 100644
--- a/examples/wikisearch/query/src/main/java/org/apache/accumulo/examples/wikisearch/parser/EventFields.java
+++ b/examples/wikisearch/query/src/main/java/org/apache/accumulo/examples/wikisearch/parser/EventFields.java
@@ -72,14 +72,14 @@ public class EventFields implements SetMultimap<String,FieldValue>, CustomSerial
     }
     
     public int size() {
-      return visibility.flatten().length + value.length;
+      return visibility.getExpression().length + value.length;
     }
     
     @Override
     public String toString() {
       StringBuilder buf = new StringBuilder();
       if (null != visibility)
-        buf.append(" visibility: ").append(new String(visibility.flatten()));
+        buf.append(" visibility: ").append(new String(visibility.getExpression()));
       if (null != value)
         buf.append(" value size: ").append(value.length);
       if (null != value)
@@ -219,7 +219,7 @@ public class EventFields implements SetMultimap<String,FieldValue>, CustomSerial
       // Write the key
       StringSerializer.put(buf, entry.getKey());
       // Write the fields in the value
-      valueSerializer.writeObjectData(buf, entry.getValue().getVisibility().flatten());
+      valueSerializer.writeObjectData(buf, entry.getValue().getVisibility().getExpression());
       valueSerializer.writeObjectData(buf, entry.getValue().getValue());
     }
   }


[12/15] ACCUMULO-652 merged changes from trunk

Posted by el...@apache.org.
http://git-wip-us.apache.org/repos/asf/accumulo/blob/58fcad6e/proxy/src/main/java/org/apache/accumulo/proxy/thrift/PIteratorSetting.java
----------------------------------------------------------------------
diff --cc proxy/src/main/java/org/apache/accumulo/proxy/thrift/PIteratorSetting.java
index 0000000,0000000..fbbfbe5
new file mode 100644
--- /dev/null
+++ b/proxy/src/main/java/org/apache/accumulo/proxy/thrift/PIteratorSetting.java
@@@ -1,0 -1,0 +1,763 @@@
++/*
++ * Licensed to the Apache Software Foundation (ASF) under one or more
++ * contributor license agreements.  See the NOTICE file distributed with
++ * this work for additional information regarding copyright ownership.
++ * The ASF licenses this file to You under the Apache License, Version 2.0
++ * (the "License"); you may not use this file except in compliance with
++ * the License.  You may obtain a copy of the License at
++ *
++ *     http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++/**
++ * Autogenerated by Thrift Compiler (0.9.0)
++ *
++ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
++ *  @generated
++ */
++package org.apache.accumulo.proxy.thrift;
++
++import org.apache.thrift.scheme.IScheme;
++import org.apache.thrift.scheme.SchemeFactory;
++import org.apache.thrift.scheme.StandardScheme;
++
++import org.apache.thrift.scheme.TupleScheme;
++import org.apache.thrift.protocol.TTupleProtocol;
++import org.apache.thrift.protocol.TProtocolException;
++import org.apache.thrift.EncodingUtils;
++import org.apache.thrift.TException;
++import java.util.List;
++import java.util.ArrayList;
++import java.util.Map;
++import java.util.HashMap;
++import java.util.EnumMap;
++import java.util.Set;
++import java.util.HashSet;
++import java.util.EnumSet;
++import java.util.Collections;
++import java.util.BitSet;
++import java.nio.ByteBuffer;
++import java.util.Arrays;
++import org.slf4j.Logger;
++import org.slf4j.LoggerFactory;
++
++@SuppressWarnings("all") public class PIteratorSetting implements org.apache.thrift.TBase<PIteratorSetting, PIteratorSetting._Fields>, java.io.Serializable, Cloneable {
++  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("PIteratorSetting");
++
++  private static final org.apache.thrift.protocol.TField PRIORITY_FIELD_DESC = new org.apache.thrift.protocol.TField("priority", org.apache.thrift.protocol.TType.I32, (short)1);
++  private static final org.apache.thrift.protocol.TField NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("name", org.apache.thrift.protocol.TType.STRING, (short)2);
++  private static final org.apache.thrift.protocol.TField ITERATOR_CLASS_FIELD_DESC = new org.apache.thrift.protocol.TField("iteratorClass", org.apache.thrift.protocol.TType.STRING, (short)3);
++  private static final org.apache.thrift.protocol.TField PROPERTIES_FIELD_DESC = new org.apache.thrift.protocol.TField("properties", org.apache.thrift.protocol.TType.MAP, (short)4);
++
++  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
++  static {
++    schemes.put(StandardScheme.class, new PIteratorSettingStandardSchemeFactory());
++    schemes.put(TupleScheme.class, new PIteratorSettingTupleSchemeFactory());
++  }
++
++  public int priority; // required
++  public String name; // required
++  public String iteratorClass; // required
++  public Map<String,String> properties; // required
++
++  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
++  @SuppressWarnings("all") public enum _Fields implements org.apache.thrift.TFieldIdEnum {
++    PRIORITY((short)1, "priority"),
++    NAME((short)2, "name"),
++    ITERATOR_CLASS((short)3, "iteratorClass"),
++    PROPERTIES((short)4, "properties");
++
++    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
++
++    static {
++      for (_Fields field : EnumSet.allOf(_Fields.class)) {
++        byName.put(field.getFieldName(), field);
++      }
++    }
++
++    /**
++     * Find the _Fields constant that matches fieldId, or null if its not found.
++     */
++    public static _Fields findByThriftId(int fieldId) {
++      switch(fieldId) {
++        case 1: // PRIORITY
++          return PRIORITY;
++        case 2: // NAME
++          return NAME;
++        case 3: // ITERATOR_CLASS
++          return ITERATOR_CLASS;
++        case 4: // PROPERTIES
++          return PROPERTIES;
++        default:
++          return null;
++      }
++    }
++
++    /**
++     * Find the _Fields constant that matches fieldId, throwing an exception
++     * if it is not found.
++     */
++    public static _Fields findByThriftIdOrThrow(int fieldId) {
++      _Fields fields = findByThriftId(fieldId);
++      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
++      return fields;
++    }
++
++    /**
++     * Find the _Fields constant that matches name, or null if its not found.
++     */
++    public static _Fields findByName(String name) {
++      return byName.get(name);
++    }
++
++    private final short _thriftId;
++    private final String _fieldName;
++
++    _Fields(short thriftId, String fieldName) {
++      _thriftId = thriftId;
++      _fieldName = fieldName;
++    }
++
++    public short getThriftFieldId() {
++      return _thriftId;
++    }
++
++    public String getFieldName() {
++      return _fieldName;
++    }
++  }
++
++  // isset id assignments
++  private static final int __PRIORITY_ISSET_ID = 0;
++  private byte __isset_bitfield = 0;
++  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
++  static {
++    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
++    tmpMap.put(_Fields.PRIORITY, new org.apache.thrift.meta_data.FieldMetaData("priority", org.apache.thrift.TFieldRequirementType.DEFAULT, 
++        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32)));
++    tmpMap.put(_Fields.NAME, new org.apache.thrift.meta_data.FieldMetaData("name", org.apache.thrift.TFieldRequirementType.DEFAULT, 
++        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
++    tmpMap.put(_Fields.ITERATOR_CLASS, new org.apache.thrift.meta_data.FieldMetaData("iteratorClass", org.apache.thrift.TFieldRequirementType.DEFAULT, 
++        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
++    tmpMap.put(_Fields.PROPERTIES, new org.apache.thrift.meta_data.FieldMetaData("properties", org.apache.thrift.TFieldRequirementType.DEFAULT, 
++        new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP, 
++            new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING), 
++            new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))));
++    metaDataMap = Collections.unmodifiableMap(tmpMap);
++    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(PIteratorSetting.class, metaDataMap);
++  }
++
++  public PIteratorSetting() {
++  }
++
++  public PIteratorSetting(
++    int priority,
++    String name,
++    String iteratorClass,
++    Map<String,String> properties)
++  {
++    this();
++    this.priority = priority;
++    setPriorityIsSet(true);
++    this.name = name;
++    this.iteratorClass = iteratorClass;
++    this.properties = properties;
++  }
++
++  /**
++   * Performs a deep copy on <i>other</i>.
++   */
++  public PIteratorSetting(PIteratorSetting other) {
++    __isset_bitfield = other.__isset_bitfield;
++    this.priority = other.priority;
++    if (other.isSetName()) {
++      this.name = other.name;
++    }
++    if (other.isSetIteratorClass()) {
++      this.iteratorClass = other.iteratorClass;
++    }
++    if (other.isSetProperties()) {
++      Map<String,String> __this__properties = new HashMap<String,String>();
++      for (Map.Entry<String, String> other_element : other.properties.entrySet()) {
++
++        String other_element_key = other_element.getKey();
++        String other_element_value = other_element.getValue();
++
++        String __this__properties_copy_key = other_element_key;
++
++        String __this__properties_copy_value = other_element_value;
++
++        __this__properties.put(__this__properties_copy_key, __this__properties_copy_value);
++      }
++      this.properties = __this__properties;
++    }
++  }
++
++  public PIteratorSetting deepCopy() {
++    return new PIteratorSetting(this);
++  }
++
++  @Override
++  public void clear() {
++    setPriorityIsSet(false);
++    this.priority = 0;
++    this.name = null;
++    this.iteratorClass = null;
++    this.properties = null;
++  }
++
++  public int getPriority() {
++    return this.priority;
++  }
++
++  public PIteratorSetting setPriority(int priority) {
++    this.priority = priority;
++    setPriorityIsSet(true);
++    return this;
++  }
++
++  public void unsetPriority() {
++    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __PRIORITY_ISSET_ID);
++  }
++
++  /** Returns true if field priority is set (has been assigned a value) and false otherwise */
++  public boolean isSetPriority() {
++    return EncodingUtils.testBit(__isset_bitfield, __PRIORITY_ISSET_ID);
++  }
++
++  public void setPriorityIsSet(boolean value) {
++    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __PRIORITY_ISSET_ID, value);
++  }
++
++  public String getName() {
++    return this.name;
++  }
++
++  public PIteratorSetting setName(String name) {
++    this.name = name;
++    return this;
++  }
++
++  public void unsetName() {
++    this.name = null;
++  }
++
++  /** Returns true if field name is set (has been assigned a value) and false otherwise */
++  public boolean isSetName() {
++    return this.name != null;
++  }
++
++  public void setNameIsSet(boolean value) {
++    if (!value) {
++      this.name = null;
++    }
++  }
++
++  public String getIteratorClass() {
++    return this.iteratorClass;
++  }
++
++  public PIteratorSetting setIteratorClass(String iteratorClass) {
++    this.iteratorClass = iteratorClass;
++    return this;
++  }
++
++  public void unsetIteratorClass() {
++    this.iteratorClass = null;
++  }
++
++  /** Returns true if field iteratorClass is set (has been assigned a value) and false otherwise */
++  public boolean isSetIteratorClass() {
++    return this.iteratorClass != null;
++  }
++
++  public void setIteratorClassIsSet(boolean value) {
++    if (!value) {
++      this.iteratorClass = null;
++    }
++  }
++
++  public int getPropertiesSize() {
++    return (this.properties == null) ? 0 : this.properties.size();
++  }
++
++  public void putToProperties(String key, String val) {
++    if (this.properties == null) {
++      this.properties = new HashMap<String,String>();
++    }
++    this.properties.put(key, val);
++  }
++
++  public Map<String,String> getProperties() {
++    return this.properties;
++  }
++
++  public PIteratorSetting setProperties(Map<String,String> properties) {
++    this.properties = properties;
++    return this;
++  }
++
++  public void unsetProperties() {
++    this.properties = null;
++  }
++
++  /** Returns true if field properties is set (has been assigned a value) and false otherwise */
++  public boolean isSetProperties() {
++    return this.properties != null;
++  }
++
++  public void setPropertiesIsSet(boolean value) {
++    if (!value) {
++      this.properties = null;
++    }
++  }
++
++  public void setFieldValue(_Fields field, Object value) {
++    switch (field) {
++    case PRIORITY:
++      if (value == null) {
++        unsetPriority();
++      } else {
++        setPriority((Integer)value);
++      }
++      break;
++
++    case NAME:
++      if (value == null) {
++        unsetName();
++      } else {
++        setName((String)value);
++      }
++      break;
++
++    case ITERATOR_CLASS:
++      if (value == null) {
++        unsetIteratorClass();
++      } else {
++        setIteratorClass((String)value);
++      }
++      break;
++
++    case PROPERTIES:
++      if (value == null) {
++        unsetProperties();
++      } else {
++        setProperties((Map<String,String>)value);
++      }
++      break;
++
++    }
++  }
++
++  public Object getFieldValue(_Fields field) {
++    switch (field) {
++    case PRIORITY:
++      return Integer.valueOf(getPriority());
++
++    case NAME:
++      return getName();
++
++    case ITERATOR_CLASS:
++      return getIteratorClass();
++
++    case PROPERTIES:
++      return getProperties();
++
++    }
++    throw new IllegalStateException();
++  }
++
++  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
++  public boolean isSet(_Fields field) {
++    if (field == null) {
++      throw new IllegalArgumentException();
++    }
++
++    switch (field) {
++    case PRIORITY:
++      return isSetPriority();
++    case NAME:
++      return isSetName();
++    case ITERATOR_CLASS:
++      return isSetIteratorClass();
++    case PROPERTIES:
++      return isSetProperties();
++    }
++    throw new IllegalStateException();
++  }
++
++  @Override
++  public boolean equals(Object that) {
++    if (that == null)
++      return false;
++    if (that instanceof PIteratorSetting)
++      return this.equals((PIteratorSetting)that);
++    return false;
++  }
++
++  public boolean equals(PIteratorSetting that) {
++    if (that == null)
++      return false;
++
++    boolean this_present_priority = true;
++    boolean that_present_priority = true;
++    if (this_present_priority || that_present_priority) {
++      if (!(this_present_priority && that_present_priority))
++        return false;
++      if (this.priority != that.priority)
++        return false;
++    }
++
++    boolean this_present_name = true && this.isSetName();
++    boolean that_present_name = true && that.isSetName();
++    if (this_present_name || that_present_name) {
++      if (!(this_present_name && that_present_name))
++        return false;
++      if (!this.name.equals(that.name))
++        return false;
++    }
++
++    boolean this_present_iteratorClass = true && this.isSetIteratorClass();
++    boolean that_present_iteratorClass = true && that.isSetIteratorClass();
++    if (this_present_iteratorClass || that_present_iteratorClass) {
++      if (!(this_present_iteratorClass && that_present_iteratorClass))
++        return false;
++      if (!this.iteratorClass.equals(that.iteratorClass))
++        return false;
++    }
++
++    boolean this_present_properties = true && this.isSetProperties();
++    boolean that_present_properties = true && that.isSetProperties();
++    if (this_present_properties || that_present_properties) {
++      if (!(this_present_properties && that_present_properties))
++        return false;
++      if (!this.properties.equals(that.properties))
++        return false;
++    }
++
++    return true;
++  }
++
++  @Override
++  public int hashCode() {
++    return 0;
++  }
++
++  public int compareTo(PIteratorSetting other) {
++    if (!getClass().equals(other.getClass())) {
++      return getClass().getName().compareTo(other.getClass().getName());
++    }
++
++    int lastComparison = 0;
++    PIteratorSetting typedOther = (PIteratorSetting)other;
++
++    lastComparison = Boolean.valueOf(isSetPriority()).compareTo(typedOther.isSetPriority());
++    if (lastComparison != 0) {
++      return lastComparison;
++    }
++    if (isSetPriority()) {
++      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.priority, typedOther.priority);
++      if (lastComparison != 0) {
++        return lastComparison;
++      }
++    }
++    lastComparison = Boolean.valueOf(isSetName()).compareTo(typedOther.isSetName());
++    if (lastComparison != 0) {
++      return lastComparison;
++    }
++    if (isSetName()) {
++      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.name, typedOther.name);
++      if (lastComparison != 0) {
++        return lastComparison;
++      }
++    }
++    lastComparison = Boolean.valueOf(isSetIteratorClass()).compareTo(typedOther.isSetIteratorClass());
++    if (lastComparison != 0) {
++      return lastComparison;
++    }
++    if (isSetIteratorClass()) {
++      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.iteratorClass, typedOther.iteratorClass);
++      if (lastComparison != 0) {
++        return lastComparison;
++      }
++    }
++    lastComparison = Boolean.valueOf(isSetProperties()).compareTo(typedOther.isSetProperties());
++    if (lastComparison != 0) {
++      return lastComparison;
++    }
++    if (isSetProperties()) {
++      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.properties, typedOther.properties);
++      if (lastComparison != 0) {
++        return lastComparison;
++      }
++    }
++    return 0;
++  }
++
++  public _Fields fieldForId(int fieldId) {
++    return _Fields.findByThriftId(fieldId);
++  }
++
++  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
++    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
++  }
++
++  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
++    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
++  }
++
++  @Override
++  public String toString() {
++    StringBuilder sb = new StringBuilder("PIteratorSetting(");
++    boolean first = true;
++
++    sb.append("priority:");
++    sb.append(this.priority);
++    first = false;
++    if (!first) sb.append(", ");
++    sb.append("name:");
++    if (this.name == null) {
++      sb.append("null");
++    } else {
++      sb.append(this.name);
++    }
++    first = false;
++    if (!first) sb.append(", ");
++    sb.append("iteratorClass:");
++    if (this.iteratorClass == null) {
++      sb.append("null");
++    } else {
++      sb.append(this.iteratorClass);
++    }
++    first = false;
++    if (!first) sb.append(", ");
++    sb.append("properties:");
++    if (this.properties == null) {
++      sb.append("null");
++    } else {
++      sb.append(this.properties);
++    }
++    first = false;
++    sb.append(")");
++    return sb.toString();
++  }
++
++  public void validate() throws org.apache.thrift.TException {
++    // check for required fields
++    // check for sub-struct validity
++  }
++
++  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
++    try {
++      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
++    } catch (org.apache.thrift.TException te) {
++      throw new java.io.IOException(te);
++    }
++  }
++
++  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
++    try {
++      // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
++      __isset_bitfield = 0;
++      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
++    } catch (org.apache.thrift.TException te) {
++      throw new java.io.IOException(te);
++    }
++  }
++
++  private static class PIteratorSettingStandardSchemeFactory implements SchemeFactory {
++    public PIteratorSettingStandardScheme getScheme() {
++      return new PIteratorSettingStandardScheme();
++    }
++  }
++
++  private static class PIteratorSettingStandardScheme extends StandardScheme<PIteratorSetting> {
++
++    public void read(org.apache.thrift.protocol.TProtocol iprot, PIteratorSetting struct) throws org.apache.thrift.TException {
++      org.apache.thrift.protocol.TField schemeField;
++      iprot.readStructBegin();
++      while (true)
++      {
++        schemeField = iprot.readFieldBegin();
++        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
++          break;
++        }
++        switch (schemeField.id) {
++          case 1: // PRIORITY
++            if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
++              struct.priority = iprot.readI32();
++              struct.setPriorityIsSet(true);
++            } else { 
++              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
++            }
++            break;
++          case 2: // NAME
++            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
++              struct.name = iprot.readString();
++              struct.setNameIsSet(true);
++            } else { 
++              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
++            }
++            break;
++          case 3: // ITERATOR_CLASS
++            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
++              struct.iteratorClass = iprot.readString();
++              struct.setIteratorClassIsSet(true);
++            } else { 
++              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
++            }
++            break;
++          case 4: // PROPERTIES
++            if (schemeField.type == org.apache.thrift.protocol.TType.MAP) {
++              {
++                org.apache.thrift.protocol.TMap _map8 = iprot.readMapBegin();
++                struct.properties = new HashMap<String,String>(2*_map8.size);
++                for (int _i9 = 0; _i9 < _map8.size; ++_i9)
++                {
++                  String _key10; // required
++                  String _val11; // required
++                  _key10 = iprot.readString();
++                  _val11 = iprot.readString();
++                  struct.properties.put(_key10, _val11);
++                }
++                iprot.readMapEnd();
++              }
++              struct.setPropertiesIsSet(true);
++            } else { 
++              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
++            }
++            break;
++          default:
++            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
++        }
++        iprot.readFieldEnd();
++      }
++      iprot.readStructEnd();
++
++      // check for required fields of primitive type, which can't be checked in the validate method
++      struct.validate();
++    }
++
++    public void write(org.apache.thrift.protocol.TProtocol oprot, PIteratorSetting struct) throws org.apache.thrift.TException {
++      struct.validate();
++
++      oprot.writeStructBegin(STRUCT_DESC);
++      oprot.writeFieldBegin(PRIORITY_FIELD_DESC);
++      oprot.writeI32(struct.priority);
++      oprot.writeFieldEnd();
++      if (struct.name != null) {
++        oprot.writeFieldBegin(NAME_FIELD_DESC);
++        oprot.writeString(struct.name);
++        oprot.writeFieldEnd();
++      }
++      if (struct.iteratorClass != null) {
++        oprot.writeFieldBegin(ITERATOR_CLASS_FIELD_DESC);
++        oprot.writeString(struct.iteratorClass);
++        oprot.writeFieldEnd();
++      }
++      if (struct.properties != null) {
++        oprot.writeFieldBegin(PROPERTIES_FIELD_DESC);
++        {
++          oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.properties.size()));
++          for (Map.Entry<String, String> _iter12 : struct.properties.entrySet())
++          {
++            oprot.writeString(_iter12.getKey());
++            oprot.writeString(_iter12.getValue());
++          }
++          oprot.writeMapEnd();
++        }
++        oprot.writeFieldEnd();
++      }
++      oprot.writeFieldStop();
++      oprot.writeStructEnd();
++    }
++
++  }
++
++  private static class PIteratorSettingTupleSchemeFactory implements SchemeFactory {
++    public PIteratorSettingTupleScheme getScheme() {
++      return new PIteratorSettingTupleScheme();
++    }
++  }
++
++  private static class PIteratorSettingTupleScheme extends TupleScheme<PIteratorSetting> {
++
++    @Override
++    public void write(org.apache.thrift.protocol.TProtocol prot, PIteratorSetting struct) throws org.apache.thrift.TException {
++      TTupleProtocol oprot = (TTupleProtocol) prot;
++      BitSet optionals = new BitSet();
++      if (struct.isSetPriority()) {
++        optionals.set(0);
++      }
++      if (struct.isSetName()) {
++        optionals.set(1);
++      }
++      if (struct.isSetIteratorClass()) {
++        optionals.set(2);
++      }
++      if (struct.isSetProperties()) {
++        optionals.set(3);
++      }
++      oprot.writeBitSet(optionals, 4);
++      if (struct.isSetPriority()) {
++        oprot.writeI32(struct.priority);
++      }
++      if (struct.isSetName()) {
++        oprot.writeString(struct.name);
++      }
++      if (struct.isSetIteratorClass()) {
++        oprot.writeString(struct.iteratorClass);
++      }
++      if (struct.isSetProperties()) {
++        {
++          oprot.writeI32(struct.properties.size());
++          for (Map.Entry<String, String> _iter13 : struct.properties.entrySet())
++          {
++            oprot.writeString(_iter13.getKey());
++            oprot.writeString(_iter13.getValue());
++          }
++        }
++      }
++    }
++
++    @Override
++    public void read(org.apache.thrift.protocol.TProtocol prot, PIteratorSetting struct) throws org.apache.thrift.TException {
++      TTupleProtocol iprot = (TTupleProtocol) prot;
++      BitSet incoming = iprot.readBitSet(4);
++      if (incoming.get(0)) {
++        struct.priority = iprot.readI32();
++        struct.setPriorityIsSet(true);
++      }
++      if (incoming.get(1)) {
++        struct.name = iprot.readString();
++        struct.setNameIsSet(true);
++      }
++      if (incoming.get(2)) {
++        struct.iteratorClass = iprot.readString();
++        struct.setIteratorClassIsSet(true);
++      }
++      if (incoming.get(3)) {
++        {
++          org.apache.thrift.protocol.TMap _map14 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32());
++          struct.properties = new HashMap<String,String>(2*_map14.size);
++          for (int _i15 = 0; _i15 < _map14.size; ++_i15)
++          {
++            String _key16; // required
++            String _val17; // required
++            _key16 = iprot.readString();
++            _val17 = iprot.readString();
++            struct.properties.put(_key16, _val17);
++          }
++        }
++        struct.setPropertiesIsSet(true);
++      }
++    }
++  }
++
++}
++

http://git-wip-us.apache.org/repos/asf/accumulo/blob/58fcad6e/proxy/src/main/java/org/apache/accumulo/proxy/thrift/PKey.java
----------------------------------------------------------------------
diff --cc proxy/src/main/java/org/apache/accumulo/proxy/thrift/PKey.java
index 0000000,0000000..5f2c81f
new file mode 100644
--- /dev/null
+++ b/proxy/src/main/java/org/apache/accumulo/proxy/thrift/PKey.java
@@@ -1,0 -1,0 +1,846 @@@
++/*
++ * Licensed to the Apache Software Foundation (ASF) under one or more
++ * contributor license agreements.  See the NOTICE file distributed with
++ * this work for additional information regarding copyright ownership.
++ * The ASF licenses this file to You under the Apache License, Version 2.0
++ * (the "License"); you may not use this file except in compliance with
++ * the License.  You may obtain a copy of the License at
++ *
++ *     http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++/**
++ * Autogenerated by Thrift Compiler (0.9.0)
++ *
++ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
++ *  @generated
++ */
++package org.apache.accumulo.proxy.thrift;
++
++import org.apache.thrift.scheme.IScheme;
++import org.apache.thrift.scheme.SchemeFactory;
++import org.apache.thrift.scheme.StandardScheme;
++
++import org.apache.thrift.scheme.TupleScheme;
++import org.apache.thrift.protocol.TTupleProtocol;
++import org.apache.thrift.protocol.TProtocolException;
++import org.apache.thrift.EncodingUtils;
++import org.apache.thrift.TException;
++import java.util.List;
++import java.util.ArrayList;
++import java.util.Map;
++import java.util.HashMap;
++import java.util.EnumMap;
++import java.util.Set;
++import java.util.HashSet;
++import java.util.EnumSet;
++import java.util.Collections;
++import java.util.BitSet;
++import java.nio.ByteBuffer;
++import java.util.Arrays;
++import org.slf4j.Logger;
++import org.slf4j.LoggerFactory;
++
++@SuppressWarnings("all") public class PKey implements org.apache.thrift.TBase<PKey, PKey._Fields>, java.io.Serializable, Cloneable {
++  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("PKey");
++
++  private static final org.apache.thrift.protocol.TField ROW_FIELD_DESC = new org.apache.thrift.protocol.TField("row", org.apache.thrift.protocol.TType.STRING, (short)1);
++  private static final org.apache.thrift.protocol.TField COL_FAMILY_FIELD_DESC = new org.apache.thrift.protocol.TField("colFamily", org.apache.thrift.protocol.TType.STRING, (short)2);
++  private static final org.apache.thrift.protocol.TField COL_QUALIFIER_FIELD_DESC = new org.apache.thrift.protocol.TField("colQualifier", org.apache.thrift.protocol.TType.STRING, (short)3);
++  private static final org.apache.thrift.protocol.TField COL_VISIBILITY_FIELD_DESC = new org.apache.thrift.protocol.TField("colVisibility", org.apache.thrift.protocol.TType.STRING, (short)4);
++  private static final org.apache.thrift.protocol.TField TIMESTAMP_FIELD_DESC = new org.apache.thrift.protocol.TField("timestamp", org.apache.thrift.protocol.TType.I64, (short)5);
++
++  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
++  static {
++    schemes.put(StandardScheme.class, new PKeyStandardSchemeFactory());
++    schemes.put(TupleScheme.class, new PKeyTupleSchemeFactory());
++  }
++
++  public ByteBuffer row; // required
++  public ByteBuffer colFamily; // required
++  public ByteBuffer colQualifier; // required
++  public ByteBuffer colVisibility; // required
++  public long timestamp; // optional
++
++  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
++  @SuppressWarnings("all") public enum _Fields implements org.apache.thrift.TFieldIdEnum {
++    ROW((short)1, "row"),
++    COL_FAMILY((short)2, "colFamily"),
++    COL_QUALIFIER((short)3, "colQualifier"),
++    COL_VISIBILITY((short)4, "colVisibility"),
++    TIMESTAMP((short)5, "timestamp");
++
++    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
++
++    static {
++      for (_Fields field : EnumSet.allOf(_Fields.class)) {
++        byName.put(field.getFieldName(), field);
++      }
++    }
++
++    /**
++     * Find the _Fields constant that matches fieldId, or null if its not found.
++     */
++    public static _Fields findByThriftId(int fieldId) {
++      switch(fieldId) {
++        case 1: // ROW
++          return ROW;
++        case 2: // COL_FAMILY
++          return COL_FAMILY;
++        case 3: // COL_QUALIFIER
++          return COL_QUALIFIER;
++        case 4: // COL_VISIBILITY
++          return COL_VISIBILITY;
++        case 5: // TIMESTAMP
++          return TIMESTAMP;
++        default:
++          return null;
++      }
++    }
++
++    /**
++     * Find the _Fields constant that matches fieldId, throwing an exception
++     * if it is not found.
++     */
++    public static _Fields findByThriftIdOrThrow(int fieldId) {
++      _Fields fields = findByThriftId(fieldId);
++      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
++      return fields;
++    }
++
++    /**
++     * Find the _Fields constant that matches name, or null if its not found.
++     */
++    public static _Fields findByName(String name) {
++      return byName.get(name);
++    }
++
++    private final short _thriftId;
++    private final String _fieldName;
++
++    _Fields(short thriftId, String fieldName) {
++      _thriftId = thriftId;
++      _fieldName = fieldName;
++    }
++
++    public short getThriftFieldId() {
++      return _thriftId;
++    }
++
++    public String getFieldName() {
++      return _fieldName;
++    }
++  }
++
++  // isset id assignments
++  private static final int __TIMESTAMP_ISSET_ID = 0;
++  private byte __isset_bitfield = 0;
++  private _Fields optionals[] = {_Fields.TIMESTAMP};
++  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
++  static {
++    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
++    tmpMap.put(_Fields.ROW, new org.apache.thrift.meta_data.FieldMetaData("row", org.apache.thrift.TFieldRequirementType.DEFAULT, 
++        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING        , true)));
++    tmpMap.put(_Fields.COL_FAMILY, new org.apache.thrift.meta_data.FieldMetaData("colFamily", org.apache.thrift.TFieldRequirementType.DEFAULT, 
++        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING        , true)));
++    tmpMap.put(_Fields.COL_QUALIFIER, new org.apache.thrift.meta_data.FieldMetaData("colQualifier", org.apache.thrift.TFieldRequirementType.DEFAULT, 
++        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING        , true)));
++    tmpMap.put(_Fields.COL_VISIBILITY, new org.apache.thrift.meta_data.FieldMetaData("colVisibility", org.apache.thrift.TFieldRequirementType.DEFAULT, 
++        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING        , true)));
++    tmpMap.put(_Fields.TIMESTAMP, new org.apache.thrift.meta_data.FieldMetaData("timestamp", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
++        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
++    metaDataMap = Collections.unmodifiableMap(tmpMap);
++    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(PKey.class, metaDataMap);
++  }
++
++  public PKey() {
++  }
++
++  public PKey(
++    ByteBuffer row,
++    ByteBuffer colFamily,
++    ByteBuffer colQualifier,
++    ByteBuffer colVisibility)
++  {
++    this();
++    this.row = row;
++    this.colFamily = colFamily;
++    this.colQualifier = colQualifier;
++    this.colVisibility = colVisibility;
++  }
++
++  /**
++   * Performs a deep copy on <i>other</i>.
++   */
++  public PKey(PKey other) {
++    __isset_bitfield = other.__isset_bitfield;
++    if (other.isSetRow()) {
++      this.row = org.apache.thrift.TBaseHelper.copyBinary(other.row);
++;
++    }
++    if (other.isSetColFamily()) {
++      this.colFamily = org.apache.thrift.TBaseHelper.copyBinary(other.colFamily);
++;
++    }
++    if (other.isSetColQualifier()) {
++      this.colQualifier = org.apache.thrift.TBaseHelper.copyBinary(other.colQualifier);
++;
++    }
++    if (other.isSetColVisibility()) {
++      this.colVisibility = org.apache.thrift.TBaseHelper.copyBinary(other.colVisibility);
++;
++    }
++    this.timestamp = other.timestamp;
++  }
++
++  public PKey deepCopy() {
++    return new PKey(this);
++  }
++
++  @Override
++  public void clear() {
++    this.row = null;
++    this.colFamily = null;
++    this.colQualifier = null;
++    this.colVisibility = null;
++    setTimestampIsSet(false);
++    this.timestamp = 0;
++  }
++
++  public byte[] getRow() {
++    setRow(org.apache.thrift.TBaseHelper.rightSize(row));
++    return row == null ? null : row.array();
++  }
++
++  public ByteBuffer bufferForRow() {
++    return row;
++  }
++
++  public PKey setRow(byte[] row) {
++    setRow(row == null ? (ByteBuffer)null : ByteBuffer.wrap(row));
++    return this;
++  }
++
++  public PKey setRow(ByteBuffer row) {
++    this.row = row;
++    return this;
++  }
++
++  public void unsetRow() {
++    this.row = null;
++  }
++
++  /** Returns true if field row is set (has been assigned a value) and false otherwise */
++  public boolean isSetRow() {
++    return this.row != null;
++  }
++
++  public void setRowIsSet(boolean value) {
++    if (!value) {
++      this.row = null;
++    }
++  }
++
++  public byte[] getColFamily() {
++    setColFamily(org.apache.thrift.TBaseHelper.rightSize(colFamily));
++    return colFamily == null ? null : colFamily.array();
++  }
++
++  public ByteBuffer bufferForColFamily() {
++    return colFamily;
++  }
++
++  public PKey setColFamily(byte[] colFamily) {
++    setColFamily(colFamily == null ? (ByteBuffer)null : ByteBuffer.wrap(colFamily));
++    return this;
++  }
++
++  public PKey setColFamily(ByteBuffer colFamily) {
++    this.colFamily = colFamily;
++    return this;
++  }
++
++  public void unsetColFamily() {
++    this.colFamily = null;
++  }
++
++  /** Returns true if field colFamily is set (has been assigned a value) and false otherwise */
++  public boolean isSetColFamily() {
++    return this.colFamily != null;
++  }
++
++  public void setColFamilyIsSet(boolean value) {
++    if (!value) {
++      this.colFamily = null;
++    }
++  }
++
++  public byte[] getColQualifier() {
++    setColQualifier(org.apache.thrift.TBaseHelper.rightSize(colQualifier));
++    return colQualifier == null ? null : colQualifier.array();
++  }
++
++  public ByteBuffer bufferForColQualifier() {
++    return colQualifier;
++  }
++
++  public PKey setColQualifier(byte[] colQualifier) {
++    setColQualifier(colQualifier == null ? (ByteBuffer)null : ByteBuffer.wrap(colQualifier));
++    return this;
++  }
++
++  public PKey setColQualifier(ByteBuffer colQualifier) {
++    this.colQualifier = colQualifier;
++    return this;
++  }
++
++  public void unsetColQualifier() {
++    this.colQualifier = null;
++  }
++
++  /** Returns true if field colQualifier is set (has been assigned a value) and false otherwise */
++  public boolean isSetColQualifier() {
++    return this.colQualifier != null;
++  }
++
++  public void setColQualifierIsSet(boolean value) {
++    if (!value) {
++      this.colQualifier = null;
++    }
++  }
++
++  public byte[] getColVisibility() {
++    setColVisibility(org.apache.thrift.TBaseHelper.rightSize(colVisibility));
++    return colVisibility == null ? null : colVisibility.array();
++  }
++
++  public ByteBuffer bufferForColVisibility() {
++    return colVisibility;
++  }
++
++  public PKey setColVisibility(byte[] colVisibility) {
++    setColVisibility(colVisibility == null ? (ByteBuffer)null : ByteBuffer.wrap(colVisibility));
++    return this;
++  }
++
++  public PKey setColVisibility(ByteBuffer colVisibility) {
++    this.colVisibility = colVisibility;
++    return this;
++  }
++
++  public void unsetColVisibility() {
++    this.colVisibility = null;
++  }
++
++  /** Returns true if field colVisibility is set (has been assigned a value) and false otherwise */
++  public boolean isSetColVisibility() {
++    return this.colVisibility != null;
++  }
++
++  public void setColVisibilityIsSet(boolean value) {
++    if (!value) {
++      this.colVisibility = null;
++    }
++  }
++
++  public long getTimestamp() {
++    return this.timestamp;
++  }
++
++  public PKey setTimestamp(long timestamp) {
++    this.timestamp = timestamp;
++    setTimestampIsSet(true);
++    return this;
++  }
++
++  public void unsetTimestamp() {
++    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __TIMESTAMP_ISSET_ID);
++  }
++
++  /** Returns true if field timestamp is set (has been assigned a value) and false otherwise */
++  public boolean isSetTimestamp() {
++    return EncodingUtils.testBit(__isset_bitfield, __TIMESTAMP_ISSET_ID);
++  }
++
++  public void setTimestampIsSet(boolean value) {
++    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __TIMESTAMP_ISSET_ID, value);
++  }
++
++  public void setFieldValue(_Fields field, Object value) {
++    switch (field) {
++    case ROW:
++      if (value == null) {
++        unsetRow();
++      } else {
++        setRow((ByteBuffer)value);
++      }
++      break;
++
++    case COL_FAMILY:
++      if (value == null) {
++        unsetColFamily();
++      } else {
++        setColFamily((ByteBuffer)value);
++      }
++      break;
++
++    case COL_QUALIFIER:
++      if (value == null) {
++        unsetColQualifier();
++      } else {
++        setColQualifier((ByteBuffer)value);
++      }
++      break;
++
++    case COL_VISIBILITY:
++      if (value == null) {
++        unsetColVisibility();
++      } else {
++        setColVisibility((ByteBuffer)value);
++      }
++      break;
++
++    case TIMESTAMP:
++      if (value == null) {
++        unsetTimestamp();
++      } else {
++        setTimestamp((Long)value);
++      }
++      break;
++
++    }
++  }
++
++  public Object getFieldValue(_Fields field) {
++    switch (field) {
++    case ROW:
++      return getRow();
++
++    case COL_FAMILY:
++      return getColFamily();
++
++    case COL_QUALIFIER:
++      return getColQualifier();
++
++    case COL_VISIBILITY:
++      return getColVisibility();
++
++    case TIMESTAMP:
++      return Long.valueOf(getTimestamp());
++
++    }
++    throw new IllegalStateException();
++  }
++
++  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
++  public boolean isSet(_Fields field) {
++    if (field == null) {
++      throw new IllegalArgumentException();
++    }
++
++    switch (field) {
++    case ROW:
++      return isSetRow();
++    case COL_FAMILY:
++      return isSetColFamily();
++    case COL_QUALIFIER:
++      return isSetColQualifier();
++    case COL_VISIBILITY:
++      return isSetColVisibility();
++    case TIMESTAMP:
++      return isSetTimestamp();
++    }
++    throw new IllegalStateException();
++  }
++
++  @Override
++  public boolean equals(Object that) {
++    if (that == null)
++      return false;
++    if (that instanceof PKey)
++      return this.equals((PKey)that);
++    return false;
++  }
++
++  public boolean equals(PKey that) {
++    if (that == null)
++      return false;
++
++    boolean this_present_row = true && this.isSetRow();
++    boolean that_present_row = true && that.isSetRow();
++    if (this_present_row || that_present_row) {
++      if (!(this_present_row && that_present_row))
++        return false;
++      if (!this.row.equals(that.row))
++        return false;
++    }
++
++    boolean this_present_colFamily = true && this.isSetColFamily();
++    boolean that_present_colFamily = true && that.isSetColFamily();
++    if (this_present_colFamily || that_present_colFamily) {
++      if (!(this_present_colFamily && that_present_colFamily))
++        return false;
++      if (!this.colFamily.equals(that.colFamily))
++        return false;
++    }
++
++    boolean this_present_colQualifier = true && this.isSetColQualifier();
++    boolean that_present_colQualifier = true && that.isSetColQualifier();
++    if (this_present_colQualifier || that_present_colQualifier) {
++      if (!(this_present_colQualifier && that_present_colQualifier))
++        return false;
++      if (!this.colQualifier.equals(that.colQualifier))
++        return false;
++    }
++
++    boolean this_present_colVisibility = true && this.isSetColVisibility();
++    boolean that_present_colVisibility = true && that.isSetColVisibility();
++    if (this_present_colVisibility || that_present_colVisibility) {
++      if (!(this_present_colVisibility && that_present_colVisibility))
++        return false;
++      if (!this.colVisibility.equals(that.colVisibility))
++        return false;
++    }
++
++    boolean this_present_timestamp = true && this.isSetTimestamp();
++    boolean that_present_timestamp = true && that.isSetTimestamp();
++    if (this_present_timestamp || that_present_timestamp) {
++      if (!(this_present_timestamp && that_present_timestamp))
++        return false;
++      if (this.timestamp != that.timestamp)
++        return false;
++    }
++
++    return true;
++  }
++
++  @Override
++  public int hashCode() {
++    return 0;
++  }
++
++  public int compareTo(PKey other) {
++    if (!getClass().equals(other.getClass())) {
++      return getClass().getName().compareTo(other.getClass().getName());
++    }
++
++    int lastComparison = 0;
++    PKey typedOther = (PKey)other;
++
++    lastComparison = Boolean.valueOf(isSetRow()).compareTo(typedOther.isSetRow());
++    if (lastComparison != 0) {
++      return lastComparison;
++    }
++    if (isSetRow()) {
++      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.row, typedOther.row);
++      if (lastComparison != 0) {
++        return lastComparison;
++      }
++    }
++    lastComparison = Boolean.valueOf(isSetColFamily()).compareTo(typedOther.isSetColFamily());
++    if (lastComparison != 0) {
++      return lastComparison;
++    }
++    if (isSetColFamily()) {
++      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.colFamily, typedOther.colFamily);
++      if (lastComparison != 0) {
++        return lastComparison;
++      }
++    }
++    lastComparison = Boolean.valueOf(isSetColQualifier()).compareTo(typedOther.isSetColQualifier());
++    if (lastComparison != 0) {
++      return lastComparison;
++    }
++    if (isSetColQualifier()) {
++      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.colQualifier, typedOther.colQualifier);
++      if (lastComparison != 0) {
++        return lastComparison;
++      }
++    }
++    lastComparison = Boolean.valueOf(isSetColVisibility()).compareTo(typedOther.isSetColVisibility());
++    if (lastComparison != 0) {
++      return lastComparison;
++    }
++    if (isSetColVisibility()) {
++      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.colVisibility, typedOther.colVisibility);
++      if (lastComparison != 0) {
++        return lastComparison;
++      }
++    }
++    lastComparison = Boolean.valueOf(isSetTimestamp()).compareTo(typedOther.isSetTimestamp());
++    if (lastComparison != 0) {
++      return lastComparison;
++    }
++    if (isSetTimestamp()) {
++      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.timestamp, typedOther.timestamp);
++      if (lastComparison != 0) {
++        return lastComparison;
++      }
++    }
++    return 0;
++  }
++
++  public _Fields fieldForId(int fieldId) {
++    return _Fields.findByThriftId(fieldId);
++  }
++
++  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
++    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
++  }
++
++  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
++    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
++  }
++
++  @Override
++  public String toString() {
++    StringBuilder sb = new StringBuilder("PKey(");
++    boolean first = true;
++
++    sb.append("row:");
++    if (this.row == null) {
++      sb.append("null");
++    } else {
++      org.apache.thrift.TBaseHelper.toString(this.row, sb);
++    }
++    first = false;
++    if (!first) sb.append(", ");
++    sb.append("colFamily:");
++    if (this.colFamily == null) {
++      sb.append("null");
++    } else {
++      org.apache.thrift.TBaseHelper.toString(this.colFamily, sb);
++    }
++    first = false;
++    if (!first) sb.append(", ");
++    sb.append("colQualifier:");
++    if (this.colQualifier == null) {
++      sb.append("null");
++    } else {
++      org.apache.thrift.TBaseHelper.toString(this.colQualifier, sb);
++    }
++    first = false;
++    if (!first) sb.append(", ");
++    sb.append("colVisibility:");
++    if (this.colVisibility == null) {
++      sb.append("null");
++    } else {
++      org.apache.thrift.TBaseHelper.toString(this.colVisibility, sb);
++    }
++    first = false;
++    if (isSetTimestamp()) {
++      if (!first) sb.append(", ");
++      sb.append("timestamp:");
++      sb.append(this.timestamp);
++      first = false;
++    }
++    sb.append(")");
++    return sb.toString();
++  }
++
++  public void validate() throws org.apache.thrift.TException {
++    // check for required fields
++    // check for sub-struct validity
++  }
++
++  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
++    try {
++      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
++    } catch (org.apache.thrift.TException te) {
++      throw new java.io.IOException(te);
++    }
++  }
++
++  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
++    try {
++      // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
++      __isset_bitfield = 0;
++      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
++    } catch (org.apache.thrift.TException te) {
++      throw new java.io.IOException(te);
++    }
++  }
++
++  private static class PKeyStandardSchemeFactory implements SchemeFactory {
++    public PKeyStandardScheme getScheme() {
++      return new PKeyStandardScheme();
++    }
++  }
++
++  private static class PKeyStandardScheme extends StandardScheme<PKey> {
++
++    public void read(org.apache.thrift.protocol.TProtocol iprot, PKey struct) throws org.apache.thrift.TException {
++      org.apache.thrift.protocol.TField schemeField;
++      iprot.readStructBegin();
++      while (true)
++      {
++        schemeField = iprot.readFieldBegin();
++        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
++          break;
++        }
++        switch (schemeField.id) {
++          case 1: // ROW
++            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
++              struct.row = iprot.readBinary();
++              struct.setRowIsSet(true);
++            } else { 
++              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
++            }
++            break;
++          case 2: // COL_FAMILY
++            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
++              struct.colFamily = iprot.readBinary();
++              struct.setColFamilyIsSet(true);
++            } else { 
++              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
++            }
++            break;
++          case 3: // COL_QUALIFIER
++            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
++              struct.colQualifier = iprot.readBinary();
++              struct.setColQualifierIsSet(true);
++            } else { 
++              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
++            }
++            break;
++          case 4: // COL_VISIBILITY
++            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
++              struct.colVisibility = iprot.readBinary();
++              struct.setColVisibilityIsSet(true);
++            } else { 
++              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
++            }
++            break;
++          case 5: // TIMESTAMP
++            if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
++              struct.timestamp = iprot.readI64();
++              struct.setTimestampIsSet(true);
++            } else { 
++              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
++            }
++            break;
++          default:
++            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
++        }
++        iprot.readFieldEnd();
++      }
++      iprot.readStructEnd();
++
++      // check for required fields of primitive type, which can't be checked in the validate method
++      struct.validate();
++    }
++
++    public void write(org.apache.thrift.protocol.TProtocol oprot, PKey struct) throws org.apache.thrift.TException {
++      struct.validate();
++
++      oprot.writeStructBegin(STRUCT_DESC);
++      if (struct.row != null) {
++        oprot.writeFieldBegin(ROW_FIELD_DESC);
++        oprot.writeBinary(struct.row);
++        oprot.writeFieldEnd();
++      }
++      if (struct.colFamily != null) {
++        oprot.writeFieldBegin(COL_FAMILY_FIELD_DESC);
++        oprot.writeBinary(struct.colFamily);
++        oprot.writeFieldEnd();
++      }
++      if (struct.colQualifier != null) {
++        oprot.writeFieldBegin(COL_QUALIFIER_FIELD_DESC);
++        oprot.writeBinary(struct.colQualifier);
++        oprot.writeFieldEnd();
++      }
++      if (struct.colVisibility != null) {
++        oprot.writeFieldBegin(COL_VISIBILITY_FIELD_DESC);
++        oprot.writeBinary(struct.colVisibility);
++        oprot.writeFieldEnd();
++      }
++      if (struct.isSetTimestamp()) {
++        oprot.writeFieldBegin(TIMESTAMP_FIELD_DESC);
++        oprot.writeI64(struct.timestamp);
++        oprot.writeFieldEnd();
++      }
++      oprot.writeFieldStop();
++      oprot.writeStructEnd();
++    }
++
++  }
++
++  private static class PKeyTupleSchemeFactory implements SchemeFactory {
++    public PKeyTupleScheme getScheme() {
++      return new PKeyTupleScheme();
++    }
++  }
++
++  private static class PKeyTupleScheme extends TupleScheme<PKey> {
++
++    @Override
++    public void write(org.apache.thrift.protocol.TProtocol prot, PKey struct) throws org.apache.thrift.TException {
++      TTupleProtocol oprot = (TTupleProtocol) prot;
++      BitSet optionals = new BitSet();
++      if (struct.isSetRow()) {
++        optionals.set(0);
++      }
++      if (struct.isSetColFamily()) {
++        optionals.set(1);
++      }
++      if (struct.isSetColQualifier()) {
++        optionals.set(2);
++      }
++      if (struct.isSetColVisibility()) {
++        optionals.set(3);
++      }
++      if (struct.isSetTimestamp()) {
++        optionals.set(4);
++      }
++      oprot.writeBitSet(optionals, 5);
++      if (struct.isSetRow()) {
++        oprot.writeBinary(struct.row);
++      }
++      if (struct.isSetColFamily()) {
++        oprot.writeBinary(struct.colFamily);
++      }
++      if (struct.isSetColQualifier()) {
++        oprot.writeBinary(struct.colQualifier);
++      }
++      if (struct.isSetColVisibility()) {
++        oprot.writeBinary(struct.colVisibility);
++      }
++      if (struct.isSetTimestamp()) {
++        oprot.writeI64(struct.timestamp);
++      }
++    }
++
++    @Override
++    public void read(org.apache.thrift.protocol.TProtocol prot, PKey struct) throws org.apache.thrift.TException {
++      TTupleProtocol iprot = (TTupleProtocol) prot;
++      BitSet incoming = iprot.readBitSet(5);
++      if (incoming.get(0)) {
++        struct.row = iprot.readBinary();
++        struct.setRowIsSet(true);
++      }
++      if (incoming.get(1)) {
++        struct.colFamily = iprot.readBinary();
++        struct.setColFamilyIsSet(true);
++      }
++      if (incoming.get(2)) {
++        struct.colQualifier = iprot.readBinary();
++        struct.setColQualifierIsSet(true);
++      }
++      if (incoming.get(3)) {
++        struct.colVisibility = iprot.readBinary();
++        struct.setColVisibilityIsSet(true);
++      }
++      if (incoming.get(4)) {
++        struct.timestamp = iprot.readI64();
++        struct.setTimestampIsSet(true);
++      }
++    }
++  }
++
++}
++

http://git-wip-us.apache.org/repos/asf/accumulo/blob/58fcad6e/proxy/src/main/java/org/apache/accumulo/proxy/thrift/PKeyValue.java
----------------------------------------------------------------------
diff --cc proxy/src/main/java/org/apache/accumulo/proxy/thrift/PKeyValue.java
index 0000000,0000000..b3f776a
new file mode 100644
--- /dev/null
+++ b/proxy/src/main/java/org/apache/accumulo/proxy/thrift/PKeyValue.java
@@@ -1,0 -1,0 +1,518 @@@
++/*
++ * Licensed to the Apache Software Foundation (ASF) under one or more
++ * contributor license agreements.  See the NOTICE file distributed with
++ * this work for additional information regarding copyright ownership.
++ * The ASF licenses this file to You under the Apache License, Version 2.0
++ * (the "License"); you may not use this file except in compliance with
++ * the License.  You may obtain a copy of the License at
++ *
++ *     http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++/**
++ * Autogenerated by Thrift Compiler (0.9.0)
++ *
++ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
++ *  @generated
++ */
++package org.apache.accumulo.proxy.thrift;
++
++import org.apache.thrift.scheme.IScheme;
++import org.apache.thrift.scheme.SchemeFactory;
++import org.apache.thrift.scheme.StandardScheme;
++
++import org.apache.thrift.scheme.TupleScheme;
++import org.apache.thrift.protocol.TTupleProtocol;
++import org.apache.thrift.protocol.TProtocolException;
++import org.apache.thrift.EncodingUtils;
++import org.apache.thrift.TException;
++import java.util.List;
++import java.util.ArrayList;
++import java.util.Map;
++import java.util.HashMap;
++import java.util.EnumMap;
++import java.util.Set;
++import java.util.HashSet;
++import java.util.EnumSet;
++import java.util.Collections;
++import java.util.BitSet;
++import java.nio.ByteBuffer;
++import java.util.Arrays;
++import org.slf4j.Logger;
++import org.slf4j.LoggerFactory;
++
++@SuppressWarnings("all") public class PKeyValue implements org.apache.thrift.TBase<PKeyValue, PKeyValue._Fields>, java.io.Serializable, Cloneable {
++  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("PKeyValue");
++
++  private static final org.apache.thrift.protocol.TField KEY_FIELD_DESC = new org.apache.thrift.protocol.TField("key", org.apache.thrift.protocol.TType.STRUCT, (short)1);
++  private static final org.apache.thrift.protocol.TField VALUE_FIELD_DESC = new org.apache.thrift.protocol.TField("value", org.apache.thrift.protocol.TType.STRING, (short)2);
++
++  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
++  static {
++    schemes.put(StandardScheme.class, new PKeyValueStandardSchemeFactory());
++    schemes.put(TupleScheme.class, new PKeyValueTupleSchemeFactory());
++  }
++
++  public PKey key; // required
++  public ByteBuffer value; // required
++
++  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
++  @SuppressWarnings("all") public enum _Fields implements org.apache.thrift.TFieldIdEnum {
++    KEY((short)1, "key"),
++    VALUE((short)2, "value");
++
++    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
++
++    static {
++      for (_Fields field : EnumSet.allOf(_Fields.class)) {
++        byName.put(field.getFieldName(), field);
++      }
++    }
++
++    /**
++     * Find the _Fields constant that matches fieldId, or null if its not found.
++     */
++    public static _Fields findByThriftId(int fieldId) {
++      switch(fieldId) {
++        case 1: // KEY
++          return KEY;
++        case 2: // VALUE
++          return VALUE;
++        default:
++          return null;
++      }
++    }
++
++    /**
++     * Find the _Fields constant that matches fieldId, throwing an exception
++     * if it is not found.
++     */
++    public static _Fields findByThriftIdOrThrow(int fieldId) {
++      _Fields fields = findByThriftId(fieldId);
++      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
++      return fields;
++    }
++
++    /**
++     * Find the _Fields constant that matches name, or null if its not found.
++     */
++    public static _Fields findByName(String name) {
++      return byName.get(name);
++    }
++
++    private final short _thriftId;
++    private final String _fieldName;
++
++    _Fields(short thriftId, String fieldName) {
++      _thriftId = thriftId;
++      _fieldName = fieldName;
++    }
++
++    public short getThriftFieldId() {
++      return _thriftId;
++    }
++
++    public String getFieldName() {
++      return _fieldName;
++    }
++  }
++
++  // isset id assignments
++  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
++  static {
++    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
++    tmpMap.put(_Fields.KEY, new org.apache.thrift.meta_data.FieldMetaData("key", org.apache.thrift.TFieldRequirementType.DEFAULT, 
++        new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, PKey.class)));
++    tmpMap.put(_Fields.VALUE, new org.apache.thrift.meta_data.FieldMetaData("value", org.apache.thrift.TFieldRequirementType.DEFAULT, 
++        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING        , true)));
++    metaDataMap = Collections.unmodifiableMap(tmpMap);
++    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(PKeyValue.class, metaDataMap);
++  }
++
++  public PKeyValue() {
++  }
++
++  public PKeyValue(
++    PKey key,
++    ByteBuffer value)
++  {
++    this();
++    this.key = key;
++    this.value = value;
++  }
++
++  /**
++   * Performs a deep copy on <i>other</i>.
++   */
++  public PKeyValue(PKeyValue other) {
++    if (other.isSetKey()) {
++      this.key = new PKey(other.key);
++    }
++    if (other.isSetValue()) {
++      this.value = org.apache.thrift.TBaseHelper.copyBinary(other.value);
++;
++    }
++  }
++
++  public PKeyValue deepCopy() {
++    return new PKeyValue(this);
++  }
++
++  @Override
++  public void clear() {
++    this.key = null;
++    this.value = null;
++  }
++
++  public PKey getKey() {
++    return this.key;
++  }
++
++  public PKeyValue setKey(PKey key) {
++    this.key = key;
++    return this;
++  }
++
++  public void unsetKey() {
++    this.key = null;
++  }
++
++  /** Returns true if field key is set (has been assigned a value) and false otherwise */
++  public boolean isSetKey() {
++    return this.key != null;
++  }
++
++  public void setKeyIsSet(boolean value) {
++    if (!value) {
++      this.key = null;
++    }
++  }
++
++  public byte[] getValue() {
++    setValue(org.apache.thrift.TBaseHelper.rightSize(value));
++    return value == null ? null : value.array();
++  }
++
++  public ByteBuffer bufferForValue() {
++    return value;
++  }
++
++  public PKeyValue setValue(byte[] value) {
++    setValue(value == null ? (ByteBuffer)null : ByteBuffer.wrap(value));
++    return this;
++  }
++
++  public PKeyValue setValue(ByteBuffer value) {
++    this.value = value;
++    return this;
++  }
++
++  public void unsetValue() {
++    this.value = null;
++  }
++
++  /** Returns true if field value is set (has been assigned a value) and false otherwise */
++  public boolean isSetValue() {
++    return this.value != null;
++  }
++
++  public void setValueIsSet(boolean value) {
++    if (!value) {
++      this.value = null;
++    }
++  }
++
++  public void setFieldValue(_Fields field, Object value) {
++    switch (field) {
++    case KEY:
++      if (value == null) {
++        unsetKey();
++      } else {
++        setKey((PKey)value);
++      }
++      break;
++
++    case VALUE:
++      if (value == null) {
++        unsetValue();
++      } else {
++        setValue((ByteBuffer)value);
++      }
++      break;
++
++    }
++  }
++
++  public Object getFieldValue(_Fields field) {
++    switch (field) {
++    case KEY:
++      return getKey();
++
++    case VALUE:
++      return getValue();
++
++    }
++    throw new IllegalStateException();
++  }
++
++  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
++  public boolean isSet(_Fields field) {
++    if (field == null) {
++      throw new IllegalArgumentException();
++    }
++
++    switch (field) {
++    case KEY:
++      return isSetKey();
++    case VALUE:
++      return isSetValue();
++    }
++    throw new IllegalStateException();
++  }
++
++  @Override
++  public boolean equals(Object that) {
++    if (that == null)
++      return false;
++    if (that instanceof PKeyValue)
++      return this.equals((PKeyValue)that);
++    return false;
++  }
++
++  public boolean equals(PKeyValue that) {
++    if (that == null)
++      return false;
++
++    boolean this_present_key = true && this.isSetKey();
++    boolean that_present_key = true && that.isSetKey();
++    if (this_present_key || that_present_key) {
++      if (!(this_present_key && that_present_key))
++        return false;
++      if (!this.key.equals(that.key))
++        return false;
++    }
++
++    boolean this_present_value = true && this.isSetValue();
++    boolean that_present_value = true && that.isSetValue();
++    if (this_present_value || that_present_value) {
++      if (!(this_present_value && that_present_value))
++        return false;
++      if (!this.value.equals(that.value))
++        return false;
++    }
++
++    return true;
++  }
++
++  @Override
++  public int hashCode() {
++    return 0;
++  }
++
++  public int compareTo(PKeyValue other) {
++    if (!getClass().equals(other.getClass())) {
++      return getClass().getName().compareTo(other.getClass().getName());
++    }
++
++    int lastComparison = 0;
++    PKeyValue typedOther = (PKeyValue)other;
++
++    lastComparison = Boolean.valueOf(isSetKey()).compareTo(typedOther.isSetKey());
++    if (lastComparison != 0) {
++      return lastComparison;
++    }
++    if (isSetKey()) {
++      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.key, typedOther.key);
++      if (lastComparison != 0) {
++        return lastComparison;
++      }
++    }
++    lastComparison = Boolean.valueOf(isSetValue()).compareTo(typedOther.isSetValue());
++    if (lastComparison != 0) {
++      return lastComparison;
++    }
++    if (isSetValue()) {
++      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.value, typedOther.value);
++      if (lastComparison != 0) {
++        return lastComparison;
++      }
++    }
++    return 0;
++  }
++
++  public _Fields fieldForId(int fieldId) {
++    return _Fields.findByThriftId(fieldId);
++  }
++
++  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
++    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
++  }
++
++  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
++    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
++  }
++
++  @Override
++  public String toString() {
++    StringBuilder sb = new StringBuilder("PKeyValue(");
++    boolean first = true;
++
++    sb.append("key:");
++    if (this.key == null) {
++      sb.append("null");
++    } else {
++      sb.append(this.key);
++    }
++    first = false;
++    if (!first) sb.append(", ");
++    sb.append("value:");
++    if (this.value == null) {
++      sb.append("null");
++    } else {
++      org.apache.thrift.TBaseHelper.toString(this.value, sb);
++    }
++    first = false;
++    sb.append(")");
++    return sb.toString();
++  }
++
++  public void validate() throws org.apache.thrift.TException {
++    // check for required fields
++    // check for sub-struct validity
++    if (key != null) {
++      key.validate();
++    }
++  }
++
++  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
++    try {
++      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
++    } catch (org.apache.thrift.TException te) {
++      throw new java.io.IOException(te);
++    }
++  }
++
++  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
++    try {
++      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
++    } catch (org.apache.thrift.TException te) {
++      throw new java.io.IOException(te);
++    }
++  }
++
++  private static class PKeyValueStandardSchemeFactory implements SchemeFactory {
++    public PKeyValueStandardScheme getScheme() {
++      return new PKeyValueStandardScheme();
++    }
++  }
++
++  private static class PKeyValueStandardScheme extends StandardScheme<PKeyValue> {
++
++    public void read(org.apache.thrift.protocol.TProtocol iprot, PKeyValue struct) throws org.apache.thrift.TException {
++      org.apache.thrift.protocol.TField schemeField;
++      iprot.readStructBegin();
++      while (true)
++      {
++        schemeField = iprot.readFieldBegin();
++        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
++          break;
++        }
++        switch (schemeField.id) {
++          case 1: // KEY
++            if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) {
++              struct.key = new PKey();
++              struct.key.read(iprot);
++              struct.setKeyIsSet(true);
++            } else { 
++              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
++            }
++            break;
++          case 2: // VALUE
++            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
++              struct.value = iprot.readBinary();
++              struct.setValueIsSet(true);
++            } else { 
++              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
++            }
++            break;
++          default:
++            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
++        }
++        iprot.readFieldEnd();
++      }
++      iprot.readStructEnd();
++
++      // check for required fields of primitive type, which can't be checked in the validate method
++      struct.validate();
++    }
++
++    public void write(org.apache.thrift.protocol.TProtocol oprot, PKeyValue struct) throws org.apache.thrift.TException {
++      struct.validate();
++
++      oprot.writeStructBegin(STRUCT_DESC);
++      if (struct.key != null) {
++        oprot.writeFieldBegin(KEY_FIELD_DESC);
++        struct.key.write(oprot);
++        oprot.writeFieldEnd();
++      }
++      if (struct.value != null) {
++        oprot.writeFieldBegin(VALUE_FIELD_DESC);
++        oprot.writeBinary(struct.value);
++        oprot.writeFieldEnd();
++      }
++      oprot.writeFieldStop();
++      oprot.writeStructEnd();
++    }
++
++  }
++
++  private static class PKeyValueTupleSchemeFactory implements SchemeFactory {
++    public PKeyValueTupleScheme getScheme() {
++      return new PKeyValueTupleScheme();
++    }
++  }
++
++  private static class PKeyValueTupleScheme extends TupleScheme<PKeyValue> {
++
++    @Override
++    public void write(org.apache.thrift.protocol.TProtocol prot, PKeyValue struct) throws org.apache.thrift.TException {
++      TTupleProtocol oprot = (TTupleProtocol) prot;
++      BitSet optionals = new BitSet();
++      if (struct.isSetKey()) {
++        optionals.set(0);
++      }
++      if (struct.isSetValue()) {
++        optionals.set(1);
++      }
++      oprot.writeBitSet(optionals, 2);
++      if (struct.isSetKey()) {
++        struct.key.write(oprot);
++      }
++      if (struct.isSetValue()) {
++        oprot.writeBinary(struct.value);
++      }
++    }
++
++    @Override
++    public void read(org.apache.thrift.protocol.TProtocol prot, PKeyValue struct) throws org.apache.thrift.TException {
++      TTupleProtocol iprot = (TTupleProtocol) prot;
++      BitSet incoming = iprot.readBitSet(2);
++      if (incoming.get(0)) {
++        struct.key = new PKey();
++        struct.key.read(iprot);
++        struct.setKeyIsSet(true);
++      }
++      if (incoming.get(1)) {
++        struct.value = iprot.readBinary();
++        struct.setValueIsSet(true);
++      }
++    }
++  }
++
++}
++

http://git-wip-us.apache.org/repos/asf/accumulo/blob/58fcad6e/proxy/src/main/java/org/apache/accumulo/proxy/thrift/PRange.java
----------------------------------------------------------------------
diff --cc proxy/src/main/java/org/apache/accumulo/proxy/thrift/PRange.java
index 0000000,0000000..8ae323d
new file mode 100644
--- /dev/null
+++ b/proxy/src/main/java/org/apache/accumulo/proxy/thrift/PRange.java
@@@ -1,0 -1,0 +1,512 @@@
++/*
++ * Licensed to the Apache Software Foundation (ASF) under one or more
++ * contributor license agreements.  See the NOTICE file distributed with
++ * this work for additional information regarding copyright ownership.
++ * The ASF licenses this file to You under the Apache License, Version 2.0
++ * (the "License"); you may not use this file except in compliance with
++ * the License.  You may obtain a copy of the License at
++ *
++ *     http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++/**
++ * Autogenerated by Thrift Compiler (0.9.0)
++ *
++ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
++ *  @generated
++ */
++package org.apache.accumulo.proxy.thrift;
++
++import org.apache.thrift.scheme.IScheme;
++import org.apache.thrift.scheme.SchemeFactory;
++import org.apache.thrift.scheme.StandardScheme;
++
++import org.apache.thrift.scheme.TupleScheme;
++import org.apache.thrift.protocol.TTupleProtocol;
++import org.apache.thrift.protocol.TProtocolException;
++import org.apache.thrift.EncodingUtils;
++import org.apache.thrift.TException;
++import java.util.List;
++import java.util.ArrayList;
++import java.util.Map;
++import java.util.HashMap;
++import java.util.EnumMap;
++import java.util.Set;
++import java.util.HashSet;
++import java.util.EnumSet;
++import java.util.Collections;
++import java.util.BitSet;
++import java.nio.ByteBuffer;
++import java.util.Arrays;
++import org.slf4j.Logger;
++import org.slf4j.LoggerFactory;
++
++@SuppressWarnings("all") public class PRange implements org.apache.thrift.TBase<PRange, PRange._Fields>, java.io.Serializable, Cloneable {
++  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("PRange");
++
++  private static final org.apache.thrift.protocol.TField START_FIELD_DESC = new org.apache.thrift.protocol.TField("start", org.apache.thrift.protocol.TType.STRUCT, (short)1);
++  private static final org.apache.thrift.protocol.TField STOP_FIELD_DESC = new org.apache.thrift.protocol.TField("stop", org.apache.thrift.protocol.TType.STRUCT, (short)2);
++
++  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
++  static {
++    schemes.put(StandardScheme.class, new PRangeStandardSchemeFactory());
++    schemes.put(TupleScheme.class, new PRangeTupleSchemeFactory());
++  }
++
++  public PKey start; // required
++  public PKey stop; // required
++
++  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
++  @SuppressWarnings("all") public enum _Fields implements org.apache.thrift.TFieldIdEnum {
++    START((short)1, "start"),
++    STOP((short)2, "stop");
++
++    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
++
++    static {
++      for (_Fields field : EnumSet.allOf(_Fields.class)) {
++        byName.put(field.getFieldName(), field);
++      }
++    }
++
++    /**
++     * Find the _Fields constant that matches fieldId, or null if its not found.
++     */
++    public static _Fields findByThriftId(int fieldId) {
++      switch(fieldId) {
++        case 1: // START
++          return START;
++        case 2: // STOP
++          return STOP;
++        default:
++          return null;
++      }
++    }
++
++    /**
++     * Find the _Fields constant that matches fieldId, throwing an exception
++     * if it is not found.
++     */
++    public static _Fields findByThriftIdOrThrow(int fieldId) {
++      _Fields fields = findByThriftId(fieldId);
++      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
++      return fields;
++    }
++
++    /**
++     * Find the _Fields constant that matches name, or null if its not found.
++     */
++    public static _Fields findByName(String name) {
++      return byName.get(name);
++    }
++
++    private final short _thriftId;
++    private final String _fieldName;
++
++    _Fields(short thriftId, String fieldName) {
++      _thriftId = thriftId;
++      _fieldName = fieldName;
++    }
++
++    public short getThriftFieldId() {
++      return _thriftId;
++    }
++
++    public String getFieldName() {
++      return _fieldName;
++    }
++  }
++
++  // isset id assignments
++  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
++  static {
++    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
++    tmpMap.put(_Fields.START, new org.apache.thrift.meta_data.FieldMetaData("start", org.apache.thrift.TFieldRequirementType.DEFAULT, 
++        new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, PKey.class)));
++    tmpMap.put(_Fields.STOP, new org.apache.thrift.meta_data.FieldMetaData("stop", org.apache.thrift.TFieldRequirementType.DEFAULT, 
++        new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, PKey.class)));
++    metaDataMap = Collections.unmodifiableMap(tmpMap);
++    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(PRange.class, metaDataMap);
++  }
++
++  public PRange() {
++  }
++
++  public PRange(
++    PKey start,
++    PKey stop)
++  {
++    this();
++    this.start = start;
++    this.stop = stop;
++  }
++
++  /**
++   * Performs a deep copy on <i>other</i>.
++   */
++  public PRange(PRange other) {
++    if (other.isSetStart()) {
++      this.start = new PKey(other.start);
++    }
++    if (other.isSetStop()) {
++      this.stop = new PKey(other.stop);
++    }
++  }
++
++  public PRange deepCopy() {
++    return new PRange(this);
++  }
++
++  @Override
++  public void clear() {
++    this.start = null;
++    this.stop = null;
++  }
++
++  public PKey getStart() {
++    return this.start;
++  }
++
++  public PRange setStart(PKey start) {
++    this.start = start;
++    return this;
++  }
++
++  public void unsetStart() {
++    this.start = null;
++  }
++
++  /** Returns true if field start is set (has been assigned a value) and false otherwise */
++  public boolean isSetStart() {
++    return this.start != null;
++  }
++
++  public void setStartIsSet(boolean value) {
++    if (!value) {
++      this.start = null;
++    }
++  }
++
++  public PKey getStop() {
++    return this.stop;
++  }
++
++  public PRange setStop(PKey stop) {
++    this.stop = stop;
++    return this;
++  }
++
++  public void unsetStop() {
++    this.stop = null;
++  }
++
++  /** Returns true if field stop is set (has been assigned a value) and false otherwise */
++  public boolean isSetStop() {
++    return this.stop != null;
++  }
++
++  public void setStopIsSet(boolean value) {
++    if (!value) {
++      this.stop = null;
++    }
++  }
++
++  public void setFieldValue(_Fields field, Object value) {
++    switch (field) {
++    case START:
++      if (value == null) {
++        unsetStart();
++      } else {
++        setStart((PKey)value);
++      }
++      break;
++
++    case STOP:
++      if (value == null) {
++        unsetStop();
++      } else {
++        setStop((PKey)value);
++      }
++      break;
++
++    }
++  }
++
++  public Object getFieldValue(_Fields field) {
++    switch (field) {
++    case START:
++      return getStart();
++
++    case STOP:
++      return getStop();
++
++    }
++    throw new IllegalStateException();
++  }
++
++  /**
++   * Reports whether the addressed field has been assigned a value.
++   *
++   * @throws IllegalArgumentException when field is null
++   */
++  public boolean isSet(_Fields field) {
++    if (field == null) {
++      throw new IllegalArgumentException();
++    }
++    switch (field) {
++      case START:
++        return isSetStart();
++      case STOP:
++        return isSetStop();
++      default:
++        throw new IllegalStateException();
++    }
++  }
++
++  /** Type-checking equality bridge; defers to {@code equals(PRange)}. */
++  @Override
++  public boolean equals(Object that) {
++    // instanceof is false for null, so this also covers the null check.
++    return (that instanceof PRange) && this.equals((PRange) that);
++  }
++
++  /**
++   * Two ranges are equal when each field is set on both sides or on neither,
++   * and every set field compares equal.
++   */
++  public boolean equals(PRange that) {
++    if (that == null) {
++      return false;
++    }
++    if (isSetStart() != that.isSetStart()) {
++      return false;
++    }
++    if (isSetStart() && !this.start.equals(that.start)) {
++      return false;
++    }
++    if (isSetStop() != that.isSetStop()) {
++      return false;
++    }
++    if (isSetStop() && !this.stop.equals(that.stop)) {
++      return false;
++    }
++    return true;
++  }
++
++  @Override
++  public int hashCode() {
++    return 0;
++  }
++
++  /**
++   * Orders ranges by presence then value of start, then presence then value
++   * of stop; an unset field sorts before a set one.
++   */
++  public int compareTo(PRange other) {
++    if (!getClass().equals(other.getClass())) {
++      return getClass().getName().compareTo(other.getClass().getName());
++    }
++
++    int cmp = Boolean.valueOf(isSetStart()).compareTo(other.isSetStart());
++    if (cmp != 0) {
++      return cmp;
++    }
++    if (isSetStart()) {
++      cmp = org.apache.thrift.TBaseHelper.compareTo(this.start, other.start);
++      if (cmp != 0) {
++        return cmp;
++      }
++    }
++    cmp = Boolean.valueOf(isSetStop()).compareTo(other.isSetStop());
++    if (cmp != 0) {
++      return cmp;
++    }
++    if (isSetStop()) {
++      cmp = org.apache.thrift.TBaseHelper.compareTo(this.stop, other.stop);
++      if (cmp != 0) {
++        return cmp;
++      }
++    }
++    return 0;
++  }
++
++  // Maps a numeric Thrift field id (1 = START, 2 = STOP, per the wire schema
++  // handled in PRangeStandardScheme) to its _Fields enum constant.
++  public _Fields fieldForId(int fieldId) {
++    return _Fields.findByThriftId(fieldId);
++  }
++
++  // Deserializes this struct from the protocol, delegating to whichever
++  // scheme (standard or tuple) is registered for the protocol's type.
++  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
++    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
++  }
++
++  // Serializes this struct to the protocol, delegating to whichever
++  // scheme (standard or tuple) is registered for the protocol's type.
++  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
++    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
++  }
++
++  /**
++   * Renders as {@code PRange(start:<key>, stop:<key>)}; an unset field prints
++   * as the literal text "null".
++   */
++  @Override
++  public String toString() {
++    StringBuilder sb = new StringBuilder("PRange(");
++    // String.valueOf yields "null" for an unset field, matching the old output.
++    sb.append("start:").append(String.valueOf(this.start));
++    sb.append(", stop:").append(String.valueOf(this.stop));
++    sb.append(')');
++    return sb.toString();
++  }
++
++  /**
++   * Checks structural validity. PRange declares no required fields, so only
++   * the nested keys are validated, and only when present.
++   */
++  public void validate() throws org.apache.thrift.TException {
++    if (start != null) {
++      start.validate();
++    }
++    if (stop != null) {
++      stop.validate();
++    }
++  }
++
++  // Java serialization support: encodes this struct into the object stream
++  // using Thrift's compact protocol, rewrapping TException as IOException.
++  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
++    try {
++      org.apache.thrift.transport.TIOStreamTransport transport = new org.apache.thrift.transport.TIOStreamTransport(out);
++      write(new org.apache.thrift.protocol.TCompactProtocol(transport));
++    } catch (org.apache.thrift.TException te) {
++      throw new java.io.IOException(te);
++    }
++  }
++
++  // Java serialization support: decodes this struct from the object stream
++  // using Thrift's compact protocol, rewrapping TException as IOException.
++  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
++    try {
++      org.apache.thrift.transport.TIOStreamTransport transport = new org.apache.thrift.transport.TIOStreamTransport(in);
++      read(new org.apache.thrift.protocol.TCompactProtocol(transport));
++    } catch (org.apache.thrift.TException te) {
++      throw new java.io.IOException(te);
++    }
++  }
++
++  // SchemeFactory hook: supplies the field-tagged (standard) codec instance.
++  private static class PRangeStandardSchemeFactory implements SchemeFactory {
++    public PRangeStandardScheme getScheme() {
++      return new PRangeStandardScheme();
++    }
++  }
++
++  /**
++   * Standard (field-tagged) wire codec for PRange: each field is written with
++   * its numeric id and type, so fields may arrive in any order and unknown
++   * fields can be skipped for cross-version compatibility.
++   */
++  private static class PRangeStandardScheme extends StandardScheme<PRange> {
++
++    // Reads fields until the STOP marker; unknown or mistyped fields are skipped.
++    public void read(org.apache.thrift.protocol.TProtocol iprot, PRange struct) throws org.apache.thrift.TException {
++      org.apache.thrift.protocol.TField schemeField;
++      iprot.readStructBegin();
++      while (true)
++      {
++        schemeField = iprot.readFieldBegin();
++        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
++          break;
++        }
++        switch (schemeField.id) {
++          case 1: // START
++            if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) {
++              struct.start = new PKey();
++              struct.start.read(iprot);
++              struct.setStartIsSet(true);
++            } else { 
++              // Field id 1 arrived with an unexpected wire type: skip it.
++              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
++            }
++            break;
++          case 2: // STOP
++            if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) {
++              struct.stop = new PKey();
++              struct.stop.read(iprot);
++              struct.setStopIsSet(true);
++            } else { 
++              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
++            }
++            break;
++          default:
++            // Unknown field id (e.g. from a newer schema): skip for compatibility.
++            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
++        }
++        iprot.readFieldEnd();
++      }
++      iprot.readStructEnd();
++
++      // check for required fields of primitive type, which can't be checked in the validate method
++      struct.validate();
++    }
++
++    // Writes only the fields that are set (non-null), then the field-stop marker.
++    public void write(org.apache.thrift.protocol.TProtocol oprot, PRange struct) throws org.apache.thrift.TException {
++      struct.validate();
++
++      oprot.writeStructBegin(STRUCT_DESC);
++      if (struct.start != null) {
++        oprot.writeFieldBegin(START_FIELD_DESC);
++        struct.start.write(oprot);
++        oprot.writeFieldEnd();
++      }
++      if (struct.stop != null) {
++        oprot.writeFieldBegin(STOP_FIELD_DESC);
++        struct.stop.write(oprot);
++        oprot.writeFieldEnd();
++      }
++      oprot.writeFieldStop();
++      oprot.writeStructEnd();
++    }
++
++  }
++
++  // SchemeFactory hook: supplies the compact tuple codec instance.
++  private static class PRangeTupleSchemeFactory implements SchemeFactory {
++    public PRangeTupleScheme getScheme() {
++      return new PRangeTupleScheme();
++    }
++  }
++
++  /**
++   * Compact tuple codec for PRange: a 2-entry presence bitmap (bit 0 = start,
++   * bit 1 = stop) followed by the set fields in declaration order. Unlike the
++   * standard scheme, reader and writer must agree on this exact layout.
++   */
++  private static class PRangeTupleScheme extends TupleScheme<PRange> {
++
++    @Override
++    public void write(org.apache.thrift.protocol.TProtocol prot, PRange struct) throws org.apache.thrift.TException {
++      TTupleProtocol oprot = (TTupleProtocol) prot;
++      // Presence bitmap first, then each present field, in field-id order.
++      BitSet optionals = new BitSet();
++      if (struct.isSetStart()) {
++        optionals.set(0);
++      }
++      if (struct.isSetStop()) {
++        optionals.set(1);
++      }
++      oprot.writeBitSet(optionals, 2);
++      if (struct.isSetStart()) {
++        struct.start.write(oprot);
++      }
++      if (struct.isSetStop()) {
++        struct.stop.write(oprot);
++      }
++    }
++
++    @Override
++    public void read(org.apache.thrift.protocol.TProtocol prot, PRange struct) throws org.apache.thrift.TException {
++      TTupleProtocol iprot = (TTupleProtocol) prot;
++      // Mirror of write(): read the presence bitmap, then each present field.
++      BitSet incoming = iprot.readBitSet(2);
++      if (incoming.get(0)) {
++        struct.start = new PKey();
++        struct.start.read(iprot);
++        struct.setStartIsSet(true);
++      }
++      if (incoming.get(1)) {
++        struct.stop = new PKey();
++        struct.stop.read(iprot);
++        struct.setStopIsSet(true);
++      }
++    }
++  }
++
++}
++


[05/15] git commit: ACCUMULO-652 changed version

Posted by el...@apache.org.
ACCUMULO-652 changed version

git-svn-id: https://svn.apache.org/repos/asf/accumulo/branches/ACCUMULO-652@1354541 13f79535-47bb-0310-9956-ffa450edef68


Project: http://git-wip-us.apache.org/repos/asf/accumulo/repo
Commit: http://git-wip-us.apache.org/repos/asf/accumulo/commit/064403b5
Tree: http://git-wip-us.apache.org/repos/asf/accumulo/tree/064403b5
Diff: http://git-wip-us.apache.org/repos/asf/accumulo/diff/064403b5

Branch: refs/heads/ACCUMULO-652
Commit: 064403b51bd4ca67948c8b125b6359addb659e3d
Parents: 80ee809
Author: Adam Fuchs <af...@apache.org>
Authored: Wed Jun 27 14:38:44 2012 +0000
Committer: Adam Fuchs <af...@apache.org>
Committed: Wed Jun 27 14:38:44 2012 +0000

----------------------------------------------------------------------
 README                                                |  8 ++++----
 assemble/pom.xml                                      |  2 +-
 bin/config.sh                                         |  2 +-
 core/pom.xml                                          |  2 +-
 .../main/java/org/apache/accumulo/core/Constants.java |  2 +-
 .../org/apache/accumulo/core/conf/config.html         |  2 +-
 docs/combiners.html                                   |  2 +-
 docs/examples/README.bloom                            |  6 +++---
 docs/examples/README.combiner                         |  2 +-
 docs/examples/README.constraints                      |  2 +-
 docs/examples/README.mapred                           |  2 +-
 docs/examples/README.maxmutation                      |  2 +-
 examples/pom.xml                                      |  2 +-
 examples/simple/pom.xml                               |  2 +-
 examples/wikisearch/ingest/bin/ingest.sh              |  2 +-
 examples/wikisearch/ingest/bin/ingest_parallel.sh     |  2 +-
 examples/wikisearch/ingest/pom.xml                    |  2 +-
 examples/wikisearch/pom.xml                           |  2 +-
 examples/wikisearch/query-war/pom.xml                 |  2 +-
 examples/wikisearch/query/pom.xml                     |  2 +-
 pom.xml                                               | 14 +++++++-------
 server/pom.xml                                        |  2 +-
 server/src/main/c++/mlock/Makefile                    |  2 +-
 server/src/main/c++/nativeMap/Makefile                |  2 +-
 start/pom.xml                                         |  2 +-
 trace/pom.xml                                         |  2 +-
 26 files changed, 37 insertions(+), 37 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/accumulo/blob/064403b5/README
----------------------------------------------------------------------
diff --git a/README b/README
index 9ad04a4..41f6efc 100644
--- a/README
+++ b/README
@@ -36,7 +36,7 @@ easier to install.
 
 Copy the accumulo tar file produced by mvn package from the src/assemble/target/
 directory to the desired destination, then untar it (e.g. 
-tar xvzf accumulo-1.5.0-SNAPSHOT-dist.tar.gz).
+tar xvzf accumulo-ACCUMULO-652-SNAPSHOT-dist.tar.gz).
 
 If you are using the RPM, install the RPM on every machine that will run
 accumulo.
@@ -63,7 +63,7 @@ found in the same location on every machine in the cluster.  You will need to
 have password-less ssh set up as described in the hadoop documentation. 
 
 You will need to have hadoop installed and configured on your system.
-Accumulo 1.5.0-SNAPSHOT has been tested with hadoop version 0.20.2.
+Accumulo ACCUMULO-652-SNAPSHOT has been tested with hadoop version 0.20.2.
 
 The example accumulo configuration files are placed in directories based on the 
 memory footprint for the accumulo processes.  If you are using native libraries
@@ -158,7 +158,7 @@ scanned.
 
     Shell - Apache Accumulo Interactive Shell
     - 
-    - version: 1.5.0-SNAPSHOT
+    - version: ACCUMULO-652-SNAPSHOT
     - instance name: accumulo
     - instance id: f5947fe6-081e-41a8-9877-43730c4dfc6f
     - 
@@ -179,7 +179,7 @@ certain column.
 
     Shell - Apache Accumulo Interactive Shell
     - 
-    - version: 1.5.0-SNAPSHOT
+    - version: ACCUMULO-652-SNAPSHOT
     - instance name: accumulo
     - instance id: f5947fe6-081e-41a8-9877-43730c4dfc6f
     - 

http://git-wip-us.apache.org/repos/asf/accumulo/blob/064403b5/assemble/pom.xml
----------------------------------------------------------------------
diff --git a/assemble/pom.xml b/assemble/pom.xml
index 24c2b8b..c3d585a 100644
--- a/assemble/pom.xml
+++ b/assemble/pom.xml
@@ -15,7 +15,7 @@
 	<parent>
 		<groupId>org.apache.accumulo</groupId>
 		<artifactId>accumulo</artifactId>
-		<version>1.5.0-SNAPSHOT</version>
+		<version>ACCUMULO-652-SNAPSHOT</version>
 	</parent>
 
   <modelVersion>4.0.0</modelVersion>

http://git-wip-us.apache.org/repos/asf/accumulo/blob/064403b5/bin/config.sh
----------------------------------------------------------------------
diff --git a/bin/config.sh b/bin/config.sh
index 3dbbea4..ca5e575 100755
--- a/bin/config.sh
+++ b/bin/config.sh
@@ -46,7 +46,7 @@ mkdir -p $ACCUMULO_LOG_DIR 2>/dev/null
 export ACCUMULO_LOG_DIR
 
 if [ -z ${ACCUMULO_VERSION} ]; then
-        ACCUMULO_VERSION=1.5.0-SNAPSHOT
+        ACCUMULO_VERSION=ACCUMULO-652-SNAPSHOT
 fi
 
 if [ -z "$HADOOP_HOME" ]

http://git-wip-us.apache.org/repos/asf/accumulo/blob/064403b5/core/pom.xml
----------------------------------------------------------------------
diff --git a/core/pom.xml b/core/pom.xml
index 6b563d8..1ac081e 100644
--- a/core/pom.xml
+++ b/core/pom.xml
@@ -20,7 +20,7 @@
   <parent>
     <groupId>org.apache.accumulo</groupId>
     <artifactId>accumulo</artifactId>
-    <version>1.5.0-SNAPSHOT</version>
+    <version>ACCUMULO-652-SNAPSHOT</version>
   </parent>
 
   <modelVersion>4.0.0</modelVersion>

http://git-wip-us.apache.org/repos/asf/accumulo/blob/064403b5/core/src/main/java/org/apache/accumulo/core/Constants.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/Constants.java b/core/src/main/java/org/apache/accumulo/core/Constants.java
index 36106c1..4cdbca0 100644
--- a/core/src/main/java/org/apache/accumulo/core/Constants.java
+++ b/core/src/main/java/org/apache/accumulo/core/Constants.java
@@ -28,7 +28,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.Text;
 
 public class Constants {
-  public static final String VERSION = "1.5.0-SNAPSHOT";
+  public static final String VERSION = "ACCUMULO-652-SNAPSHOT";
   public static final int DATA_VERSION = 4;
   public static final int PREV_DATA_VERSION = 3;
   

http://git-wip-us.apache.org/repos/asf/accumulo/blob/064403b5/core/src/main/resources/org/apache/accumulo/core/conf/config.html
----------------------------------------------------------------------
diff --git a/core/src/main/resources/org/apache/accumulo/core/conf/config.html b/core/src/main/resources/org/apache/accumulo/core/conf/config.html
index 21d7438..e2c9d6b 100644
--- a/core/src/main/resources/org/apache/accumulo/core/conf/config.html
+++ b/core/src/main/resources/org/apache/accumulo/core/conf/config.html
@@ -55,7 +55,7 @@
 
     Shell - Apache Accumulo Interactive Shell
     - 
-    - version: 1.5.0-SNAPSHOT
+    - version: ACCUMULO-652-SNAPSHOT
     - instance name: ac14
     - instance id: 4f48fa03-f692-43ce-ae03-94c9ea8b7181
     - 

http://git-wip-us.apache.org/repos/asf/accumulo/blob/064403b5/docs/combiners.html
----------------------------------------------------------------------
diff --git a/docs/combiners.html b/docs/combiners.html
index ce7fec7..a4bf325 100644
--- a/docs/combiners.html
+++ b/docs/combiners.html
@@ -30,7 +30,7 @@
 <p><pre>
 
 Shell - Apache Accumulo Interactive Shell
-- version: 1.5.0-SNAPSHOT
+- version: ACCUMULO-652-SNAPSHOT
 - instance id: 863fc0d1-3623-4b6c-8c23-7d4fdb1c8a49
 - 
 - type 'help' for a list of available commands

http://git-wip-us.apache.org/repos/asf/accumulo/blob/064403b5/docs/examples/README.bloom
----------------------------------------------------------------------
diff --git a/docs/examples/README.bloom b/docs/examples/README.bloom
index d948bf7..663e895 100644
--- a/docs/examples/README.bloom
+++ b/docs/examples/README.bloom
@@ -24,7 +24,7 @@ Below table named bloom_test is created and bloom filters are enabled.
 
     $ ./bin/accumulo shell -u username -p password
     Shell - Apache Accumulo Interactive Shell
-    - version: 1.5.0-SNAPSHOT
+    - version: ACCUMULO-652-SNAPSHOT
     - instance name: instance
     - instance id: 00000000-0000-0000-0000-000000000000
     - 
@@ -102,7 +102,7 @@ The commands for creating the first table without bloom filters are below.
 
     $ ./bin/accumulo shell -u username -p password
     Shell - Apache Accumulo Interactive Shell
-    - version: 1.5.0-SNAPSHOT
+    - version: ACCUMULO-652-SNAPSHOT
     - instance name: instance
     - instance id: 00000000-0000-0000-0000-000000000000
     - 
@@ -124,7 +124,7 @@ The commands for creating the second table with bloom filers are below.
 
     $ ./bin/accumulo shell -u username -p password
     Shell - Apache Accumulo Interactive Shell
-    - version: 1.5.0-SNAPSHOT
+    - version: ACCUMULO-652-SNAPSHOT
     - instance name: instance
     - instance id: 00000000-0000-0000-0000-000000000000
     - 

http://git-wip-us.apache.org/repos/asf/accumulo/blob/064403b5/docs/examples/README.combiner
----------------------------------------------------------------------
diff --git a/docs/examples/README.combiner b/docs/examples/README.combiner
index d180218..6a07cf8 100644
--- a/docs/examples/README.combiner
+++ b/docs/examples/README.combiner
@@ -29,7 +29,7 @@ tar distribution.
     
     Shell - Apache Accumulo Interactive Shell
     - 
-    - version: 1.5.0-SNAPSHOT
+    - version: ACCUMULO-652-SNAPSHOT
     - instance name: instance
     - instance id: 00000000-0000-0000-0000-000000000000
     - 

http://git-wip-us.apache.org/repos/asf/accumulo/blob/064403b5/docs/examples/README.constraints
----------------------------------------------------------------------
diff --git a/docs/examples/README.constraints b/docs/examples/README.constraints
index 32a8f59..eb90b44 100644
--- a/docs/examples/README.constraints
+++ b/docs/examples/README.constraints
@@ -31,7 +31,7 @@ the end shows the inserts were not allowed.
     
     Shell - Apache Accumulo Interactive Shell
     - 
-    - version: 1.5.0-SNAPSHOT
+    - version: ACCUMULO-652-SNAPSHOT
     - instance name: instance
     - instance id: 00000000-0000-0000-0000-000000000000
     - 

http://git-wip-us.apache.org/repos/asf/accumulo/blob/064403b5/docs/examples/README.mapred
----------------------------------------------------------------------
diff --git a/docs/examples/README.mapred b/docs/examples/README.mapred
index 86e2b2e..702d318 100644
--- a/docs/examples/README.mapred
+++ b/docs/examples/README.mapred
@@ -33,7 +33,7 @@ for the column family count.
 
     $ ./bin/accumulo shell -u username -p password
     Shell - Apache Accumulo Interactive Shell
-    - version: 1.5.0-SNAPSHOT
+    - version: ACCUMULO-652-SNAPSHOT
     - instance name: instance
     - instance id: 00000000-0000-0000-0000-000000000000
     - 

http://git-wip-us.apache.org/repos/asf/accumulo/blob/064403b5/docs/examples/README.maxmutation
----------------------------------------------------------------------
diff --git a/docs/examples/README.maxmutation b/docs/examples/README.maxmutation
index f915187..4e6a0ec 100644
--- a/docs/examples/README.maxmutation
+++ b/docs/examples/README.maxmutation
@@ -27,7 +27,7 @@ a table to reject very large mutations.
     
     Shell - Apache Accumulo Interactive Shell
     - 
-    - version: 1.5.0-SNAPSHOT
+    - version: ACCUMULO-652-SNAPSHOT
     - instance name: instance
     - instance id: 00000000-0000-0000-0000-000000000000
     - 

http://git-wip-us.apache.org/repos/asf/accumulo/blob/064403b5/examples/pom.xml
----------------------------------------------------------------------
diff --git a/examples/pom.xml b/examples/pom.xml
index 74f1a18..320090c 100644
--- a/examples/pom.xml
+++ b/examples/pom.xml
@@ -20,7 +20,7 @@
   <parent>
     <artifactId>accumulo</artifactId>
     <groupId>org.apache.accumulo</groupId>
-    <version>1.5.0-SNAPSHOT</version>
+    <version>ACCUMULO-652-SNAPSHOT</version>
   </parent>
   <artifactId>accumulo-examples</artifactId>
   <packaging>pom</packaging>

http://git-wip-us.apache.org/repos/asf/accumulo/blob/064403b5/examples/simple/pom.xml
----------------------------------------------------------------------
diff --git a/examples/simple/pom.xml b/examples/simple/pom.xml
index d902b1f..6823ef7 100644
--- a/examples/simple/pom.xml
+++ b/examples/simple/pom.xml
@@ -20,7 +20,7 @@
   <parent>
     <groupId>org.apache.accumulo</groupId>
     <artifactId>accumulo-examples</artifactId>
-    <version>1.5.0-SNAPSHOT</version>
+    <version>ACCUMULO-652-SNAPSHOT</version>
   </parent>
 
   <modelVersion>4.0.0</modelVersion>

http://git-wip-us.apache.org/repos/asf/accumulo/blob/064403b5/examples/wikisearch/ingest/bin/ingest.sh
----------------------------------------------------------------------
diff --git a/examples/wikisearch/ingest/bin/ingest.sh b/examples/wikisearch/ingest/bin/ingest.sh
index dbb9b05..2597130 100755
--- a/examples/wikisearch/ingest/bin/ingest.sh
+++ b/examples/wikisearch/ingest/bin/ingest.sh
@@ -38,7 +38,7 @@ LIBJARS=`echo $CLASSPATH | sed 's/^://' | sed 's/:/,/g'`
 #
 # Map/Reduce job
 #
-JAR=$SCRIPT_DIR/../lib/wikisearch-ingest-1.5.0-SNAPSHOT.jar
+JAR=$SCRIPT_DIR/../lib/wikisearch-ingest-ACCUMULO-652-SNAPSHOT.jar
 CONF=$SCRIPT_DIR/../conf/wikipedia.xml
 HDFS_DATA_DIR=$1
 export HADOOP_CLASSPATH=$CLASSPATH

http://git-wip-us.apache.org/repos/asf/accumulo/blob/064403b5/examples/wikisearch/ingest/bin/ingest_parallel.sh
----------------------------------------------------------------------
diff --git a/examples/wikisearch/ingest/bin/ingest_parallel.sh b/examples/wikisearch/ingest/bin/ingest_parallel.sh
index 003b7f9..a712e39 100755
--- a/examples/wikisearch/ingest/bin/ingest_parallel.sh
+++ b/examples/wikisearch/ingest/bin/ingest_parallel.sh
@@ -38,7 +38,7 @@ LIBJARS=`echo $CLASSPATH | sed 's/^://' | sed 's/:/,/g'`
 #
 # Map/Reduce job
 #
-JAR=$SCRIPT_DIR/../lib/wikisearch-ingest-1.5.0-SNAPSHOT.jar
+JAR=$SCRIPT_DIR/../lib/wikisearch-ingest-ACCUMULO-652-SNAPSHOT.jar
 CONF=$SCRIPT_DIR/../conf/wikipedia.xml
 HDFS_DATA_DIR=$1
 export HADOOP_CLASSPATH=$CLASSPATH

http://git-wip-us.apache.org/repos/asf/accumulo/blob/064403b5/examples/wikisearch/ingest/pom.xml
----------------------------------------------------------------------
diff --git a/examples/wikisearch/ingest/pom.xml b/examples/wikisearch/ingest/pom.xml
index 4c8c297..89247f0 100644
--- a/examples/wikisearch/ingest/pom.xml
+++ b/examples/wikisearch/ingest/pom.xml
@@ -20,7 +20,7 @@
   <parent>
     <artifactId>accumulo-wikisearch</artifactId>
     <groupId>org.apache.accumulo</groupId>
-    <version>1.5.0-SNAPSHOT</version>
+    <version>ACCUMULO-652-SNAPSHOT</version>
   </parent>
 
   <artifactId>wikisearch-ingest</artifactId>

http://git-wip-us.apache.org/repos/asf/accumulo/blob/064403b5/examples/wikisearch/pom.xml
----------------------------------------------------------------------
diff --git a/examples/wikisearch/pom.xml b/examples/wikisearch/pom.xml
index a0384bd..a74ea3d 100644
--- a/examples/wikisearch/pom.xml
+++ b/examples/wikisearch/pom.xml
@@ -20,7 +20,7 @@
   <parent>
     <artifactId>accumulo-examples</artifactId>
     <groupId>org.apache.accumulo</groupId>
-    <version>1.5.0-SNAPSHOT</version>
+    <version>ACCUMULO-652-SNAPSHOT</version>
   </parent>
   <artifactId>accumulo-wikisearch</artifactId>
   <packaging>pom</packaging>

http://git-wip-us.apache.org/repos/asf/accumulo/blob/064403b5/examples/wikisearch/query-war/pom.xml
----------------------------------------------------------------------
diff --git a/examples/wikisearch/query-war/pom.xml b/examples/wikisearch/query-war/pom.xml
index a85f972..db6f529 100644
--- a/examples/wikisearch/query-war/pom.xml
+++ b/examples/wikisearch/query-war/pom.xml
@@ -20,7 +20,7 @@
   <parent>
     <artifactId>accumulo-wikisearch</artifactId>
     <groupId>org.apache.accumulo</groupId>
-    <version>1.5.0-SNAPSHOT</version>
+    <version>ACCUMULO-652-SNAPSHOT</version>
   </parent>
 
   <artifactId>wikisearch-query-war</artifactId>

http://git-wip-us.apache.org/repos/asf/accumulo/blob/064403b5/examples/wikisearch/query/pom.xml
----------------------------------------------------------------------
diff --git a/examples/wikisearch/query/pom.xml b/examples/wikisearch/query/pom.xml
index 30f430b..cc451e3 100644
--- a/examples/wikisearch/query/pom.xml
+++ b/examples/wikisearch/query/pom.xml
@@ -19,7 +19,7 @@
   <parent>
     <artifactId>accumulo-wikisearch</artifactId>
     <groupId>org.apache.accumulo</groupId>
-    <version>1.5.0-SNAPSHOT</version>
+    <version>ACCUMULO-652-SNAPSHOT</version>
   </parent>
 
   <artifactId>wikisearch-query</artifactId>

http://git-wip-us.apache.org/repos/asf/accumulo/blob/064403b5/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index 9d9fa28..d9e2950 100644
--- a/pom.xml
+++ b/pom.xml
@@ -26,7 +26,7 @@
   <groupId>org.apache.accumulo</groupId>
   <artifactId>accumulo</artifactId>
   <packaging>pom</packaging>
-  <version>1.5.0-SNAPSHOT</version>
+  <version>ACCUMULO-652-SNAPSHOT</version>
   <name>accumulo</name>
   <prerequisites>
     <maven>2.2.0</maven>
@@ -524,33 +524,33 @@
       <dependency>
         <groupId>org.apache.accumulo</groupId>
         <artifactId>cloudtrace</artifactId>
-        <version>1.5.0-SNAPSHOT</version>
+        <version>ACCUMULO-652-SNAPSHOT</version>
       </dependency>
       <dependency>
         <groupId>org.apache.accumulo</groupId>
         <artifactId>accumulo-start</artifactId>
-        <version>1.5.0-SNAPSHOT</version>
+        <version>ACCUMULO-652-SNAPSHOT</version>
       </dependency>
       <dependency>
         <groupId>org.apache.accumulo</groupId>
         <artifactId>accumulo-core</artifactId>
-        <version>1.5.0-SNAPSHOT</version>
+        <version>ACCUMULO-652-SNAPSHOT</version>
       </dependency>
       <dependency>
         <groupId>org.apache.accumulo</groupId>
         <artifactId>accumulo-server</artifactId>
-        <version>1.5.0-SNAPSHOT</version>
+        <version>ACCUMULO-652-SNAPSHOT</version>
       </dependency>
       <dependency>
         <groupId>org.apache.accumulo</groupId>
         <artifactId>examples-simple</artifactId>
-        <version>1.5.0-SNAPSHOT</version>
+        <version>ACCUMULO-652-SNAPSHOT</version>
       </dependency>
       <dependency>
         <groupId>org.apache.accumulo</groupId>
         <artifactId>wikisearch-query-war</artifactId>
         <type>war</type>
-        <version>1.5.0-SNAPSHOT</version>
+        <version>ACCUMULO-652-SNAPSHOT</version>
       </dependency>
 
       <!-- additional dependencies we need to provide -->

http://git-wip-us.apache.org/repos/asf/accumulo/blob/064403b5/server/pom.xml
----------------------------------------------------------------------
diff --git a/server/pom.xml b/server/pom.xml
index 50cab45..4e12c77 100644
--- a/server/pom.xml
+++ b/server/pom.xml
@@ -20,7 +20,7 @@
   <parent>
     <groupId>org.apache.accumulo</groupId>
     <artifactId>accumulo</artifactId>
-    <version>1.5.0-SNAPSHOT</version>
+    <version>ACCUMULO-652-SNAPSHOT</version>
   </parent>
 
   <modelVersion>4.0.0</modelVersion>

http://git-wip-us.apache.org/repos/asf/accumulo/blob/064403b5/server/src/main/c++/mlock/Makefile
----------------------------------------------------------------------
diff --git a/server/src/main/c++/mlock/Makefile b/server/src/main/c++/mlock/Makefile
index 42c3add..768d751 100644
--- a/server/src/main/c++/mlock/Makefile
+++ b/server/src/main/c++/mlock/Makefile
@@ -37,7 +37,7 @@ clean:
 	rm -f $(INSTALLED_LIBS) $(LIBS)
 
 org_apache_accumulo_server_tabletserver_$(LIB).h : ../../java/org/apache/accumulo/server/tabletserver/$(LIB).java
-	javah -classpath ../../../../../lib/accumulo-server-1.5.0-SNAPSHOT.jar org.apache.accumulo.server.tabletserver.$(LIB)
+	javah -classpath ../../../../../lib/accumulo-server-ACCUMULO-652-SNAPSHOT.jar org.apache.accumulo.server.tabletserver.$(LIB)
 
 $(INSTALLED_LIBS) : $(INSTALL_DIR) $(LIBS)
 	cp $(LIBS) $(INSTALL_DIR)

http://git-wip-us.apache.org/repos/asf/accumulo/blob/064403b5/server/src/main/c++/nativeMap/Makefile
----------------------------------------------------------------------
diff --git a/server/src/main/c++/nativeMap/Makefile b/server/src/main/c++/nativeMap/Makefile
index ea6b7e6..e738f2d 100644
--- a/server/src/main/c++/nativeMap/Makefile
+++ b/server/src/main/c++/nativeMap/Makefile
@@ -36,7 +36,7 @@ clean:
 	rm -f $(INSTALLED_LIBS) $(LIBS)
 
 org_apache_accumulo_server_tabletserver_NativeMap.h : ../../java/org/apache/accumulo/server/tabletserver/NativeMap.java
-	javah -classpath ../../../../../lib/accumulo-server-1.5.0-SNAPSHOT.jar org.apache.accumulo.server.tabletserver.NativeMap
+	javah -classpath ../../../../../lib/accumulo-server-ACCUMULO-652-SNAPSHOT.jar org.apache.accumulo.server.tabletserver.NativeMap
 
 $(INSTALLED_LIBS) : $(INSTALL_DIR) $(LIBS)
 	cp $(LIBS) $(INSTALL_DIR)

http://git-wip-us.apache.org/repos/asf/accumulo/blob/064403b5/start/pom.xml
----------------------------------------------------------------------
diff --git a/start/pom.xml b/start/pom.xml
index 490865a..a5712d8 100644
--- a/start/pom.xml
+++ b/start/pom.xml
@@ -20,7 +20,7 @@
   <parent>
     <groupId>org.apache.accumulo</groupId>
     <artifactId>accumulo</artifactId>
-    <version>1.5.0-SNAPSHOT</version>
+    <version>ACCUMULO-652-SNAPSHOT</version>
   </parent>
 
   <modelVersion>4.0.0</modelVersion>

http://git-wip-us.apache.org/repos/asf/accumulo/blob/064403b5/trace/pom.xml
----------------------------------------------------------------------
diff --git a/trace/pom.xml b/trace/pom.xml
index 36e6df1..e2212ae 100644
--- a/trace/pom.xml
+++ b/trace/pom.xml
@@ -20,7 +20,7 @@
   <parent>
     <groupId>org.apache.accumulo</groupId>
     <artifactId>accumulo</artifactId>
-    <version>1.5.0-SNAPSHOT</version>
+    <version>ACCUMULO-652-SNAPSHOT</version>
   </parent>
 
   <modelVersion>4.0.0</modelVersion>


[11/15] ACCUMULO-652 merged changes from trunk

Posted by el...@apache.org.
http://git-wip-us.apache.org/repos/asf/accumulo/blob/58fcad6e/proxy/src/main/java/org/apache/accumulo/proxy/thrift/PScanResult.java
----------------------------------------------------------------------
diff --cc proxy/src/main/java/org/apache/accumulo/proxy/thrift/PScanResult.java
index 0000000,0000000..4afbbbe
new file mode 100644
--- /dev/null
+++ b/proxy/src/main/java/org/apache/accumulo/proxy/thrift/PScanResult.java
@@@ -1,0 -1,0 +1,554 @@@
++/*
++ * Licensed to the Apache Software Foundation (ASF) under one or more
++ * contributor license agreements.  See the NOTICE file distributed with
++ * this work for additional information regarding copyright ownership.
++ * The ASF licenses this file to You under the Apache License, Version 2.0
++ * (the "License"); you may not use this file except in compliance with
++ * the License.  You may obtain a copy of the License at
++ *
++ *     http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++/**
++ * Autogenerated by Thrift Compiler (0.9.0)
++ *
++ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
++ *  @generated
++ */
++package org.apache.accumulo.proxy.thrift;
++
++import org.apache.thrift.scheme.IScheme;
++import org.apache.thrift.scheme.SchemeFactory;
++import org.apache.thrift.scheme.StandardScheme;
++
++import org.apache.thrift.scheme.TupleScheme;
++import org.apache.thrift.protocol.TTupleProtocol;
++import org.apache.thrift.protocol.TProtocolException;
++import org.apache.thrift.EncodingUtils;
++import org.apache.thrift.TException;
++import java.util.List;
++import java.util.ArrayList;
++import java.util.Map;
++import java.util.HashMap;
++import java.util.EnumMap;
++import java.util.Set;
++import java.util.HashSet;
++import java.util.EnumSet;
++import java.util.Collections;
++import java.util.BitSet;
++import java.nio.ByteBuffer;
++import java.util.Arrays;
++import org.slf4j.Logger;
++import org.slf4j.LoggerFactory;
++
++@SuppressWarnings("all") public class PScanResult implements org.apache.thrift.TBase<PScanResult, PScanResult._Fields>, java.io.Serializable, Cloneable {
++  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("PScanResult");
++
++  private static final org.apache.thrift.protocol.TField RESULTS_FIELD_DESC = new org.apache.thrift.protocol.TField("results", org.apache.thrift.protocol.TType.LIST, (short)1);
++  private static final org.apache.thrift.protocol.TField MORE_FIELD_DESC = new org.apache.thrift.protocol.TField("more", org.apache.thrift.protocol.TType.BOOL, (short)2);
++
++  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
++  static {
++    schemes.put(StandardScheme.class, new PScanResultStandardSchemeFactory());
++    schemes.put(TupleScheme.class, new PScanResultTupleSchemeFactory());
++  }
++
++  public List<PKeyValue> results; // required
++  public boolean more; // required
++
++  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
++  @SuppressWarnings("all") public enum _Fields implements org.apache.thrift.TFieldIdEnum {
++    RESULTS((short)1, "results"),
++    MORE((short)2, "more");
++
++    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
++
++    static {
++      for (_Fields field : EnumSet.allOf(_Fields.class)) {
++        byName.put(field.getFieldName(), field);
++      }
++    }
++
++    /**
++     * Find the _Fields constant that matches fieldId, or null if its not found.
++     */
++    public static _Fields findByThriftId(int fieldId) {
++      switch(fieldId) {
++        case 1: // RESULTS
++          return RESULTS;
++        case 2: // MORE
++          return MORE;
++        default:
++          return null;
++      }
++    }
++
++    /**
++     * Find the _Fields constant that matches fieldId, throwing an exception
++     * if it is not found.
++     */
++    public static _Fields findByThriftIdOrThrow(int fieldId) {
++      _Fields fields = findByThriftId(fieldId);
++      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
++      return fields;
++    }
++
++    /**
++     * Find the _Fields constant that matches name, or null if its not found.
++     */
++    public static _Fields findByName(String name) {
++      return byName.get(name);
++    }
++
++    private final short _thriftId;
++    private final String _fieldName;
++
++    _Fields(short thriftId, String fieldName) {
++      _thriftId = thriftId;
++      _fieldName = fieldName;
++    }
++
++    public short getThriftFieldId() {
++      return _thriftId;
++    }
++
++    public String getFieldName() {
++      return _fieldName;
++    }
++  }
++
++  // isset id assignments
++  private static final int __MORE_ISSET_ID = 0;
++  private byte __isset_bitfield = 0;
++  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
++  static {
++    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
++    tmpMap.put(_Fields.RESULTS, new org.apache.thrift.meta_data.FieldMetaData("results", org.apache.thrift.TFieldRequirementType.DEFAULT, 
++        new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
++            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, PKeyValue.class))));
++    tmpMap.put(_Fields.MORE, new org.apache.thrift.meta_data.FieldMetaData("more", org.apache.thrift.TFieldRequirementType.DEFAULT, 
++        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL)));
++    metaDataMap = Collections.unmodifiableMap(tmpMap);
++    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(PScanResult.class, metaDataMap);
++  }
++
++  public PScanResult() {
++  }
++
++  public PScanResult(
++    List<PKeyValue> results,
++    boolean more)
++  {
++    this();
++    this.results = results;
++    this.more = more;
++    setMoreIsSet(true);
++  }
++
++  /**
++   * Performs a deep copy on <i>other</i>.
++   */
++  public PScanResult(PScanResult other) {
++    __isset_bitfield = other.__isset_bitfield;
++    if (other.isSetResults()) {
++      List<PKeyValue> __this__results = new ArrayList<PKeyValue>();
++      for (PKeyValue other_element : other.results) {
++        __this__results.add(new PKeyValue(other_element));
++      }
++      this.results = __this__results;
++    }
++    this.more = other.more;
++  }
++
++  public PScanResult deepCopy() {
++    return new PScanResult(this);
++  }
++
++  @Override
++  public void clear() {
++    this.results = null;
++    setMoreIsSet(false);
++    this.more = false;
++  }
++
++  public int getResultsSize() {
++    return (this.results == null) ? 0 : this.results.size();
++  }
++
++  public java.util.Iterator<PKeyValue> getResultsIterator() {
++    return (this.results == null) ? null : this.results.iterator();
++  }
++
++  public void addToResults(PKeyValue elem) {
++    if (this.results == null) {
++      this.results = new ArrayList<PKeyValue>();
++    }
++    this.results.add(elem);
++  }
++
++  public List<PKeyValue> getResults() {
++    return this.results;
++  }
++
++  public PScanResult setResults(List<PKeyValue> results) {
++    this.results = results;
++    return this;
++  }
++
++  public void unsetResults() {
++    this.results = null;
++  }
++
++  /** Returns true if field results is set (has been assigned a value) and false otherwise */
++  public boolean isSetResults() {
++    return this.results != null;
++  }
++
++  public void setResultsIsSet(boolean value) {
++    if (!value) {
++      this.results = null;
++    }
++  }
++
++  public boolean isMore() {
++    return this.more;
++  }
++
++  public PScanResult setMore(boolean more) {
++    this.more = more;
++    setMoreIsSet(true);
++    return this;
++  }
++
++  public void unsetMore() {
++    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __MORE_ISSET_ID);
++  }
++
++  /** Returns true if field more is set (has been assigned a value) and false otherwise */
++  public boolean isSetMore() {
++    return EncodingUtils.testBit(__isset_bitfield, __MORE_ISSET_ID);
++  }
++
++  public void setMoreIsSet(boolean value) {
++    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __MORE_ISSET_ID, value);
++  }
++
++  public void setFieldValue(_Fields field, Object value) {
++    switch (field) {
++    case RESULTS:
++      if (value == null) {
++        unsetResults();
++      } else {
++        setResults((List<PKeyValue>)value);
++      }
++      break;
++
++    case MORE:
++      if (value == null) {
++        unsetMore();
++      } else {
++        setMore((Boolean)value);
++      }
++      break;
++
++    }
++  }
++
++  public Object getFieldValue(_Fields field) {
++    switch (field) {
++    case RESULTS:
++      return getResults();
++
++    case MORE:
++      return Boolean.valueOf(isMore());
++
++    }
++    throw new IllegalStateException();
++  }
++
++  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
++  public boolean isSet(_Fields field) {
++    if (field == null) {
++      throw new IllegalArgumentException();
++    }
++
++    switch (field) {
++    case RESULTS:
++      return isSetResults();
++    case MORE:
++      return isSetMore();
++    }
++    throw new IllegalStateException();
++  }
++
++  @Override
++  public boolean equals(Object that) {
++    if (that == null)
++      return false;
++    if (that instanceof PScanResult)
++      return this.equals((PScanResult)that);
++    return false;
++  }
++
++  public boolean equals(PScanResult that) {
++    if (that == null)
++      return false;
++
++    boolean this_present_results = true && this.isSetResults();
++    boolean that_present_results = true && that.isSetResults();
++    if (this_present_results || that_present_results) {
++      if (!(this_present_results && that_present_results))
++        return false;
++      if (!this.results.equals(that.results))
++        return false;
++    }
++
++    boolean this_present_more = true;
++    boolean that_present_more = true;
++    if (this_present_more || that_present_more) {
++      if (!(this_present_more && that_present_more))
++        return false;
++      if (this.more != that.more)
++        return false;
++    }
++
++    return true;
++  }
++
++  @Override
++  public int hashCode() {
++    return 0;
++  }
++
++  public int compareTo(PScanResult other) {
++    if (!getClass().equals(other.getClass())) {
++      return getClass().getName().compareTo(other.getClass().getName());
++    }
++
++    int lastComparison = 0;
++    PScanResult typedOther = (PScanResult)other;
++
++    lastComparison = Boolean.valueOf(isSetResults()).compareTo(typedOther.isSetResults());
++    if (lastComparison != 0) {
++      return lastComparison;
++    }
++    if (isSetResults()) {
++      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.results, typedOther.results);
++      if (lastComparison != 0) {
++        return lastComparison;
++      }
++    }
++    lastComparison = Boolean.valueOf(isSetMore()).compareTo(typedOther.isSetMore());
++    if (lastComparison != 0) {
++      return lastComparison;
++    }
++    if (isSetMore()) {
++      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.more, typedOther.more);
++      if (lastComparison != 0) {
++        return lastComparison;
++      }
++    }
++    return 0;
++  }
++
++  public _Fields fieldForId(int fieldId) {
++    return _Fields.findByThriftId(fieldId);
++  }
++
++  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
++    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
++  }
++
++  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
++    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
++  }
++
++  @Override
++  public String toString() {
++    StringBuilder sb = new StringBuilder("PScanResult(");
++    boolean first = true;
++
++    sb.append("results:");
++    if (this.results == null) {
++      sb.append("null");
++    } else {
++      sb.append(this.results);
++    }
++    first = false;
++    if (!first) sb.append(", ");
++    sb.append("more:");
++    sb.append(this.more);
++    first = false;
++    sb.append(")");
++    return sb.toString();
++  }
++
++  public void validate() throws org.apache.thrift.TException {
++    // check for required fields
++    // check for sub-struct validity
++  }
++
++  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
++    try {
++      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
++    } catch (org.apache.thrift.TException te) {
++      throw new java.io.IOException(te);
++    }
++  }
++
++  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
++    try {
++      // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
++      __isset_bitfield = 0;
++      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
++    } catch (org.apache.thrift.TException te) {
++      throw new java.io.IOException(te);
++    }
++  }
++
++  private static class PScanResultStandardSchemeFactory implements SchemeFactory {
++    public PScanResultStandardScheme getScheme() {
++      return new PScanResultStandardScheme();
++    }
++  }
++
++  private static class PScanResultStandardScheme extends StandardScheme<PScanResult> {
++
++    public void read(org.apache.thrift.protocol.TProtocol iprot, PScanResult struct) throws org.apache.thrift.TException {
++      org.apache.thrift.protocol.TField schemeField;
++      iprot.readStructBegin();
++      while (true)
++      {
++        schemeField = iprot.readFieldBegin();
++        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
++          break;
++        }
++        switch (schemeField.id) {
++          case 1: // RESULTS
++            if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
++              {
++                org.apache.thrift.protocol.TList _list0 = iprot.readListBegin();
++                struct.results = new ArrayList<PKeyValue>(_list0.size);
++                for (int _i1 = 0; _i1 < _list0.size; ++_i1)
++                {
++                  PKeyValue _elem2; // required
++                  _elem2 = new PKeyValue();
++                  _elem2.read(iprot);
++                  struct.results.add(_elem2);
++                }
++                iprot.readListEnd();
++              }
++              struct.setResultsIsSet(true);
++            } else { 
++              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
++            }
++            break;
++          case 2: // MORE
++            if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) {
++              struct.more = iprot.readBool();
++              struct.setMoreIsSet(true);
++            } else { 
++              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
++            }
++            break;
++          default:
++            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
++        }
++        iprot.readFieldEnd();
++      }
++      iprot.readStructEnd();
++
++      // check for required fields of primitive type, which can't be checked in the validate method
++      struct.validate();
++    }
++
++    public void write(org.apache.thrift.protocol.TProtocol oprot, PScanResult struct) throws org.apache.thrift.TException {
++      struct.validate();
++
++      oprot.writeStructBegin(STRUCT_DESC);
++      if (struct.results != null) {
++        oprot.writeFieldBegin(RESULTS_FIELD_DESC);
++        {
++          oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.results.size()));
++          for (PKeyValue _iter3 : struct.results)
++          {
++            _iter3.write(oprot);
++          }
++          oprot.writeListEnd();
++        }
++        oprot.writeFieldEnd();
++      }
++      oprot.writeFieldBegin(MORE_FIELD_DESC);
++      oprot.writeBool(struct.more);
++      oprot.writeFieldEnd();
++      oprot.writeFieldStop();
++      oprot.writeStructEnd();
++    }
++
++  }
++
++  private static class PScanResultTupleSchemeFactory implements SchemeFactory {
++    public PScanResultTupleScheme getScheme() {
++      return new PScanResultTupleScheme();
++    }
++  }
++
++  private static class PScanResultTupleScheme extends TupleScheme<PScanResult> {
++
++    @Override
++    public void write(org.apache.thrift.protocol.TProtocol prot, PScanResult struct) throws org.apache.thrift.TException {
++      TTupleProtocol oprot = (TTupleProtocol) prot;
++      BitSet optionals = new BitSet();
++      if (struct.isSetResults()) {
++        optionals.set(0);
++      }
++      if (struct.isSetMore()) {
++        optionals.set(1);
++      }
++      oprot.writeBitSet(optionals, 2);
++      if (struct.isSetResults()) {
++        {
++          oprot.writeI32(struct.results.size());
++          for (PKeyValue _iter4 : struct.results)
++          {
++            _iter4.write(oprot);
++          }
++        }
++      }
++      if (struct.isSetMore()) {
++        oprot.writeBool(struct.more);
++      }
++    }
++
++    @Override
++    public void read(org.apache.thrift.protocol.TProtocol prot, PScanResult struct) throws org.apache.thrift.TException {
++      TTupleProtocol iprot = (TTupleProtocol) prot;
++      BitSet incoming = iprot.readBitSet(2);
++      if (incoming.get(0)) {
++        {
++          org.apache.thrift.protocol.TList _list5 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
++          struct.results = new ArrayList<PKeyValue>(_list5.size);
++          for (int _i6 = 0; _i6 < _list5.size; ++_i6)
++          {
++            PKeyValue _elem7; // required
++            _elem7 = new PKeyValue();
++            _elem7.read(iprot);
++            struct.results.add(_elem7);
++          }
++        }
++        struct.setResultsIsSet(true);
++      }
++      if (incoming.get(1)) {
++        struct.more = iprot.readBool();
++        struct.setMoreIsSet(true);
++      }
++    }
++  }
++
++}
++

http://git-wip-us.apache.org/repos/asf/accumulo/blob/58fcad6e/proxy/src/main/java/org/apache/accumulo/proxy/thrift/PSystemPermission.java
----------------------------------------------------------------------
diff --cc proxy/src/main/java/org/apache/accumulo/proxy/thrift/PSystemPermission.java
index 0000000,0000000..9dfdbb4
new file mode 100644
--- /dev/null
+++ b/proxy/src/main/java/org/apache/accumulo/proxy/thrift/PSystemPermission.java
@@@ -1,0 -1,0 +1,79 @@@
++/*
++ * Licensed to the Apache Software Foundation (ASF) under one or more
++ * contributor license agreements.  See the NOTICE file distributed with
++ * this work for additional information regarding copyright ownership.
++ * The ASF licenses this file to You under the Apache License, Version 2.0
++ * (the "License"); you may not use this file except in compliance with
++ * the License.  You may obtain a copy of the License at
++ *
++ *     http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++/**
++ * Autogenerated by Thrift Compiler (0.9.0)
++ *
++ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
++ *  @generated
++ */
++package org.apache.accumulo.proxy.thrift;
++
++
++import java.util.Map;
++import java.util.HashMap;
++import org.apache.thrift.TEnum;
++
++@SuppressWarnings("all") public enum PSystemPermission implements org.apache.thrift.TEnum {
++  GRANT(0),
++  CREATE_TABLE(1),
++  DROP_TABLE(2),
++  ALTER_TABLE(3),
++  CREATE_USER(4),
++  DROP_USER(5),
++  ALTER_USER(6),
++  SYSTEM(7);
++
++  private final int value;
++
++  private PSystemPermission(int value) {
++    this.value = value;
++  }
++
++  /**
++   * Get the integer value of this enum value, as defined in the Thrift IDL.
++   */
++  public int getValue() {
++    return value;
++  }
++
++  /**
++   * Find a the enum type by its integer value, as defined in the Thrift IDL.
++   * @return null if the value is not found.
++   */
++  public static PSystemPermission findByValue(int value) { 
++    switch (value) {
++      case 0:
++        return GRANT;
++      case 1:
++        return CREATE_TABLE;
++      case 2:
++        return DROP_TABLE;
++      case 3:
++        return ALTER_TABLE;
++      case 4:
++        return CREATE_USER;
++      case 5:
++        return DROP_USER;
++      case 6:
++        return ALTER_USER;
++      case 7:
++        return SYSTEM;
++      default:
++        return null;
++    }
++  }
++}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/58fcad6e/proxy/src/main/java/org/apache/accumulo/proxy/thrift/PTablePermission.java
----------------------------------------------------------------------
diff --cc proxy/src/main/java/org/apache/accumulo/proxy/thrift/PTablePermission.java
index 0000000,0000000..1e57c9f
new file mode 100644
--- /dev/null
+++ b/proxy/src/main/java/org/apache/accumulo/proxy/thrift/PTablePermission.java
@@@ -1,0 -1,0 +1,73 @@@
++/*
++ * Licensed to the Apache Software Foundation (ASF) under one or more
++ * contributor license agreements.  See the NOTICE file distributed with
++ * this work for additional information regarding copyright ownership.
++ * The ASF licenses this file to You under the Apache License, Version 2.0
++ * (the "License"); you may not use this file except in compliance with
++ * the License.  You may obtain a copy of the License at
++ *
++ *     http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++/**
++ * Autogenerated by Thrift Compiler (0.9.0)
++ *
++ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
++ *  @generated
++ */
++package org.apache.accumulo.proxy.thrift;
++
++
++import java.util.Map;
++import java.util.HashMap;
++import org.apache.thrift.TEnum;
++
++@SuppressWarnings("all") public enum PTablePermission implements org.apache.thrift.TEnum {
++  READ(2),
++  WRITE(3),
++  BULK_IMPORT(4),
++  ALTER_TABLE(5),
++  GRANT(6),
++  DROP_TABLE(7);
++
++  private final int value;
++
++  private PTablePermission(int value) {
++    this.value = value;
++  }
++
++  /**
++   * Get the integer value of this enum value, as defined in the Thrift IDL.
++   */
++  public int getValue() {
++    return value;
++  }
++
++  /**
++   * Find a the enum type by its integer value, as defined in the Thrift IDL.
++   * @return null if the value is not found.
++   */
++  public static PTablePermission findByValue(int value) { 
++    switch (value) {
++      case 2:
++        return READ;
++      case 3:
++        return WRITE;
++      case 4:
++        return BULK_IMPORT;
++      case 5:
++        return ALTER_TABLE;
++      case 6:
++        return GRANT;
++      case 7:
++        return DROP_TABLE;
++      default:
++        return null;
++    }
++  }
++}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/58fcad6e/proxy/src/test/java/org/apache/accumulo/TestProxyInstanceOperations.java
----------------------------------------------------------------------
diff --cc proxy/src/test/java/org/apache/accumulo/TestProxyInstanceOperations.java
index 0000000,0000000..a746ad6
new file mode 100644
--- /dev/null
+++ b/proxy/src/test/java/org/apache/accumulo/TestProxyInstanceOperations.java
@@@ -1,0 -1,0 +1,82 @@@
++/*
++ * Licensed to the Apache Software Foundation (ASF) under one or more
++ * contributor license agreements.  See the NOTICE file distributed with
++ * this work for additional information regarding copyright ownership.
++ * The ASF licenses this file to You under the Apache License, Version 2.0
++ * (the "License"); you may not use this file except in compliance with
++ * the License.  You may obtain a copy of the License at
++ *
++ *     http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.accumulo;
++
++import static org.junit.Assert.assertEquals;
++import static org.junit.Assert.assertNull;
++import static org.junit.Assert.assertTrue;
++
++import java.nio.ByteBuffer;
++import java.util.Properties;
++
++import org.apache.accumulo.proxy.Proxy;
++import org.apache.accumulo.proxy.TestProxyClient;
++import org.apache.accumulo.proxy.thrift.UserPass;
++import org.apache.thrift.TException;
++import org.apache.thrift.server.TServer;
++import org.junit.AfterClass;
++import org.junit.BeforeClass;
++import org.junit.Test;
++
++public class TestProxyInstanceOperations {
++  protected static TServer proxy;
++  protected static Thread thread;
++  protected static TestProxyClient tpc;
++  protected static UserPass userpass;
++  protected static final int port = 10197;
++  
++  @BeforeClass
++  public static void setup() throws Exception {
++    Properties prop = new Properties();
++    prop.setProperty("org.apache.accumulo.proxy.ProxyServer.useMockInstance", "true");
++    
++    proxy = Proxy.createProxyServer(Class.forName("org.apache.accumulo.proxy.thrift.AccumuloProxy"),
++        Class.forName("org.apache.accumulo.proxy.ProxyServer"), port, prop);
++    thread = new Thread() {
++      @Override
++      public void run() {
++        proxy.serve();
++      }
++    };
++    thread.start();
++    tpc = new TestProxyClient("localhost", port);
++    userpass = new UserPass("root", ByteBuffer.wrap("".getBytes()));
++  }
++  
++  @AfterClass
++  public static void tearDown() throws InterruptedException {
++    proxy.stop();
++    thread.join();
++  }
++  
++  @Test
++  public void properties() throws TException {
++    tpc.proxy().instanceOperations_setProperty(userpass, "test.systemprop", "whistletips");
++    
++    assertEquals(tpc.proxy().instanceOperations_getSystemConfiguration(userpass).get("test.systemprop"), "whistletips");
++    tpc.proxy().instanceOperations_removeProperty(userpass, "test.systemprop");
++    assertNull(tpc.proxy().instanceOperations_getSystemConfiguration(userpass).get("test.systemprop"));
++    
++  }
++  
++  @Test
++  public void testClassLoad() throws TException {
++    assertTrue(tpc.proxy().instanceOperations_testClassLoad(userpass, "org.apache.accumulo.core.iterators.user.RegExFilter",
++        "org.apache.accumulo.core.iterators.Filter"));
++  }
++  
++}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/58fcad6e/proxy/src/test/java/org/apache/accumulo/TestProxyReadWrite.java
----------------------------------------------------------------------
diff --cc proxy/src/test/java/org/apache/accumulo/TestProxyReadWrite.java
index 0000000,0000000..0b55261
new file mode 100644
--- /dev/null
+++ b/proxy/src/test/java/org/apache/accumulo/TestProxyReadWrite.java
@@@ -1,0 -1,0 +1,388 @@@
++/*
++ * Licensed to the Apache Software Foundation (ASF) under one or more
++ * contributor license agreements.  See the NOTICE file distributed with
++ * this work for additional information regarding copyright ownership.
++ * The ASF licenses this file to You under the Apache License, Version 2.0
++ * (the "License"); you may not use this file except in compliance with
++ * the License.  You may obtain a copy of the License at
++ *
++ *     http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.accumulo;
++
++import static org.junit.Assert.assertEquals;
++
++import java.nio.ByteBuffer;
++import java.util.ArrayList;
++import java.util.Collections;
++import java.util.HashMap;
++import java.util.HashSet;
++import java.util.List;
++import java.util.Map;
++import java.util.Properties;
++import java.util.Set;
++
++import org.apache.accumulo.core.client.IteratorSetting;
++import org.apache.accumulo.core.iterators.user.RegExFilter;
++import org.apache.accumulo.proxy.Proxy;
++import org.apache.accumulo.proxy.TestProxyClient;
++import org.apache.accumulo.proxy.Util;
++import org.apache.accumulo.proxy.thrift.PColumnUpdate;
++import org.apache.accumulo.proxy.thrift.PIteratorSetting;
++import org.apache.accumulo.proxy.thrift.PKey;
++import org.apache.accumulo.proxy.thrift.PKeyValue;
++import org.apache.accumulo.proxy.thrift.PRange;
++import org.apache.accumulo.proxy.thrift.PScanResult;
++import org.apache.accumulo.proxy.thrift.UserPass;
++import org.apache.thrift.server.TServer;
++import org.junit.After;
++import org.junit.AfterClass;
++import org.junit.Before;
++import org.junit.BeforeClass;
++import org.junit.Test;
++
++public class TestProxyReadWrite {
++  protected static TServer proxy;
++  protected static Thread thread;
++  protected static TestProxyClient tpc;
++  protected static UserPass userpass;
++  protected static final int port = 10194;
++  protected static final String testtable = "testtable";
++  
++  @BeforeClass
++  public static void setup() throws Exception {
++    Properties prop = new Properties();
++    prop.setProperty("org.apache.accumulo.proxy.ProxyServer.useMockInstance", "true");
++    
++    proxy = Proxy.createProxyServer(Class.forName("org.apache.accumulo.proxy.thrift.AccumuloProxy"),
++        Class.forName("org.apache.accumulo.proxy.ProxyServer"), port, prop);
++    thread = new Thread() {
++      @Override
++      public void run() {
++        proxy.serve();
++      }
++    };
++    thread.start();
++    tpc = new TestProxyClient("localhost", port);
++    userpass = new UserPass("root", ByteBuffer.wrap("".getBytes()));
++  }
++  
++  @AfterClass
++  public static void tearDown() throws InterruptedException {
++    proxy.stop();
++    thread.join();
++  }
++  
++  @Before
++  public void makeTestTable() throws Exception {
++    tpc.proxy().tableOperations_create(userpass, testtable);
++  }
++  
++  @After
++  public void deleteTestTable() throws Exception {
++    tpc.proxy().tableOperations_delete(userpass, testtable);
++  }
++  
++  private static void addMutation(Map<ByteBuffer,List<PColumnUpdate>> mutations, String row, String cf, String cq, String value) {
++    PColumnUpdate update = new PColumnUpdate(ByteBuffer.wrap(cf.getBytes()), ByteBuffer.wrap(cq.getBytes()), ByteBuffer.wrap(value.getBytes()));
++    mutations.put(ByteBuffer.wrap(row.getBytes()), Collections.singletonList(update));
++  }
++  
++  private static void addMutation(Map<ByteBuffer,List<PColumnUpdate>> mutations, String row, String cf, String cq, String vis, String value) {
++    PColumnUpdate update = new PColumnUpdate(ByteBuffer.wrap(cf.getBytes()), ByteBuffer.wrap(cq.getBytes()), ByteBuffer.wrap(value.getBytes()));
++    update.setColVisibility(vis.getBytes());
++    mutations.put(ByteBuffer.wrap(row.getBytes()), Collections.singletonList(update));
++  }
++  
++  /**
++   * Insert 100000 cells which have as the row [0..99999] (padded with zeros). Set a range so only the entries between -Inf...5 come back (there should be
++   * 50,000)
++   * 
++   * @throws Exception
++   */
++  @Test
++  public void readWriteBatchOneShotWithRange() throws Exception {
++    int maxInserts = 100000;
++    Map<ByteBuffer,List<PColumnUpdate>> mutations = new HashMap<ByteBuffer,List<PColumnUpdate>>();
++    String format = "%1$05d";
++    for (int i = 0; i < maxInserts; i++) {
++      addMutation(mutations, String.format(format, i), "cf" + i, "cq" + i, Util.randString(10));
++      
++      if (i % 1000 == 0 || i == maxInserts - 1) {
++        tpc.proxy().updateAndFlush(userpass, testtable, mutations, null);
++        mutations.clear();
++      }
++    }
++    
++    PKey stop = new PKey();
++    stop.setRow("5".getBytes());
++    List<PRange> pranges = new ArrayList<PRange>();
++    pranges.add(new PRange(null, stop));
++    String cookie = tpc.proxy().createBatchScanner(userpass, testtable, null, null, pranges);
++    
++    int i = 0;
++    boolean hasNext = true;
++    
++    int k = 1000;
++    while (hasNext) {
++      PScanResult kvList = tpc.proxy().scanner_next_k(cookie, k);
++      i += kvList.getResultsSize();
++      hasNext = kvList.isMore();
++    }
++    assertEquals(i, 50000);
++  }
++  
++  /**
++   * Insert 100000 cells which have as the row [0..99999] (padded with zeros). Filter the results so only the even numbers come back.
++   * 
++   * @throws Exception
++   */
++  @Test
++  public void readWriteBatchOneShotWithFilterIterator() throws Exception {
++    int maxInserts = 10000;
++    Map<ByteBuffer,List<PColumnUpdate>> mutations = new HashMap<ByteBuffer,List<PColumnUpdate>>();
++    String format = "%1$05d";
++    for (int i = 0; i < maxInserts; i++) {
++      addMutation(mutations, String.format(format, i), "cf" + i, "cq" + i, Util.randString(10));
++      
++      if (i % 1000 == 0 || i == maxInserts - 1) {
++        tpc.proxy().updateAndFlush(userpass, testtable, mutations, null);
++        mutations.clear();
++      }
++      
++    }
++    
++    String regex = ".*[02468]";
++    
++    IteratorSetting is = new IteratorSetting(50, regex, RegExFilter.class);
++    RegExFilter.setRegexs(is, regex, null, null, null, false);
++    
++    PIteratorSetting pis = Util.iteratorSetting2ProxyIteratorSetting(is);
++    String cookie = tpc.proxy().createBatchScanner(userpass, testtable, null, pis, null);
++    
++    int i = 0;
++    boolean hasNext = true;
++    
++    int k = 1000;
++    while (hasNext) {
++      PScanResult kvList = tpc.proxy().scanner_next_k(cookie, k);
++      for (PKeyValue kv : kvList.getResults()) {
++        assertEquals(Integer.parseInt(new String(kv.getKey().getRow())), i);
++        
++        i += 2;
++      }
++      hasNext = kvList.isMore();
++    }
++  }
++  
++  @Test
++  public void readWriteOneShotWithRange() throws Exception {
++    int maxInserts = 100000;
++    Map<ByteBuffer,List<PColumnUpdate>> mutations = new HashMap<ByteBuffer,List<PColumnUpdate>>();
++    String format = "%1$05d";
++    for (int i = 0; i < maxInserts; i++) {
++      addMutation(mutations, String.format(format, i), "cf" + i, "cq" + i, Util.randString(10));
++      
++      if (i % 1000 == 0 || i == maxInserts - 1) {
++        tpc.proxy().updateAndFlush(userpass, testtable, mutations, null);
++        mutations.clear();
++      }
++    }
++    
++    PKey stop = new PKey();
++    stop.setRow("5".getBytes());
++    String cookie = tpc.proxy().createScanner(userpass, testtable, null, null, new PRange(null, stop));
++    
++    int i = 0;
++    boolean hasNext = true;
++    
++    int k = 1000;
++    while (hasNext) {
++      PScanResult kvList = tpc.proxy().scanner_next_k(cookie, k);
++      i += kvList.getResultsSize();
++      hasNext = kvList.isMore();
++    }
++    assertEquals(i, 50000);
++  }
++  
++  /**
++   * Insert 100000 cells which have as the row [0..99999] (padded with zeros). Filter the results so only the even numbers come back.
++   * 
++   * @throws Exception
++   */
++  @Test
++  public void readWriteOneShotWithFilterIterator() throws Exception {
++    int maxInserts = 10000;
++    Map<ByteBuffer,List<PColumnUpdate>> mutations = new HashMap<ByteBuffer,List<PColumnUpdate>>();
++    String format = "%1$05d";
++    for (int i = 0; i < maxInserts; i++) {
++      addMutation(mutations, String.format(format, i), "cf" + i, "cq" + i, Util.randString(10));
++      
++      if (i % 1000 == 0 || i == maxInserts - 1) {
++        
++        tpc.proxy().updateAndFlush(userpass, testtable, mutations, null);
++        mutations.clear();
++        
++      }
++      
++    }
++    
++    String regex = ".*[02468]";
++    
++    IteratorSetting is = new IteratorSetting(50, regex, RegExFilter.class);
++    RegExFilter.setRegexs(is, regex, null, null, null, false);
++    
++    PIteratorSetting pis = Util.iteratorSetting2ProxyIteratorSetting(is);
++    String cookie = tpc.proxy().createScanner(userpass, testtable, null, pis, null);
++    
++    int i = 0;
++    boolean hasNext = true;
++    
++    int k = 1000;
++    while (hasNext) {
++      PScanResult kvList = tpc.proxy().scanner_next_k(cookie, k);
++      for (PKeyValue kv : kvList.getResults()) {
++        assertEquals(Integer.parseInt(new String(kv.getKey().getRow())), i);
++        
++        i += 2;
++      }
++      hasNext = kvList.isMore();
++    }
++  }
++  
++  // @Test
++  // This test takes kind of a long time. Enable it if you think you may have memory issues.
++  public void manyWritesAndReads() throws Exception {
++    int maxInserts = 1000000;
++    Map<ByteBuffer,List<PColumnUpdate>> mutations = new HashMap<ByteBuffer,List<PColumnUpdate>>();
++    String format = "%1$06d";
++    String writer = tpc.proxy().createWriter(userpass, testtable);
++    for (int i = 0; i < maxInserts; i++) {
++      addMutation(mutations, String.format(format, i), "cf" + i, "cq" + i, Util.randString(10));
++      
++      if (i % 1000 == 0 || i == maxInserts - 1) {
++        
++        tpc.proxy().writer_update(writer, mutations, null);
++        mutations.clear();
++        
++      }
++      
++    }
++    
++    tpc.proxy().writer_flush(writer);
++    tpc.proxy().writer_close(writer);
++    
++    String cookie = tpc.proxy().createBatchScanner(userpass, testtable, null, null, null);
++    
++    int i = 0;
++    boolean hasNext = true;
++    
++    int k = 1000;
++    while (hasNext) {
++      PScanResult kvList = tpc.proxy().scanner_next_k(cookie, k);
++      for (PKeyValue kv : kvList.getResults()) {
++        assertEquals(Integer.parseInt(new String(kv.getKey().getRow())), i);
++        i++;
++      }
++      hasNext = kvList.isMore();
++      if (hasNext)
++        assertEquals(k, kvList.getResults().size());
++    }
++    assertEquals(maxInserts, i);
++  }
++  
++  @Test
++  public void asynchReadWrite() throws Exception {
++    int maxInserts = 10000;
++    Map<ByteBuffer,List<PColumnUpdate>> mutations = new HashMap<ByteBuffer,List<PColumnUpdate>>();
++    String format = "%1$05d";
++    String writer = tpc.proxy().createWriter(userpass, testtable);
++    for (int i = 0; i < maxInserts; i++) {
++      addMutation(mutations, String.format(format, i), "cf" + i, "cq" + i, Util.randString(10));
++      
++      if (i % 1000 == 0 || i == maxInserts - 1) {
++        tpc.proxy().writer_update(writer, mutations, null);
++        mutations.clear();
++      }
++    }
++    
++    tpc.proxy().writer_flush(writer);
++    tpc.proxy().writer_close(writer);
++    
++    String regex = ".*[02468]";
++    
++    IteratorSetting is = new IteratorSetting(50, regex, RegExFilter.class);
++    RegExFilter.setRegexs(is, regex, null, null, null, false);
++    
++    PIteratorSetting pis = Util.iteratorSetting2ProxyIteratorSetting(is);
++    String cookie = tpc.proxy().createBatchScanner(userpass, testtable, null, pis, null);
++    
++    int i = 0;
++    boolean hasNext = true;
++    
++    int k = 1000;
++    int numRead = 0;
++    while (hasNext) {
++      PScanResult kvList = tpc.proxy().scanner_next_k(cookie, k);
++      for (PKeyValue kv : kvList.getResults()) {
++        assertEquals(i, Integer.parseInt(new String(kv.getKey().getRow())));
++        numRead++;
++        i += 2;
++      }
++      hasNext = kvList.isMore();
++    }
++    assertEquals(maxInserts / 2, numRead);
++  }
++  
++  @Test
++  public void testVisibility() throws Exception {
++    
++    Set<String> auths = new HashSet<String>();
++    auths.add("even");
++    tpc.proxy().securityOperations_changeUserAuthorizations(userpass, "root", auths);
++    
++    int maxInserts = 10000;
++    Map<ByteBuffer,List<PColumnUpdate>> mutations = new HashMap<ByteBuffer,List<PColumnUpdate>>();
++    String format = "%1$05d";
++    String writer = tpc.proxy().createWriter(userpass, testtable);
++    for (int i = 0; i < maxInserts; i++) {
++      if (i % 2 == 0)
++        addMutation(mutations, String.format(format, i), "cf" + i, "cq" + i, "even", Util.randString(10));
++      else
++        addMutation(mutations, String.format(format, i), "cf" + i, "cq" + i, "odd", Util.randString(10));
++      
++      if (i % 1000 == 0 || i == maxInserts - 1) {
++        tpc.proxy().writer_update(writer, mutations, null);
++        mutations.clear();
++      }
++    }
++    
++    tpc.proxy().writer_flush(writer);
++    tpc.proxy().writer_close(writer);
++    String cookie = tpc.proxy().createBatchScanner(userpass, testtable, auths, null, null);
++    
++    int i = 0;
++    boolean hasNext = true;
++    
++    int k = 1000;
++    int numRead = 0;
++    while (hasNext) {
++      PScanResult kvList = tpc.proxy().scanner_next_k(cookie, k);
++      for (PKeyValue kv : kvList.getResults()) {
++        assertEquals(Integer.parseInt(new String(kv.getKey().getRow())), i);
++        i += 2;
++        numRead++;
++      }
++      hasNext = kvList.isMore();
++      
++    }
++    assertEquals(maxInserts / 2, numRead);
++  }
++  
++}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/58fcad6e/proxy/src/test/java/org/apache/accumulo/TestProxySecurityOperations.java
----------------------------------------------------------------------
diff --cc proxy/src/test/java/org/apache/accumulo/TestProxySecurityOperations.java
index 0000000,0000000..b221285
new file mode 100644
--- /dev/null
+++ b/proxy/src/test/java/org/apache/accumulo/TestProxySecurityOperations.java
@@@ -1,0 -1,0 +1,142 @@@
++/*
++ * Licensed to the Apache Software Foundation (ASF) under one or more
++ * contributor license agreements.  See the NOTICE file distributed with
++ * this work for additional information regarding copyright ownership.
++ * The ASF licenses this file to You under the Apache License, Version 2.0
++ * (the "License"); you may not use this file except in compliance with
++ * the License.  You may obtain a copy of the License at
++ *
++ *     http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.accumulo;
++
++import static org.junit.Assert.assertEquals;
++import static org.junit.Assert.assertFalse;
++import static org.junit.Assert.assertTrue;
++
++import java.nio.ByteBuffer;
++import java.util.HashSet;
++import java.util.List;
++import java.util.Properties;
++
++import org.apache.accumulo.proxy.Proxy;
++import org.apache.accumulo.proxy.TestProxyClient;
++import org.apache.accumulo.proxy.thrift.PSystemPermission;
++import org.apache.accumulo.proxy.thrift.PTablePermission;
++import org.apache.accumulo.proxy.thrift.UserPass;
++import org.apache.thrift.TException;
++import org.apache.thrift.server.TServer;
++import org.junit.After;
++import org.junit.AfterClass;
++import org.junit.Before;
++import org.junit.BeforeClass;
++import org.junit.Test;
++
++public class TestProxySecurityOperations {
++  protected static TServer proxy;
++  protected static Thread thread;
++  protected static TestProxyClient tpc;
++  protected static UserPass userpass;
++  protected static final int port = 10196;
++  protected static final String testtable = "testtable";
++  protected static final String testuser = "VonJines";
++  protected static final ByteBuffer testpw = ByteBuffer.wrap("fiveones".getBytes());
++  
++  @BeforeClass
++  public static void setup() throws Exception {
++    Properties prop = new Properties();
++    prop.setProperty("org.apache.accumulo.proxy.ProxyServer.useMockInstance", "true");
++    
++    proxy = Proxy.createProxyServer(Class.forName("org.apache.accumulo.proxy.thrift.AccumuloProxy"),
++        Class.forName("org.apache.accumulo.proxy.ProxyServer"), port, prop);
++    thread = new Thread() {
++      @Override
++      public void run() {
++        proxy.serve();
++      }
++    };
++    thread.start();
++    
++    tpc = new TestProxyClient("localhost", port);
++    userpass = new UserPass("root", ByteBuffer.wrap("".getBytes()));
++  }
++  
++  @AfterClass
++  public static void tearDown() throws InterruptedException {
++    proxy.stop();
++    thread.join();
++  }
++  
++  @Before
++  public void makeTestTableAndUser() throws Exception {
++    tpc.proxy().tableOperations_create(userpass, testtable);
++    tpc.proxy().securityOperations_createUser(userpass, testuser, testpw, new HashSet<String>());
++  }
++  
++  @After
++  public void deleteTestTable() throws Exception {
++    tpc.proxy().tableOperations_delete(userpass, testtable);
++    tpc.proxy().securityOperations_dropUser(userpass, testuser);
++  }
++  
++  @Test
++  public void create() throws TException {
++    tpc.proxy().securityOperations_createUser(userpass, testuser + "2", testpw, new HashSet<String>());
++    assertTrue(tpc.proxy().securityOperations_listUsers(userpass).contains(testuser + "2"));
++    tpc.proxy().securityOperations_dropUser(userpass, testuser + "2");
++    assertTrue(!tpc.proxy().securityOperations_listUsers(userpass).contains(testuser + "2"));
++  }
++  
++  @Test
++  public void authenticate() throws TException {
++    assertTrue(tpc.proxy().securityOperations_authenticateUser(userpass, testuser, testpw));
++    assertFalse(tpc.proxy().securityOperations_authenticateUser(userpass, "EvilUser", testpw));
++    
++    tpc.proxy().securityOperations_changeUserPassword(userpass, testuser, ByteBuffer.wrap("newpass".getBytes()));
++    assertFalse(tpc.proxy().securityOperations_authenticateUser(userpass, testuser, testpw));
++    assertTrue(tpc.proxy().securityOperations_authenticateUser(userpass, testuser, ByteBuffer.wrap("newpass".getBytes())));
++    
++  }
++  
++  @Test
++  public void tablePermissions() throws TException {
++    tpc.proxy().securityOperations_grantTablePermission(userpass, testuser, testtable, PTablePermission.ALTER_TABLE);
++    assertTrue(tpc.proxy().securityOperations_hasTablePermission(userpass, testuser, testtable, PTablePermission.ALTER_TABLE));
++    
++    tpc.proxy().securityOperations_revokeTablePermission(userpass, testuser, testtable, PTablePermission.ALTER_TABLE);
++    assertFalse(tpc.proxy().securityOperations_hasTablePermission(userpass, testuser, testtable, PTablePermission.ALTER_TABLE));
++    
++  }
++  
++  @Test
++  public void systemPermissions() throws TException {
++    tpc.proxy().securityOperations_grantSystemPermission(userpass, testuser, PSystemPermission.ALTER_USER);
++    assertTrue(tpc.proxy().securityOperations_hasSystemPermission(userpass, testuser, PSystemPermission.ALTER_USER));
++    
++    tpc.proxy().securityOperations_revokeSystemPermission(userpass, testuser, PSystemPermission.ALTER_USER);
++    assertFalse(tpc.proxy().securityOperations_hasSystemPermission(userpass, testuser, PSystemPermission.ALTER_USER));
++    
++  }
++  
++  @Test
++  public void auths() throws TException {
++    HashSet<String> newauths = new HashSet<String>();
++    newauths.add("BBR");
++    newauths.add("Barney");
++    tpc.proxy().securityOperations_changeUserAuthorizations(userpass, testuser, newauths);
++    List<ByteBuffer> actualauths = tpc.proxy().securityOperations_getUserAuthorizations(userpass, testuser);
++    assertEquals(actualauths.size(), newauths.size());
++    
++    for (ByteBuffer auth : actualauths) {
++      System.out.println(auth);
++      assertTrue(newauths.contains(new String(auth.array())));
++    }
++  }
++  
++}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/58fcad6e/proxy/src/test/java/org/apache/accumulo/TestProxyTableOperations.java
----------------------------------------------------------------------
diff --cc proxy/src/test/java/org/apache/accumulo/TestProxyTableOperations.java
index 0000000,0000000..8904b06
new file mode 100644
--- /dev/null
+++ b/proxy/src/test/java/org/apache/accumulo/TestProxyTableOperations.java
@@@ -1,0 -1,0 +1,219 @@@
++/*
++ * Licensed to the Apache Software Foundation (ASF) under one or more
++ * contributor license agreements.  See the NOTICE file distributed with
++ * this work for additional information regarding copyright ownership.
++ * The ASF licenses this file to You under the Apache License, Version 2.0
++ * (the "License"); you may not use this file except in compliance with
++ * the License.  You may obtain a copy of the License at
++ *
++ *     http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++package org.apache.accumulo;
++
++import static org.junit.Assert.assertEquals;
++import static org.junit.Assert.assertFalse;
++import static org.junit.Assert.assertNull;
++import static org.junit.Assert.assertTrue;
++
++import java.nio.ByteBuffer;
++import java.util.Collections;
++import java.util.HashMap;
++import java.util.HashSet;
++import java.util.List;
++import java.util.Map;
++import java.util.Properties;
++import java.util.Set;
++
++import org.apache.accumulo.proxy.Proxy;
++import org.apache.accumulo.proxy.TestProxyClient;
++import org.apache.accumulo.proxy.thrift.PColumnUpdate;
++import org.apache.accumulo.proxy.thrift.UserPass;
++import org.apache.thrift.TException;
++import org.apache.thrift.server.TServer;
++import org.junit.After;
++import org.junit.AfterClass;
++import org.junit.Before;
++import org.junit.BeforeClass;
++import org.junit.Test;
++
++public class TestProxyTableOperations {
++  
++  protected static TServer proxy;
++  protected static Thread thread;
++  protected static TestProxyClient tpc;
++  protected static UserPass userpass;
++  protected static final int port = 10195;
++  protected static final String testtable = "testtable";
++  
++  @BeforeClass
++  public static void setup() throws Exception {
++    Properties prop = new Properties();
++    prop.setProperty("org.apache.accumulo.proxy.ProxyServer.useMockInstance", "true");
++    
++    proxy = Proxy.createProxyServer(Class.forName("org.apache.accumulo.proxy.thrift.AccumuloProxy"),
++        Class.forName("org.apache.accumulo.proxy.ProxyServer"), port, prop);
++    thread = new Thread() {
++      @Override
++      public void run() {
++        proxy.serve();
++      }
++    };
++    thread.start();
++    tpc = new TestProxyClient("localhost", port);
++    userpass = new UserPass("root", ByteBuffer.wrap("".getBytes()));
++  }
++  
++  @AfterClass
++  public static void tearDown() throws InterruptedException {
++    proxy.stop();
++    thread.join();
++  }
++  
++  @Before
++  public void makeTestTable() throws Exception {
++    tpc.proxy().tableOperations_create(userpass, testtable);
++  }
++  
++  @After
++  public void deleteTestTable() throws Exception {
++    tpc.proxy().tableOperations_delete(userpass, testtable);
++  }
++  
++  @Test
++  public void ping() throws Exception {
++    tpc.proxy().ping(userpass);
++  }
++  
++  @Test
++  public void createExistsDelete() throws TException {
++    assertFalse(tpc.proxy().tableOperations_exists(userpass, "testtable2"));
++    tpc.proxy().tableOperations_create(userpass, "testtable2");
++    assertTrue(tpc.proxy().tableOperations_exists(userpass, "testtable2"));
++    tpc.proxy().tableOperations_delete(userpass, "testtable2");
++    assertFalse(tpc.proxy().tableOperations_exists(userpass, "testtable2"));
++  }
++  
++  @Test
++  public void listRename() throws TException {
++    assertFalse(tpc.proxy().tableOperations_exists(userpass, "testtable2"));
++    tpc.proxy().tableOperations_rename(userpass, testtable, "testtable2");
++    assertTrue(tpc.proxy().tableOperations_exists(userpass, "testtable2"));
++    tpc.proxy().tableOperations_rename(userpass, "testtable2", testtable);
++    assertTrue(tpc.proxy().tableOperations_list(userpass).contains("testtable"));
++    
++  }
++  
++  // This test does not yet function because the backing Mock instance does not yet support merging
++  // TODO: add back in as a test when Mock is improved
++  // @Test
++  public void merge() throws TException {
++    Set<String> splits = new HashSet<String>();
++    splits.add("a");
++    splits.add("c");
++    splits.add("z");
++    tpc.proxy().tableOperations_addSplits(userpass, testtable, splits);
++    
++    tpc.proxy().tableOperations_merge(userpass, testtable, "b", "d");
++    
++    splits.remove("c");
++    
++    List<String> tableSplits = tpc.proxy().tableOperations_getSplits(userpass, testtable, 10);
++    
++    for (String split : tableSplits)
++      assertTrue(splits.contains(split));
++    assertTrue(tableSplits.size() == splits.size());
++    
++  }
++  
++  @Test
++  public void splits() throws TException {
++    Set<String> splits = new HashSet<String>();
++    splits.add("a");
++    splits.add("b");
++    splits.add("z");
++    tpc.proxy().tableOperations_addSplits(userpass, testtable, splits);
++    
++    List<String> tableSplits = tpc.proxy().tableOperations_getSplits(userpass, testtable, 10);
++    
++    for (String split : tableSplits)
++      assertTrue(splits.contains(split));
++    assertTrue(tableSplits.size() == splits.size());
++  }
++  
++  @Test
++  public void constraints() throws TException {
++    int cid = tpc.proxy().tableOperations_addConstraint(userpass, testtable, "org.apache.accumulo.TestConstraint");
++    Map<String,Integer> constraints = tpc.proxy().tableOperations_listConstraints(userpass, testtable);
++    assertEquals((int) constraints.get("org.apache.accumulo.TestConstraint"), cid);
++    tpc.proxy().tableOperations_removeConstraint(userpass, testtable, cid);
++    constraints = tpc.proxy().tableOperations_listConstraints(userpass, testtable);
++    assertNull(constraints.get("org.apache.accumulo.TestConstraint"));
++  }
++  
++  // This test does not yet function because the backing Mock instance does not yet support locality groups
++  // TODO: add back in as a test when Mock is improved
++  // @Test
++  public void localityGroups() throws TException {
++    Map<String,Set<String>> groups = new HashMap<String,Set<String>>();
++    Set<String> group1 = new HashSet<String>();
++    group1.add("cf1");
++    groups.put("group1", group1);
++    Set<String> group2 = new HashSet<String>();
++    group2.add("cf2");
++    group2.add("cf3");
++    groups.put("group2", group2);
++    tpc.proxy().tableOperations_setLocalityGroups(userpass, testtable, groups);
++    
++    Map<String,Set<String>> actualGroups = tpc.proxy().tableOperations_getLocalityGroups(userpass, testtable);
++    
++    assertEquals(groups.size(), actualGroups.size());
++    for (String groupName : groups.keySet()) {
++      assertTrue(actualGroups.containsKey(groupName));
++      assertEquals(groups.get(groupName).size(), actualGroups.get(groupName).size());
++      for (String cf : groups.get(groupName)) {
++        assertTrue(actualGroups.get(groupName).contains(cf));
++      }
++    }
++  }
++  
++  @Test
++  public void tableProperties() throws TException {
++    tpc.proxy().tableOperations_setProperty(userpass, testtable, "test.property1", "wharrrgarbl");
++    assertEquals(tpc.proxy().tableOperations_getProperties(userpass, testtable).get("test.property1"), "wharrrgarbl");
++    tpc.proxy().tableOperations_removeProperty(userpass, testtable, "test.property1");
++    assertNull(tpc.proxy().tableOperations_getProperties(userpass, testtable).get("test.property1"));
++  }
++  
++  private static void addMutation(Map<ByteBuffer,List<PColumnUpdate>> mutations, String row, String cf, String cq, String value) {
++    PColumnUpdate update = new PColumnUpdate(ByteBuffer.wrap(cf.getBytes()), ByteBuffer.wrap(cq.getBytes()), ByteBuffer.wrap(value.getBytes()));
++    mutations.put(ByteBuffer.wrap(row.getBytes()), Collections.singletonList(update));
++  }
++  
++  @Test
++  public void tableOperationsRowMethods() throws TException {
++    List<ByteBuffer> auths = tpc.proxy().securityOperations_getUserAuthorizations(userpass, "root");
++    // System.out.println(auths);
++    Map<ByteBuffer,List<PColumnUpdate>> mutations = new HashMap<ByteBuffer,List<PColumnUpdate>>();
++    for (int i = 0; i < 10; i++) {
++      addMutation(mutations, "" + i, "cf", "cq", "");
++    }
++    tpc.proxy().updateAndFlush(userpass, testtable, mutations, null);
++    
++    assertEquals(tpc.proxy().tableOperations_getMaxRow(userpass, testtable, auths, null, true, null, true), "9");
++    
++    // TODO: Uncomment when the Mock isn't broken
++    // tpc.proxy().tableOperations_deleteRows(userpass,testtable,"51","99");
++    // assertEquals(tpc.proxy().tableOperations_getMaxRow(userpass, testtable, auths, null, true, null, true),"5");
++    
++  }
++  
++  /*
++   * @Test(expected = TException.class) public void peekTest() { }
++   */
++}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/58fcad6e/server/pom.xml
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/accumulo/blob/58fcad6e/server/src/main/c++/nativeMap/Makefile
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/accumulo/blob/58fcad6e/start/pom.xml
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/accumulo/blob/58fcad6e/test/pom.xml
----------------------------------------------------------------------
diff --cc test/pom.xml
index 0000000,ba189ce..2d309b7
mode 000000,100644..100644
--- a/test/pom.xml
+++ b/test/pom.xml
@@@ -1,0 -1,112 +1,112 @@@
+ <?xml version="1.0" encoding="UTF-8"?>
+ <!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+ 
+       http://www.apache.org/licenses/LICENSE-2.0
+ 
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+ -->
+ <project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
+ 
+   <parent>
+     <groupId>org.apache.accumulo</groupId>
+     <artifactId>accumulo</artifactId>
 -    <version>1.5.0-SNAPSHOT</version>
++    <version>ACCUMULO-652-SNAPSHOT</version>
+   </parent>
+ 
+   <modelVersion>4.0.0</modelVersion>
+   <artifactId>accumulo-test</artifactId>
+   <name>accumulo-test</name>
+ 
+   <build>
+     <pluginManagement>
+       <plugins>
+         <plugin>
+           <artifactId>maven-jar-plugin</artifactId>
+           <configuration>
+             <outputDirectory>../lib</outputDirectory>
+           </configuration>
+         </plugin>
+       </plugins>
+     </pluginManagement>
+     <plugins>
+       <plugin>
+         <groupId>org.apache.maven.plugins</groupId>
+         <artifactId>maven-jar-plugin</artifactId>
+         <configuration>
+           <archive>
+             <manifestSections>
+               <manifestSection>
+                 <name>accumulo/test/</name>
+                 <manifestEntries>
+                   <Sealed>true</Sealed>
+                 </manifestEntries>
+               </manifestSection>
+             </manifestSections>
+           </archive>
+         </configuration>
+       </plugin>
+     </plugins>
+   </build>
+   
+   <profiles>
+     <!-- profile for building against Hadoop 1.0.x
+     Activate by not specifying hadoop.profile -->
+     <profile>
+       <id>hadoop-1.0</id>
+       <activation>
+         <property>
+           <name>!hadoop.profile</name>
+         </property>
+       </activation>
+       <dependencies>
+         <dependency>
+           <groupId>org.apache.hadoop</groupId>
+           <artifactId>hadoop-core</artifactId>
+         </dependency>
+       </dependencies>
+     </profile>
+     <!-- profile for building against Hadoop 2.0.x
+     Activate using: mvn -Dhadoop.profile=2.0 -->
+     <profile>
+       <id>hadoop-2.0</id>
+       <activation>
+         <property>
+           <name>hadoop.profile</name>
+           <value>2.0</value>
+         </property>
+       </activation>
+       <dependencies>
+         <dependency>
+           <groupId>org.apache.hadoop</groupId>
+           <artifactId>hadoop-client</artifactId>
+         </dependency>
+       </dependencies>
+     </profile>
+   </profiles>
+   
+   <dependencies>
+     <dependency>
+       <groupId>org.apache.accumulo</groupId>
+       <artifactId>accumulo-core</artifactId>
+     </dependency>
+     <dependency>
+       <groupId>org.apache.accumulo</groupId>
+       <artifactId>accumulo-server</artifactId>
+     </dependency>
+     <dependency>
+       <groupId>org.apache.zookeeper</groupId>
+       <artifactId>zookeeper</artifactId>
+     </dependency>
+   </dependencies>
+ 
+ </project>

http://git-wip-us.apache.org/repos/asf/accumulo/blob/58fcad6e/trace/pom.xml
----------------------------------------------------------------------


[14/15] ACCUMULO-652 merged changes from trunk

Posted by el...@apache.org.
http://git-wip-us.apache.org/repos/asf/accumulo/blob/58fcad6e/core/src/main/java/org/apache/accumulo/core/iterators/user/TransformingIterator.java
----------------------------------------------------------------------
diff --cc core/src/main/java/org/apache/accumulo/core/iterators/user/TransformingIterator.java
index 0000000,ead0964..85bd74a
mode 000000,100644..100644
--- a/core/src/main/java/org/apache/accumulo/core/iterators/user/TransformingIterator.java
+++ b/core/src/main/java/org/apache/accumulo/core/iterators/user/TransformingIterator.java
@@@ -1,0 -1,661 +1,656 @@@
+ /**
+  * Licensed to the Apache Software Foundation (ASF) under one or more
+  * contributor license agreements.  See the NOTICE file distributed with
+  * this work for additional information regarding copyright ownership.
+  * The ASF licenses this file to You under the Apache License, Version 2.0
+  * (the "License"); you may not use this file except in compliance with
+  * the License.  You may obtain a copy of the License at
+  *
+  *     http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing, software
+  * distributed under the License is distributed on an "AS IS" BASIS,
+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  * See the License for the specific language governing permissions and
+  * limitations under the License.
+  */
+ package org.apache.accumulo.core.iterators.user;
+ 
+ import java.io.IOException;
+ import java.util.ArrayList;
+ import java.util.Collection;
+ import java.util.Collections;
+ import java.util.Comparator;
+ import java.util.HashMap;
+ import java.util.HashSet;
+ import java.util.Map;
+ import java.util.NoSuchElementException;
+ 
+ import org.apache.accumulo.core.client.IteratorSetting;
+ import org.apache.accumulo.core.conf.AccumuloConfiguration;
+ import org.apache.accumulo.core.data.ByteSequence;
+ import org.apache.accumulo.core.data.Key;
+ import org.apache.accumulo.core.data.PartialKey;
+ import org.apache.accumulo.core.data.Range;
+ import org.apache.accumulo.core.data.Value;
+ import org.apache.accumulo.core.iterators.IteratorEnvironment;
+ import org.apache.accumulo.core.iterators.IteratorUtil.IteratorScope;
+ import org.apache.accumulo.core.iterators.OptionDescriber;
+ import org.apache.accumulo.core.iterators.SortedKeyValueIterator;
+ import org.apache.accumulo.core.iterators.WrappingIterator;
+ import org.apache.accumulo.core.security.Authorizations;
+ import org.apache.accumulo.core.security.ColumnVisibility;
 -import org.apache.accumulo.core.security.VisibilityEvaluator;
 -import org.apache.accumulo.core.security.VisibilityParseException;
+ import org.apache.accumulo.core.util.BadArgumentException;
+ import org.apache.accumulo.core.util.Pair;
+ import org.apache.commons.collections.BufferOverflowException;
+ import org.apache.commons.collections.map.LRUMap;
+ import org.apache.hadoop.io.Text;
+ import org.apache.log4j.Logger;
+ 
+ /**
+  * The TransformingIterator allows portions of a key (except for the row) to be transformed. This iterator handles the details that come with modifying keys
+  * (i.e., that the sort order could change). In order to do so, however, the iterator must put all keys sharing the same prefix in memory. Prefix is defined as
+  * the parts of the key that are not modified by this iterator. That is, if the iterator modifies column qualifier and timestamp, then the prefix is row and
+  * column family. In that case, the iterator must load all column qualifiers for each row/column family pair into memory. Given this constraint, care must be
+  * taken by users of this iterator to ensure it is not run in such a way that will overrun memory in a tablet server.
+  * <p>
+  * If the implementing iterator is transforming column families, then it must also override {@code untransformColumnFamilies(Collection)} to handle the case
+  * when column families are fetched at scan time. The fetched column families will/must be in the transformed space, and the untransformed column families need
+  * to be passed to this iterator's source. If it is not possible to write a reverse transformation (e.g., the column family transformation depends on the row
+  * value or something like that), then the iterator must not fetch specific column families (or only fetch column families that are known to not transform at
+  * all).
+  * <p>
+  * If the implementing iterator is transforming column visibilities, then users must be careful NOT to fetch column qualifiers from the scanner. The reason for
+  * this is due to ACCUMULO-??? (insert issue number).
+  * <p>
+  * If the implementing iterator is transforming column visibilities, then the user should be sure to supply authorizations via the {@link #AUTH_OPT} iterator
+  * option (note that this is only necessary for scan scope iterators). The supplied authorizations should be in the transformed space, but the authorizations
+  * supplied to the scanner should be in the untransformed space. That is, if the iterator transforms A to 1, B to 2, C to 3, etc, then the auths supplied when
+  * the scanner is constructed should be A,B,C,... and the auths supplied to the iterator should be 1,2,3,... The reason for this is that the scanner performs
+  * security filtering before this iterator is called, so the authorizations need to be in the original untransformed space. Since the iterator can transform
+  * visibilities, it is possible that it could produce visibilities that the user cannot see, so the transformed keys must be tested to ensure the user is
+  * allowed to view them. Note that this test is not necessary when the iterator is not used in the scan scope since no security filtering is performed during
+  * major and minor compactions. It should also be noted that this iterator implements the security filtering rather than relying on a follow-on iterator to do
+  * it so that we ensure the test is performed.
+  */
+ abstract public class TransformingIterator extends WrappingIterator implements OptionDescriber {
+   public static final String AUTH_OPT = "authorizations";
+   public static final String MAX_BUFFER_SIZE_OPT = "maxBufferSize";
+   private static final long DEFAULT_MAX_BUFFER_SIZE = 10000000;
+ 
+   protected Logger log = Logger.getLogger(getClass());
+   
+   protected ArrayList<Pair<Key,Value>> keys = new ArrayList<Pair<Key,Value>>();
+   protected int keyPos = -1;
+   protected boolean scanning;
+   protected Range seekRange;
+   protected Collection<ByteSequence> seekColumnFamilies;
+   protected boolean seekColumnFamiliesInclusive;
+   
 -  private VisibilityEvaluator ve = null;
++  private Authorizations auths = null;
+   private LRUMap visibleCache = null;
+   private LRUMap parsedVisibilitiesCache = null;
+   private long maxBufferSize;
+   
+   private static Comparator<Pair<Key,Value>> keyComparator = new Comparator<Pair<Key,Value>>() {
+     @Override
+     public int compare(Pair<Key,Value> o1, Pair<Key,Value> o2) {
+       return o1.getFirst().compareTo(o2.getFirst());
+     }
+   };
+   
+   public TransformingIterator() {}
+   
+   @Override
+   public void init(SortedKeyValueIterator<Key,Value> source, Map<String,String> options, IteratorEnvironment env) throws IOException {
+     super.init(source, options, env);
+     scanning = IteratorScope.scan.equals(env.getIteratorScope());
+     if (scanning) {
+       String auths = options.get(AUTH_OPT);
+       if (auths != null && !auths.isEmpty()) {
 -        ve = new VisibilityEvaluator(new Authorizations(auths.getBytes()));
++        this.auths = new Authorizations(auths.getBytes());
+         visibleCache = new LRUMap(100);
+       }
+     }
+     
+     if (options.containsKey(MAX_BUFFER_SIZE_OPT)) {
+       maxBufferSize = AccumuloConfiguration.getMemoryInBytes(options.get(MAX_BUFFER_SIZE_OPT));
+     } else {
+       maxBufferSize = DEFAULT_MAX_BUFFER_SIZE;
+     }
+ 
+     parsedVisibilitiesCache = new LRUMap(100);
+   }
+   
+   @Override
+   public IteratorOptions describeOptions() {
+     String desc = "This iterator allows ranges of key to be transformed (with the exception of row transformations).";
+     String authDesc = "Comma-separated list of user's scan authorizations.  "
+         + "If excluded or empty, then no visibility check is performed on transformed keys.";
+     String bufferDesc = "Maximum buffer size (in accumulo memory spec) to use for buffering keys before throwing a BufferOverflowException.  " +
+     		"Users should keep this limit in mind when deciding what to transform.  That is, if transforming the column family for example, then all " +
+     		"keys sharing the same row and column family must fit within this limit (along with their associated values)";
+     HashMap<String,String> namedOptions = new HashMap<String,String>();
+     namedOptions.put(AUTH_OPT, authDesc);
+     namedOptions.put(MAX_BUFFER_SIZE_OPT, bufferDesc);
+     return new IteratorOptions(getClass().getSimpleName(), desc, namedOptions, null);
+   }
+   
+   @Override
+   public boolean validateOptions(Map<String,String> options) {
+     return true;
+   }
+   
+   @Override
+   public SortedKeyValueIterator<Key,Value> deepCopy(IteratorEnvironment env) {
+     TransformingIterator copy;
+     
+     try {
+       copy = getClass().newInstance();
+     } catch (Exception e) {
+       throw new RuntimeException(e);
+     }
+     
+     copy.setSource(getSource().deepCopy(env));
+     
+     copy.scanning = scanning;
+     copy.keyPos = keyPos;
+     copy.keys.addAll(keys);
+     copy.seekRange = (seekRange == null) ? null : new Range(seekRange);
+     copy.seekColumnFamilies = (seekColumnFamilies == null) ? null : new HashSet<ByteSequence>(seekColumnFamilies);
+     copy.seekColumnFamiliesInclusive = seekColumnFamiliesInclusive;
+     
 -    copy.ve = ve;
++    copy.auths = auths;
+     if (visibleCache != null) {
+       copy.visibleCache = new LRUMap(visibleCache.maxSize());
+       copy.visibleCache.putAll(visibleCache);
+     }
+     
+     if (parsedVisibilitiesCache != null) {
+       copy.parsedVisibilitiesCache = new LRUMap(parsedVisibilitiesCache.maxSize());
+       copy.parsedVisibilitiesCache.putAll(parsedVisibilitiesCache);
+     }
+     
+     copy.maxBufferSize = maxBufferSize;
+     
+     return copy;
+   }
+   
+   @Override
+   public boolean hasTop() {
+     return keyPos >= 0 && keyPos < keys.size();
+   }
+   
+   @Override
+   public Key getTopKey() {
+     return hasTop() ? keys.get(keyPos).getFirst() : null;
+   }
+   
+   @Override
+   public Value getTopValue() {
+     return hasTop() ? keys.get(keyPos).getSecond() : null;
+   }
+   
+   @Override
+   public void next() throws IOException {
+     // Move on to the next entry since we returned the entry at keyPos before
+     if (keyPos >= 0)
+       keyPos++;
+     
+     // If we emptied out the transformed key map then transform the next key
+     // set from the source. It’s possible that transformation could produce keys
+     // that are outside of our range or are not visible to the end user, so after the
+     // call below we might not have added any keys to the map. Keep going until
+     // we either get some keys in the map or exhaust the source iterator.
+     while (!hasTop() && super.hasTop())
+       transformKeys();
+   }
+   
+   @Override
+   public void seek(Range range, Collection<ByteSequence> columnFamilies, boolean inclusive) throws IOException {
+     seekRange = (range != null) ? new Range(range) : null;
+     seekColumnFamilies = columnFamilies;
+     seekColumnFamiliesInclusive = inclusive;
+     
+     // Seek the source iterator, but use a recalculated range that ensures
+     // we see all keys with the same "prefix." We need to do this since
+     // transforming could change the sort order and transformed keys that
+     // are before the range start could be inside the range after transformation.
+     super.seek(computeReseekRange(range), untransformColumnFamilies(columnFamilies), inclusive);
+     
+     // Range clipping could cause us to trim out all the keys we transformed.
+     // Keep looping until we either have some keys in the output range, or have
+     // exhausted the source iterator.
+     keyPos = -1; // “Clear” list so hasTop returns false to get us into the loop (transformKeys actually clears)
+     while (!hasTop() && super.hasTop()) {
+       // Build up a sorted list of all keys for the same prefix. When
+       // people ask for keys, return from this list first until it is empty
+       // before incrementing the source iterator.
+       transformKeys();
+     }
+   }
+ 
+   private static class RangeIterator implements SortedKeyValueIterator<Key,Value> {
+     
+     private SortedKeyValueIterator<Key,Value> source;
+     private Key prefixKey;
+     private PartialKey keyPrefix;
+     private boolean hasTop = false;
+     
+     RangeIterator(SortedKeyValueIterator<Key,Value> source, Key prefixKey, PartialKey keyPrefix) {
+       this.source = source;
+       this.prefixKey = prefixKey;
+       this.keyPrefix = keyPrefix;
+     }
+ 
+     @Override
+     public void init(SortedKeyValueIterator<Key,Value> source, Map<String,String> options, IteratorEnvironment env) throws IOException {
+       throw new UnsupportedOperationException();
+     }
+     
+     @Override
+     public boolean hasTop() {
+       // only have a top if the prefix matches
+       return hasTop = source.hasTop() && source.getTopKey().equals(prefixKey, keyPrefix);
+     }
+     
+     @Override
+     public void next() throws IOException {
+       // do not let user advance too far and try to avoid reexecuting hasTop()
+       if (!hasTop && !hasTop())
+         throw new NoSuchElementException();
+       hasTop = false;
+       source.next();
+     }
+     
+     @Override
+     public void seek(Range range, Collection<ByteSequence> columnFamilies, boolean inclusive) throws IOException {
+       throw new UnsupportedOperationException();
+     }
+     
+     @Override
+     public Key getTopKey() {
+       return source.getTopKey();
+     }
+     
+     @Override
+     public Value getTopValue() {
+       return source.getTopValue();
+     }
+     
+     @Override
+     public SortedKeyValueIterator<Key,Value> deepCopy(IteratorEnvironment env) {
+       throw new UnsupportedOperationException();
+     }
+     
+   }
+ 
+   /**
+    * Reads all keys matching the first key's prefix from the source iterator, transforms them, and sorts the resulting keys. Transformed keys that fall outside
+    * of our seek range or can't be seen by the user are excluded.
+    */
+   protected void transformKeys() throws IOException {
+     keyPos = -1;
+     keys.clear();
+     final Key prefixKey = super.hasTop() ? new Key(super.getTopKey()) : null;
+     
+     transformRange(new RangeIterator(getSource(), prefixKey, getKeyPrefix()), new KVBuffer() {
+       
+       long appened = 0;
+ 
+       @Override
+       public void append(Key key, Value val) {
+         // ensure the key provided by the user has the correct prefix
+         if (!key.equals(prefixKey, getKeyPrefix()))
+           throw new IllegalArgumentException("Key prefixes are not equal " + key + " " + prefixKey);
+         
+         // Transformation could have produced a key that falls outside
+         // of the seek range, or one that the user cannot see. Check
+         // these before adding it to the output list.
+         if (includeTransformedKey(key)) {
+           
+           // try to defend against a scan or compaction using all memory in a tablet server
+           if (appened > maxBufferSize)
+             throw new BufferOverflowException("Exceeded buffer size of " + maxBufferSize + ", prefixKey: " + prefixKey);
+ 
+           if (getSource().hasTop() && key == getSource().getTopKey())
+             key = new Key(key);
+           keys.add(new Pair<Key,Value>(key, new Value(val)));
+           appened += (key.getSize() + val.getSize() + 128);
+         }
+       }
+     });
+ 
+     // consume any key in range that user did not consume
+     while (super.hasTop() && super.getTopKey().equals(prefixKey, getKeyPrefix())) {
+       super.next();
+     }
+     
+     if (!keys.isEmpty()) {
+       Collections.sort(keys, keyComparator);
+       keyPos = 0;
+     }
+   }
+   
+   /**
+    * Determines whether or not to include {@code transformedKey} in the output. It is possible that transformation could have produced a key that falls outside
+    * of the seek range, a key with a visibility the user can't see, a key with a visibility that doesn't parse, or a key with a column family that wasn't
+    * fetched. We only do some checks (outside the range, user can see) if we're scanning. The range check is not done for major/minor compaction since seek
+    * ranges won't be in our transformed key space and we will never change the row so we can't produce keys that would fall outside the tablet anyway.
+    * 
+    * @param transformedKey
+    *          the key to check
+    * @return {@code true} if the key should be included and {@code false} if not
+    */
+   protected boolean includeTransformedKey(Key transformedKey) {
+     boolean include = canSee(transformedKey);
+     if (scanning && seekRange != null) {
+       include = include && seekRange.contains(transformedKey);
+     }
+     return include;
+   }
+   
+   /**
+    * Indicates whether or not the user is able to see {@code key}. If the user has not supplied authorizations, or the iterator is not in the scan scope, then
+    * this method simply returns {@code true}. Otherwise, {@code key}'s column visibility is tested against the user-supplied authorizations, and the test result
+    * is returned. For performance, the test results are cached so that the same visibility is not tested multiple times.
+    * 
+    * @param key
+    *          the key to test
+    * @return {@code true} if the key is visible or iterator is not scanning, and {@code false} if not
+    */
+   protected boolean canSee(Key key) {
+     // Ensure that the visibility (which could have been transformed) parses. Must always do this check, even if visibility is not evaluated.
+     ByteSequence visibility = key.getColumnVisibilityData();
+     ColumnVisibility colVis = null;
+     Boolean parsed = (Boolean) parsedVisibilitiesCache.get(visibility);
+     if (parsed == null) {
+       try {
+         colVis = new ColumnVisibility(visibility.toArray());
+         parsedVisibilitiesCache.put(visibility, Boolean.TRUE);
+       } catch (BadArgumentException e) {
+         log.error("Parse error after transformation : " + visibility);
+         parsedVisibilitiesCache.put(visibility, Boolean.FALSE);
+         if (scanning) {
+           return false;
+         } else {
+           throw e;
+         }
+       }
+     } else if (!parsed) {
+       if (scanning)
+         return false;
+       else
+         throw new IllegalStateException();
+     }
+     
+     Boolean visible = canSeeColumnFamily(key);
+     
 -    if (!scanning || !visible || ve == null || visibleCache == null || visibility.length() == 0)
++    if (!scanning || !visible || auths == null || visibleCache == null || visibility.length() == 0)
+       return visible;
+     
+     visible = (Boolean) visibleCache.get(visibility);
+     if (visible == null) {
+       try {
+         if (colVis == null)
+           colVis = new ColumnVisibility(visibility.toArray());
 -        visible = ve.evaluate(colVis);
++        visible = colVis.evaluate(auths);
+         visibleCache.put(visibility, visible);
 -      } catch (VisibilityParseException e) {
 -        log.error("Parse Error", e);
 -        visible = Boolean.FALSE;
+       } catch (BadArgumentException e) {
+         log.error("Parse Error", e);
+         visible = Boolean.FALSE;
+       }
+     }
+     
+     return visible;
+   }
+   
+   /**
+    * Indicates whether or not {@code key} can be seen, according to the fetched column families for this iterator.
+    * 
+    * @param key
+    *          the key whose column family is to be tested
+    * @return {@code true} if {@code key}'s column family is one of those fetched in the set passed to our {@link #seek(Range, Collection, boolean)} method
+    */
+   protected boolean canSeeColumnFamily(Key key) {
+     boolean visible = true;
+     if (seekColumnFamilies != null) {
+       ByteSequence columnFamily = key.getColumnFamilyData();
+       if (seekColumnFamiliesInclusive)
+         visible = seekColumnFamilies.contains(columnFamily);
+       else
+         visible = !seekColumnFamilies.contains(columnFamily);
+     }
+     return visible;
+   }
+   
+   /**
+    * Possibly expand {@code range} to include everything for the key prefix we are working with. That is, if our prefix is ROW_COLFAM, then we need to expand
+    * the range so we're sure to include all entries having the same row and column family as the start/end of the range.
+    * 
+    * @param range
+    *          the range to expand
+    * @return the modified range
+    */
+   protected Range computeReseekRange(Range range) {
+     Key startKey = range.getStartKey();
+     boolean startKeyInclusive = range.isStartKeyInclusive();
+     // If anything after the prefix is set, then clip the key so we include
+     // everything for the prefix.
+     if (isSetAfterPart(startKey, getKeyPrefix())) {
+       startKey = copyPartialKey(startKey, getKeyPrefix());
+       startKeyInclusive = true;
+     }
+     Key endKey = range.getEndKey();
+     boolean endKeyInclusive = range.isEndKeyInclusive();
+     if (isSetAfterPart(endKey, getKeyPrefix())) {
+       endKey = endKey.followingKey(getKeyPrefix());
+       endKeyInclusive = true;
+     }
+     return new Range(startKey, startKeyInclusive, endKey, endKeyInclusive);
+   }
+   
+   /**
+    * Indicates whether or not any part of {@code key} excluding {@code part} is set. For example, if part is ROW_COLFAM_COLQUAL, then this method determines
+    * whether or not the column visibility, timestamp, or delete flag is set on {@code key}.
+    * 
+    * @param key
+    *          the key to check
+    * @param part
+    *          the part of the key that doesn't need to be checked (everything after does)
+    * @return {@code true} if anything after {@code part} is set on {@code key}, and {@code false} if not
+    */
+   protected boolean isSetAfterPart(Key key, PartialKey part) {
+     boolean isSet = false;
+     if (key != null) {
+       // Breaks excluded on purpose.
+       switch (part) {
+         case ROW:
+           isSet = isSet || key.getColumnFamilyData().length() > 0;
+         case ROW_COLFAM:
+           isSet = isSet || key.getColumnQualifierData().length() > 0;
+         case ROW_COLFAM_COLQUAL:
+           isSet = isSet || key.getColumnVisibilityData().length() > 0;
+         case ROW_COLFAM_COLQUAL_COLVIS:
+           isSet = isSet || key.getTimestamp() < Long.MAX_VALUE;
+         case ROW_COLFAM_COLQUAL_COLVIS_TIME:
+           isSet = isSet || key.isDeleted();
+         case ROW_COLFAM_COLQUAL_COLVIS_TIME_DEL:
+           break;
+       }
+     }
+     return isSet;
+   }
+   
+   /**
+    * Creates a copy of {@code key}, copying only the parts of the key specified in {@code part}. For example, if {@code part} is ROW_COLFAM_COLQUAL, then this
+    * method would copy the row, column family, and column qualifier from {@code key} into a new key.
+    * 
+    * @param key
+    *          the key to copy
+    * @param part
+    *          the parts of {@code key} to copy
+    * @return the new key containing {@code part} of {@code key}
+    */
+   protected Key copyPartialKey(Key key, PartialKey part) {
+     Key keyCopy;
+     switch (part) {
+       case ROW:
+         keyCopy = new Key(key.getRow());
+         break;
+       case ROW_COLFAM:
+         keyCopy = new Key(key.getRow(), key.getColumnFamily());
+         break;
+       case ROW_COLFAM_COLQUAL:
+         keyCopy = new Key(key.getRow(), key.getColumnFamily(), key.getColumnQualifier());
+         break;
+       case ROW_COLFAM_COLQUAL_COLVIS:
+         keyCopy = new Key(key.getRow(), key.getColumnFamily(), key.getColumnQualifier(), key.getColumnVisibility());
+         break;
+       case ROW_COLFAM_COLQUAL_COLVIS_TIME:
+         keyCopy = new Key(key.getRow(), key.getColumnFamily(), key.getColumnQualifier(), key.getColumnVisibility(), key.getTimestamp());
+         break;
+       default:
+         throw new IllegalArgumentException("Unsupported key part: " + part);
+     }
+     return keyCopy;
+   }
+   
+   /**
+    * Make a new key with all parts (including delete flag) coming from {@code originalKey} but use {@code newColFam} as the column family.
+    */
+   protected Key replaceColumnFamily(Key originalKey, Text newColFam) {
+     byte[] row = originalKey.getRowData().toArray();
+     byte[] cf = newColFam.getBytes();
+     byte[] cq = originalKey.getColumnQualifierData().toArray();
+     byte[] cv = originalKey.getColumnVisibilityData().toArray();
+     long timestamp = originalKey.getTimestamp();
+     Key newKey = new Key(row, 0, row.length, cf, 0, newColFam.getLength(), cq, 0, cq.length, cv, 0, cv.length, timestamp);
+     newKey.setDeleted(originalKey.isDeleted());
+     return newKey;
+   }
+   
+   /**
+    * Make a new key with all parts (including delete flag) coming from {@code originalKey} but use {@code newColQual} as the column qualifier.
+    */
+   protected Key replaceColumnQualifier(Key originalKey, Text newColQual) {
+     byte[] row = originalKey.getRowData().toArray();
+     byte[] cf = originalKey.getColumnFamilyData().toArray();
+     byte[] cq = newColQual.getBytes();
+     byte[] cv = originalKey.getColumnVisibilityData().toArray();
+     long timestamp = originalKey.getTimestamp();
+     Key newKey = new Key(row, 0, row.length, cf, 0, cf.length, cq, 0, newColQual.getLength(), cv, 0, cv.length, timestamp);
+     newKey.setDeleted(originalKey.isDeleted());
+     return newKey;
+   }
+   
+   /**
+    * Make a new key with all parts (including delete flag) coming from {@code originalKey} but use {@code newColVis} as the column visibility.
+    */
+   protected Key replaceColumnVisibility(Key originalKey, Text newColVis) {
+     byte[] row = originalKey.getRowData().toArray();
+     byte[] cf = originalKey.getColumnFamilyData().toArray();
+     byte[] cq = originalKey.getColumnQualifierData().toArray();
+     byte[] cv = newColVis.getBytes();
+     long timestamp = originalKey.getTimestamp();
+     Key newKey = new Key(row, 0, row.length, cf, 0, cf.length, cq, 0, cq.length, cv, 0, newColVis.getLength(), timestamp);
+     newKey.setDeleted(originalKey.isDeleted());
+     return newKey;
+   }
+   
+   /**
+    * Make a new key with a column family, column qualifier, and column visibility. Copy the rest of the parts of the key (including delete flag) from
+    * {@code originalKey}.
+    */
+   protected Key replaceKeyParts(Key originalKey, Text newColFam, Text newColQual, Text newColVis) {
+     byte[] row = originalKey.getRowData().toArray();
+     byte[] cf = newColFam.getBytes();
+     byte[] cq = newColQual.getBytes();
+     byte[] cv = newColVis.getBytes();
+     long timestamp = originalKey.getTimestamp();
+     Key newKey = new Key(row, 0, row.length, cf, 0, newColFam.getLength(), cq, 0, newColQual.getLength(), cv, 0, newColVis.getLength(), timestamp);
+     newKey.setDeleted(originalKey.isDeleted());
+     return newKey;
+   }
+   
+   /**
+    * Make a new key with a column qualifier, and column visibility. Copy the rest of the parts of the key (including delete flag) from {@code originalKey}.
+    */
+   protected Key replaceKeyParts(Key originalKey, Text newColQual, Text newColVis) {
+     byte[] row = originalKey.getRowData().toArray();
+     byte[] cf = originalKey.getColumnFamilyData().toArray();
+     byte[] cq = newColQual.getBytes();
+     byte[] cv = newColVis.getBytes();
+     long timestamp = originalKey.getTimestamp();
+     Key newKey = new Key(row, 0, row.length, cf, 0, cf.length, cq, 0, newColQual.getLength(), cv, 0, newColVis.getLength(), timestamp);
+     newKey.setDeleted(originalKey.isDeleted());
+     return newKey;
+   }
+   
+   /**
+    * Reverses the transformation applied to column families that are fetched at seek time. If this iterator is transforming column families, then this method
+    * should be overridden to reverse the transformation on the supplied collection of column families. This is necessary since the fetch/seek will be performed
+    * in the transformed space, but when passing the column family set on to the source, the column families need to be in the untransformed space.
+    * 
+    * @param columnFamilies
+    *          the column families that have been fetched at seek time
+    * @return the untransformed column families that would transform into {@code columnFamilies}
+    */
+   protected Collection<ByteSequence> untransformColumnFamilies(Collection<ByteSequence> columnFamilies) {
+     return columnFamilies;
+   }
+   
+   /**
+    * Indicates the prefix of keys that will be transformed by this iterator. In other words, this is the part of the key that will <i>not</i> be transformed by
+    * this iterator. For example, if this method returns ROW_COLFAM, then {@link #transformKeys()} may be changing the column qualifier, column visibility, or
+    * timestamp, but it won't be changing the row or column family.
+    * 
+    * @return the part of the key this iterator is not transforming
+    */
+   abstract protected PartialKey getKeyPrefix();
+   
+   public static interface KVBuffer {
+     void append(Key key, Value val);
+   }
+   
+   /**
+    * Transforms {@code input}. This method must not change the row part of the key, and must only change the parts of the key after the return value of
+    * {@link #getKeyPrefix()}. Implementors must also remember to copy the delete flag from {@code originalKey} onto the new key. Or, implementors should use one
+    * of the helper methods to produce the new key. See any of the replaceKeyParts methods.
+    * 
+    * @param input
+    *          An iterator over a group of keys with the same prefix. This iterator provides an efficient view, bounded by the prefix, of the underlying iterator
+    *          and cannot be seeked.
+    * @param output
+    *          An output buffer that holds transformed key values. All key values added to the buffer must have the same prefix as the input keys.
+    * @throws IOException
+    * @see #replaceColumnFamily(Key, Text)
+    * @see #replaceColumnQualifier(Key, Text)
+    * @see #replaceColumnVisibility(Key, Text)
+    * @see #replaceKeyParts(Key, Text, Text)
+    * @see #replaceKeyParts(Key, Text, Text, Text)
+    */
+   abstract protected void transformRange(SortedKeyValueIterator<Key,Value> input, KVBuffer output) throws IOException;
+   
+   /**
+    * Configure authorizations used for post transformation filtering.
+    * 
+    * @param config
+    * @param auths
+    */
+   public static void setAutorizations(IteratorSetting config, Authorizations auths) {
+     config.addOption(AUTH_OPT, auths.serialize());
+   }
+   
+   /**
+    * Configure the maximum amount of memory that can be used for transformation. If this memory is exceeded an exception will be thrown.
+    * 
+    * @param config
+    * @param maxBufferSize
+    *          size in bytes
+    */
+   public static void setMaxBufferSize(IteratorSetting config, long maxBufferSize) {
+     config.addOption(MAX_BUFFER_SIZE_OPT, maxBufferSize + "");
+   }
+ 
+ }

http://git-wip-us.apache.org/repos/asf/accumulo/blob/58fcad6e/core/src/main/java/org/apache/accumulo/core/security/ColumnVisibility.java
----------------------------------------------------------------------
diff --cc core/src/main/java/org/apache/accumulo/core/security/ColumnVisibility.java
index 1b72b33,ad7fdb2..d629cef
--- a/core/src/main/java/org/apache/accumulo/core/security/ColumnVisibility.java
+++ b/core/src/main/java/org/apache/accumulo/core/security/ColumnVisibility.java
@@@ -16,9 -16,12 +16,10 @@@
   */
  package org.apache.accumulo.core.security;
  
 +import java.io.ByteArrayOutputStream;
+ import java.io.UnsupportedEncodingException;
 -import java.util.ArrayList;
  import java.util.Arrays;
 -import java.util.Collections;
 -import java.util.Comparator;
 -import java.util.List;
 +import java.util.Iterator;
  import java.util.TreeSet;
  
  import org.apache.accumulo.core.data.ArrayByteSequence;
@@@ -38,62 -52,59 +39,63 @@@ public class ColumnVisibility 
    public static enum NodeType {
      TERM, OR, AND,
    }
 -  
 -  public static class Node {
 -    public final static List<Node> EMPTY = Collections.emptyList();
 -    NodeType type;
 -    int start = 0;
 -    int end = 0;
 -    List<Node> children = EMPTY;
 +
 +  private static abstract class Node implements Comparable<Node> {
 +    protected final NodeType type;
      
 -    public Node(NodeType type) {
 +    public Node(NodeType type)
 +    {
        this.type = type;
      }
 -    
 -    public Node(int start, int end) {
 -      this.type = NodeType.TERM;
 -      this.start = start;
 -      this.end = end;
 +
 +    public byte[] generate() {
 +      ByteArrayOutputStream baos = new ByteArrayOutputStream();
 +      generate(baos,false);
 +      return baos.toByteArray();
      }
      
 -    public void add(Node child) {
 -      if (children == EMPTY)
 -        children = new ArrayList<Node>();
 -      
 -      children.add(child);
 -    }
 +    public abstract boolean evaluate(Authorizations auths);
      
 -    public NodeType getType() {
 -      return type;
 -    }
 +    protected abstract void generate(ByteArrayOutputStream baos, boolean parens);
 +  }
 +  
 +  private static class TermNode extends Node {
      
 -    public List<Node> getChildren() {
 -      return children;
 +    final ByteSequence bs;
 +    
 +    public TermNode(final ByteSequence bs) {
 +      super(NodeType.TERM);
 +      this.bs = bs;
      }
      
 -    public int getTermStart() {
 -      return start;
 +    public boolean evaluate(Authorizations auths)
 +    {
 +      return auths.contains(bs);
 +    }
 +
 +
 +    protected void generate(ByteArrayOutputStream baos, boolean parens)
 +    {
-       baos.write(bs.getBackingArray(), bs.offset(), bs.length());
++      byte [] quoted = quote(bs.toArray());
++      baos.write(quoted, 0, quoted.length);
      }
      
 -    public int getTermEnd() {
 -      return end;
 +    @Override
 +    public boolean equals(Object other) {
 +      if(other instanceof TermNode)
 +      {
 +        return bs.compareTo(((TermNode)other).bs) == 0;
 +      }
 +      return false;
      }
      
 -    public ByteSequence getTerm(byte expression[]) {
 -      if (type != NodeType.TERM)
 -        throw new RuntimeException();
 -
 -      if (expression[start] == '"') {
 -        // its a quoted term
 -        int qStart = start + 1;
 -        int qEnd = end - 1;
 -        
 -        return new ArrayByteSequence(expression, qStart, qEnd - qStart);
 +    @Override
 +    public int compareTo(Node o) {
 +      if(o.type == NodeType.TERM)
 +      {
 +        return bs.compareTo(((TermNode)o).bs);
        }
 -      return new ArrayByteSequence(expression, start, end - start);
 +      return type.ordinal() - o.type.ordinal();
      }
    }
    
@@@ -144,81 -137,79 +146,87 @@@
        }
        return 0;
      }
 +
    }
-   
+ 
 -  /* Convience method that delegates to normalize with a new
 -   * NodeComparator constructed using the supplied expression.
 -   */
 -  private static Node normalize(Node root, byte[] expression) {
 -    return normalize(root, expression, new NodeComparator(expression));
 -  } 
 +  private static class OrNode extends AggregateNode {
  
 -  /* Walks an expression's AST in order to:
 -   *  1) roll up expressions with the same operant (`a&(b&c) becomes a&b&c`)
 -   *  2) sorts labels lexicographically (permutations of `a&b&c` are re-ordered to appear as `a&b&c`)
 -   *  3) dedupes labels (`a&b&a` becomes `a&b`)
 -   */
 -  private static Node normalize(Node root, byte[] expression, NodeComparator comparator) {
 -    if(root.type != NodeType.TERM) {
 -      TreeSet<Node> rolledUp = new TreeSet<Node>(comparator);
 -      java.util.Iterator<Node> itr = root.children.iterator();
 -      while(itr.hasNext()) { 
 -        Node c = normalize(itr.next(), expression, comparator);
 -        if(c.type == root.type) {
 -          rolledUp.addAll(c.children);
 -          itr.remove();
 -        }
 -      }
 -      rolledUp.addAll(root.children);
 -      root.children.clear();
 -      root.children.addAll(rolledUp);
 -      
 -      //need to promote a child if it's an only child
 -      if(root.children.size() == 1) {
 -        return root.children.get(0);
 -      }
 +    public OrNode() {
 +      super(NodeType.OR);
      }
  
 -    return root;
 -  }
 -
 -  /* Walks an expression's AST and appends a string representation to a supplied
 -   * StringBuilder. This method adds parens where necessary.
 -   */
 -  private static void stringify(Node root, byte[] expression, StringBuilder out) {
 -    if (root.type == NodeType.TERM) {
 -      out.append(new String(expression, root.start, root.end - root.start));
 +    @Override
 +    public boolean evaluate(Authorizations auths) {
 +      for(Node child:children)
 +        if(child.evaluate(auths))
 +          return true;
 +      return false;
      }
 -    else {
 -      String sep = "";
 -      for (Node c : root.children) {
 -        out.append(sep);
 -        boolean parens = (c.type != NodeType.TERM && root.type != c.type);
 -        if (parens)
 -          out.append("(");
 -        stringify(c, expression, out);
 -        if (parens)
 -          out.append(")");
 -        sep = root.type == NodeType.AND ? "&" : "|";
 -      }
 +
 +    @Override
 +    protected byte getOperator() {
 +      return '|';
      }
 +    
    }
-   
+ 
+   /**
+    * Generates a byte[] that represents a normalized, but logically equivalent,
+    * form of the supplied expression.
+    *
+    * @return normalized expression in byte[] form
+    */
 +  private static class AndNode extends AggregateNode {
 +
 +    public AndNode()
 +    {
 +      super(NodeType.AND);
 +    }
 +    
 +    @Override
 +    public boolean evaluate(Authorizations auths) {
 +      for(Node child:children)
 +        if(!child.evaluate(auths))
 +          return false;
 +      return true;
 +    }
 +
 +    @Override
 +    protected byte getOperator() {
 +      return '&';
 +    }
 +    
 +  }
 +
 +  private byte[] expression = null;
 +  
 +  /**
 +   * @deprecated
 +   * @see org.apache.accumulo.security.ColumnVisibility#getExpression()
 +   */
    public byte[] flatten() {
 -    Node normRoot = normalize(node, expression);
 -    StringBuilder builder = new StringBuilder(expression.length);
 -    stringify(normRoot, expression, builder);
 -    return builder.toString().getBytes();
 +    return getExpression();
-   }
+   } 
    
 +  /**
 +   * Generate the byte[] that represents this ColumnVisibility.
 +   * @return a byte[] representation of this visibility
 +   */
 +  public byte[] getExpression(){
 +    if(expression != null)
 +      return expression;
 +    expression = _flatten();
 +    return expression;
 +  }
 +  
 +  private static final byte[] emptyExpression = new byte[0];
 +  
 +  private byte[] _flatten() {
 +    if(node == null)
 +      return emptyExpression;
 +    return node.generate();
 +  }
 +  
    private static class ColumnVisibilityParser {
      private int index = 0;
      private int parens = 0;
@@@ -239,11 -230,11 +247,13 @@@
        return null;
      }
      
--    Node processTerm(int start, int end, Node expr, byte[] expression) {
++    Node processTerm(int start, int end, Node expr, byte[] expression, boolean quoted) {
        if (start != end) {
          if (expr != null)
            throw new BadArgumentException("expression needs | or &", new String(expression), start);
 -        return new Node(start, end);
++        if(quoted)
++          return new TermNode(unquote(expression, start, end - start));
 +        return new TermNode(new ArrayByteSequence(expression, start, end - start));
        }
        if (expr == null)
          throw new BadArgumentException("empty term", new String(expression), start);
@@@ -254,32 -245,36 +264,45 @@@
        Node result = null;
        Node expr = null;
        int termStart = index;
++      boolean quoted = false;
+       boolean termComplete = false;
+ 
        while (index < expression.length) {
          switch (expression[index++]) {
            case '&': {
--            expr = processTerm(termStart, index - 1, expr, expression);
++            expr = processTerm(termStart, index - 1, expr, expression, quoted);
              if (result != null) {
                if (!result.type.equals(NodeType.AND))
                  throw new BadArgumentException("cannot mix & and |", new String(expression), index - 1);
              } else {
 -              result = new Node(NodeType.AND);
 +              result = new AndNode();
              }
-             ((AggregateNode)result).children.add(expr);
 -            result.add(expr);
++            if(expr.type == NodeType.AND)
++              ((AggregateNode)result).children.addAll(((AggregateNode)expr).children);
++            else
++              ((AggregateNode)result).children.add(expr);
              expr = null;
              termStart = index;
+             termComplete = false;
++            quoted = false;
              break;
            }
            case '|': {
--            expr = processTerm(termStart, index - 1, expr, expression);
++            expr = processTerm(termStart, index - 1, expr, expression, quoted);
              if (result != null) {
                if (!result.type.equals(NodeType.OR))
                  throw new BadArgumentException("cannot mix | and &", new String(expression), index - 1);
              } else {
 -              result = new Node(NodeType.OR);
 +              result = new OrNode();
              }
-             ((AggregateNode)result).children.add(expr);
 -            result.add(expr);
++            if(expr.type == NodeType.OR)
++              ((AggregateNode)result).children.addAll(((AggregateNode)expr).children);
++            else
++              ((AggregateNode)result).children.add(expr);
              expr = null;
              termStart = index;
+             termComplete = false;
++            quoted = false;
              break;
            }
            case '(': {
@@@ -288,50 -283,62 +311,81 @@@
                throw new BadArgumentException("expression needs & or |", new String(expression), index - 1);
              expr = parse_(expression);
              termStart = index;
+             termComplete = false;
++            quoted = false;
              break;
            }
            case ')': {
              parens--;
--            Node child = processTerm(termStart, index - 1, expr, expression);
++            Node child = processTerm(termStart, index - 1, expr, expression, quoted);
              if (child == null && result == null)
                throw new BadArgumentException("empty expression not allowed", new String(expression), index);
              if (result == null)
                return child;
              if (result.type == child.type)
 -              for (Node c : child.children)
 -                result.add(c);
 +            {
 +              AggregateNode parenNode = (AggregateNode)child;
 +              for (Node c : parenNode.children)
 +                ((AggregateNode)result).children.add(c);
 +            }
              else
 -              result.add(child);
 -            result.end = index - 1;
 +              ((AggregateNode)result).children.add(child);
 +            if (result.type != NodeType.TERM)
 +            {
 +              AggregateNode resultNode = (AggregateNode)result;
 +              if (resultNode.children.size() == 1)
 +                return resultNode.children.first();
 +              if (resultNode.children.size() < 2)
 +                throw new BadArgumentException("missing term", new String(expression), index);
 +            }
              return result;
            }
+           case '"': {
+             if (termStart != index - 1)
+               throw new BadArgumentException("expression needs & or |", new String(expression), index - 1);
+ 
+             while (index < expression.length && expression[index] != '"') {
+               if (expression[index] == '\\') {
+                 index++;
+                 if (expression[index] != '\\' && expression[index] != '"')
+                   throw new BadArgumentException("invalid escaping within quotes", new String(expression), index - 1);
+               }
+               index++;
+             }
+             
+             if (index == expression.length)
+               throw new BadArgumentException("unclosed quote", new String(expression), termStart);
+             
+             if (termStart + 1 == index)
+               throw new BadArgumentException("empty term", new String(expression), termStart);
+ 
+             index++;
+             
++            quoted = true;
+             termComplete = true;
+ 
+             break;
+           }
            default: {
+             if (termComplete)
+               throw new BadArgumentException("expression needs & or |", new String(expression), index - 1);
+ 
              byte c = expression[index - 1];
              if (!Authorizations.isValidAuthChar(c))
                throw new BadArgumentException("bad character (" + c + ")", new String(expression), index - 1);
            }
          }
        }
--      Node child = processTerm(termStart, index, expr, expression);
++      Node child = processTerm(termStart, index, expr, expression, quoted);
        if (result != null)
 -        result.add(child);
 +      {
 +        if(result.type == child.type)
 +        {
 +          ((AggregateNode)result).children.addAll(((AggregateNode)child).children);
 +        }
 +        else
 +          ((AggregateNode)result).children.add(child);
 +      }
        else
          result = child;
        if (result.type != NodeType.TERM)
@@@ -432,49 -454,62 +508,137 @@@
    
    @Override
    public int hashCode() {
 -    return Arrays.hashCode(expression);
 +    return Arrays.hashCode(getExpression());
    }
    
 -  public Node getParseTree() {
 -    return node;
 +  public boolean evaluate(Authorizations auths) {
 +    if(node == null)
 +      return true;
 +    return node.evaluate(auths);
    }
    
 +  public ColumnVisibility or(ColumnVisibility other)
 +  {
 +    if(node == null)
 +      return this;
 +    if(other.node == null)
 +      return other;
 +    OrNode orNode = new OrNode();
 +    if(other.node instanceof OrNode)
 +      orNode.children.addAll(((OrNode)other.node).children);
 +    else
 +      orNode.children.add(other.node);
 +    if(node instanceof OrNode)
 +      orNode.children.addAll(((OrNode)node).children);
 +    else
 +      orNode.children.add(node);
 +    return new ColumnVisibility(orNode);
 +  }
 +  
 +  public ColumnVisibility and(ColumnVisibility other)
 +  {
 +    if(node == null)
 +      return other;
 +    if(other.node == null)
 +      return this;
 +    AndNode andNode = new AndNode();
 +    if(other.node instanceof AndNode)
 +      andNode.children.addAll(((AndNode)other.node).children);
 +    else
 +      andNode.children.add(other.node);
 +    if(node instanceof AndNode)
 +      andNode.children.addAll(((AndNode)node).children);
 +    else
 +      andNode.children.add(node);
 +    return new ColumnVisibility(andNode);
 +  }
 +
+   /**
+    * see {@link #quote(byte[])}
+    * 
+    */
+   public static String quote(String term) {
+     return quote(term, "UTF-8");
+   }
+   
+   /**
+    * see {@link #quote(byte[])}
+    * 
+    */
+   public static String quote(String term, String encoding) {
+     try {
+       return new String(quote(term.getBytes(encoding)), encoding);
+     } catch (UnsupportedEncodingException e) {
+       throw new RuntimeException(e);
+     }
+   }
+   
+   /**
+    * Use to properly quote terms in a column visibility expression. If no quoting is needed, then nothing is done.
+    * 
+    * <p>
+    * Examples of using quote :
+    * 
+    * <pre>
+    * import static org.apache.accumulo.core.security.ColumnVisibility.quote;
+    *   .
+    *   .
+    *   .
+    * ColumnVisibility cv = new ColumnVisibility(quote(&quot;A#C&quot;) + &quot;&amp;&quot; + quote(&quot;FOO&quot;));
+    * </pre>
+    * 
+    */
+ 
+   public static byte[] quote(byte[] term) {
+     boolean needsQuote = false;
+     
+     for (int i = 0; i < term.length; i++) {
+       if (!Authorizations.isValidAuthChar(term[i])) {
+         needsQuote = true;
+         break;
+       }
+     }
+     
+     if (!needsQuote)
+       return term;
+     
 -    return VisibilityEvaluator.escape(term, true);
++    return escape(term, true);
+   }
++  
++  private static byte[] escape(byte[] auth, boolean quote) {
++    int escapeCount = 0;
++    
++    for (int i = 0; i < auth.length; i++)
++      if (auth[i] == '"' || auth[i] == '\\')
++        escapeCount++;
++    
++    if (escapeCount > 0 || quote) {
++      byte[] escapedAuth = new byte[auth.length + escapeCount + (quote ? 2 : 0)];
++      int index = quote ? 1 : 0;
++      for (int i = 0; i < auth.length; i++) {
++        if (auth[i] == '"' || auth[i] == '\\')
++          escapedAuth[index++] = '\\';
++        escapedAuth[index++] = auth[i];
++      }
++      
++      if (quote) {
++        escapedAuth[0] = '"';
++        escapedAuth[escapedAuth.length - 1] = '"';
++      }
++
++      auth = escapedAuth;
++    }
++    return auth;
++  }
++  
++  private static ByteSequence unquote(byte[] expression, int start, int length) {
++    ByteArrayOutputStream baos = new ByteArrayOutputStream();
++    for(int i = start+1; i < start+length-1; i++) {
++      if(expression[i] == '\\')
++        i++;
++      baos.write(expression[i]);
++    }
++    return new ArrayByteSequence(baos.toByteArray());
++  }
++
  }

http://git-wip-us.apache.org/repos/asf/accumulo/blob/58fcad6e/core/src/test/java/org/apache/accumulo/core/file/rfile/AuthorizationFilterTest.java
----------------------------------------------------------------------
diff --cc core/src/test/java/org/apache/accumulo/core/file/rfile/AuthorizationFilterTest.java
index 080b6fb,0000000..d99cfa6
mode 100644,000000..100644
--- a/core/src/test/java/org/apache/accumulo/core/file/rfile/AuthorizationFilterTest.java
+++ b/core/src/test/java/org/apache/accumulo/core/file/rfile/AuthorizationFilterTest.java
@@@ -1,110 -1,0 +1,108 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements.  See the NOTICE file distributed with
 + * this work for additional information regarding copyright ownership.
 + * The ASF licenses this file to You under the Apache License, Version 2.0
 + * (the "License"); you may not use this file except in compliance with
 + * the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.accumulo.core.file.rfile;
 +
 +import static org.junit.Assert.*;
 +
 +import java.io.ByteArrayInputStream;
 +import java.io.ByteArrayOutputStream;
 +import java.util.Collections;
 +import java.util.Map.Entry;
 +import java.util.Random;
 +import java.util.TreeMap;
 +
- import org.apache.accumulo.core.conf.AccumuloConfiguration;
 +import org.apache.accumulo.core.data.Key;
 +import org.apache.accumulo.core.data.Range;
 +import org.apache.accumulo.core.data.Value;
 +import org.apache.accumulo.core.file.blockfile.impl.CachableBlockFile;
 +import org.apache.accumulo.core.file.rfile.RFileTest.SeekableByteArrayInputStream;
 +import org.apache.accumulo.core.iterators.Predicate;
 +import org.apache.accumulo.core.iterators.predicates.ColumnVisibilityPredicate;
- import org.apache.accumulo.core.iterators.predicates.TimestampRangePredicate;
- import org.apache.accumulo.core.iterators.system.ColumnFamilySkippingIterator;
 +import org.apache.accumulo.core.iterators.system.VisibilityFilter;
 +import org.apache.accumulo.core.security.Authorizations;
 +import org.apache.accumulo.core.security.ColumnVisibility;
 +import org.apache.hadoop.conf.Configuration;
 +import org.apache.hadoop.fs.FSDataInputStream;
 +import org.apache.hadoop.fs.FSDataOutputStream;
 +import org.apache.hadoop.fs.FileSystem;
 +import org.junit.Test;
 +
 +public class AuthorizationFilterTest {
 +  
++  @SuppressWarnings("unchecked")
 +  @Test
 +  public void testRFileAuthorizationFiltering() throws Exception {
 +    Authorizations auths = new Authorizations("a", "b", "c");
 +    Predicate<Key,Value> columnVisibilityPredicate = new ColumnVisibilityPredicate(auths);
 +    int expected = 0;
 +    Random r = new Random();
 +    Configuration conf = new Configuration();
 +    ByteArrayOutputStream baos = new ByteArrayOutputStream();
 +    FSDataOutputStream dos = new FSDataOutputStream(baos, new FileSystem.Statistics("a"));
 +    CachableBlockFile.Writer _cbw = new CachableBlockFile.Writer(dos, "gz", conf);
 +    RFile.Writer writer = new RFile.Writer(_cbw, 1000, 1000);
 +    writer.startDefaultLocalityGroup();
 +    byte[] row = new byte[10];
 +    byte[] colFam = new byte[10];
 +    byte[] colQual = new byte[10];
 +    Value value = new Value(new byte[0]);
 +    TreeMap<Key,Value> inputBuffer = new TreeMap<Key,Value>();
 +    ColumnVisibility[] goodColVises = {new ColumnVisibility("a&b"), new ColumnVisibility("b&c"), new ColumnVisibility("a&c")};
 +    ColumnVisibility[] badColVises = {new ColumnVisibility("x"), new ColumnVisibility("y"), new ColumnVisibility("a&z")};
 +    for (ColumnVisibility colVis : goodColVises)
 +      for (int i = 0; i < 10; i++) {
 +        r.nextBytes(row);
 +        r.nextBytes(colFam);
 +        r.nextBytes(colQual);
 +        Key k = new Key(row, colFam, colQual, colVis.getExpression(), (long) i);
 +        if (columnVisibilityPredicate.evaluate(k, value))
 +          expected++;
 +        inputBuffer.put(k, value);
 +      }
 +    for (ColumnVisibility colVis : badColVises)
 +      for (int i = 0; i < 10000; i++) {
 +        r.nextBytes(row);
 +        r.nextBytes(colFam);
 +        r.nextBytes(colQual);
 +        Key k = new Key(row, colFam, colQual, colVis.getExpression(), (long) i);
 +        if (columnVisibilityPredicate.evaluate(k, value))
 +          expected++;
 +        inputBuffer.put(k, value);
 +      }
 +    for (Entry<Key,Value> e : inputBuffer.entrySet()) {
 +      writer.append(e.getKey(), e.getValue());
 +    }
 +    writer.close();
 +    
 +    // scan the RFile to bring back keys in a given timestamp range
 +    byte[] data = baos.toByteArray();
 +    
 +    ByteArrayInputStream bais = new SeekableByteArrayInputStream(data);
 +    FSDataInputStream in = new FSDataInputStream(bais);
 +    CachableBlockFile.Reader _cbr = new CachableBlockFile.Reader(in, data.length, conf);
 +    RFile.Reader reader = new RFile.Reader(_cbr);
 +    int count = 0;
 +    VisibilityFilter vf = new VisibilityFilter(reader, auths, new byte[0]);
 +    vf.seek(new Range(), Collections.EMPTY_SET, false);
 +    while (vf.hasTop()) {
 +      count++;
 +      assertTrue(columnVisibilityPredicate.evaluate(vf.getTopKey(), vf.getTopValue()));
 +      vf.next();
 +    }
 +    assertEquals(expected, count);
 +  }
 +}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/58fcad6e/core/src/test/java/org/apache/accumulo/core/file/rfile/BlockIndexTest.java
----------------------------------------------------------------------
diff --cc core/src/test/java/org/apache/accumulo/core/file/rfile/BlockIndexTest.java
index 0000000,1684feb..de970fd
mode 000000,100644..100644
--- a/core/src/test/java/org/apache/accumulo/core/file/rfile/BlockIndexTest.java
+++ b/core/src/test/java/org/apache/accumulo/core/file/rfile/BlockIndexTest.java
@@@ -1,0 -1,175 +1,176 @@@
+ /**
+  * Licensed to the Apache Software Foundation (ASF) under one or more
+  * contributor license agreements.  See the NOTICE file distributed with
+  * this work for additional information regarding copyright ownership.
+  * The ASF licenses this file to You under the Apache License, Version 2.0
+  * (the "License"); you may not use this file except in compliance with
+  * the License.  You may obtain a copy of the License at
+  *
+  *     http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing, software
+  * distributed under the License is distributed on an "AS IS" BASIS,
+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  * See the License for the specific language governing permissions and
+  * limitations under the License.
+  */
+ package org.apache.accumulo.core.file.rfile;
+ 
+ import java.io.ByteArrayOutputStream;
+ import java.io.DataOutputStream;
+ import java.io.IOException;
+ 
+ import org.apache.accumulo.core.data.Key;
+ import org.apache.accumulo.core.data.Value;
+ import org.apache.accumulo.core.file.blockfile.ABlockReader;
+ import org.apache.accumulo.core.file.blockfile.cache.CacheEntry;
+ import org.apache.accumulo.core.file.blockfile.impl.CachableBlockFile;
+ import org.apache.accumulo.core.file.rfile.BlockIndex.BlockIndexEntry;
+ import org.apache.accumulo.core.file.rfile.MultiLevelIndex.IndexEntry;
++import org.apache.accumulo.core.security.ColumnVisibility;
+ import org.junit.Assert;
+ import org.junit.Test;
+ 
+ /**
+  * 
+  */
+ public class BlockIndexTest {
+   
+   private static class MyCacheEntry implements CacheEntry {
+     Object idx;
+     byte[] data;
+     
+     MyCacheEntry(byte[] d) {
+       this.data = d;
+     }
+     
+     @Override
+     public void setIndex(Object idx) {
+       this.idx = idx;
+     }
+     
+     @Override
+     public Object getIndex() {
+       return idx;
+     }
+     
+     @Override
+     public byte[] getBuffer() {
+       return data;
+     }
+   }
+ 
+   @Test
+   public void test1() throws IOException {
+     ByteArrayOutputStream baos = new ByteArrayOutputStream();
+     DataOutputStream out = new DataOutputStream(baos);
+     
+     Key prevKey = null;
+     
+     int num = 1000;
+     
+     for (int i = 0; i < num; i++) {
+       Key key = new Key(RFileTest.nf("", i), "cf1", "cq1");
+       new RelativeKey(prevKey, key).write(out);
+       new Value(new byte[0]).write(out);
+       prevKey = key;
+     }
+     
+     out.close();
+     final byte[] data = baos.toByteArray();
+     
+     CacheEntry ce = new MyCacheEntry(data);
+ 
+     ABlockReader cacheBlock = new CachableBlockFile.CachedBlockRead(ce, data);
+     BlockIndex blockIndex = null;
+     
+     for (int i = 0; i < 129; i++)
 -      blockIndex = BlockIndex.getIndex(cacheBlock, new IndexEntry(prevKey, num, 0, 0, 0));
++      blockIndex = BlockIndex.getIndex(cacheBlock, new IndexEntry(prevKey, new BlockStats(Long.MAX_VALUE, Long.MAX_VALUE, new ColumnVisibility(), num), 0, 0, 0, RFile.RINDEX_VER_7));
+     
+     BlockIndexEntry[] indexEntries = blockIndex.getIndexEntries();
+     
+     for (int i = 0; i < indexEntries.length; i++) {
+       int row = Integer.parseInt(indexEntries[i].getPrevKey().getRowData().toString());
+       
+       BlockIndexEntry bie;
+       
+ 
+       bie = blockIndex.seekBlock(new Key(RFileTest.nf("", row), "cf1", "cq1"), cacheBlock);
+       if (i == 0)
+         Assert.assertSame(null, bie);
+       else
+         Assert.assertSame(indexEntries[i - 1], bie);
+       
+       Assert.assertSame(bie, blockIndex.seekBlock(new Key(RFileTest.nf("", row - 1), "cf1", "cq1"), cacheBlock));
+ 
+       bie = blockIndex.seekBlock(new Key(RFileTest.nf("", row + 1), "cf1", "cq1"), cacheBlock);
+       Assert.assertSame(indexEntries[i], bie);
+ 
+       RelativeKey rk = new RelativeKey();
+       rk.setPrevKey(bie.getPrevKey());
+       rk.readFields(cacheBlock);
+       
+       Assert.assertEquals(rk.getKey(), new Key(RFileTest.nf("", row + 1), "cf1", "cq1"));
+ 
+     }
+   }
+ 
+   @Test
+   public void testSame() throws IOException {
+     ByteArrayOutputStream baos = new ByteArrayOutputStream();
+     DataOutputStream out = new DataOutputStream(baos);
+     
+     Key prevKey = null;
+     
+     int num = 1000;
+     
+     for (int i = 0; i < num; i++) {
+       Key key = new Key(RFileTest.nf("", 1), "cf1", "cq1");
+       new RelativeKey(prevKey, key).write(out);
+       new Value(new byte[0]).write(out);
+       prevKey = key;
+     }
+     
+     for (int i = 0; i < num; i++) {
+       Key key = new Key(RFileTest.nf("", 3), "cf1", "cq1");
+       new RelativeKey(prevKey, key).write(out);
+       new Value(new byte[0]).write(out);
+       prevKey = key;
+     }
+     
+     for (int i = 0; i < num; i++) {
+       Key key = new Key(RFileTest.nf("", 5), "cf1", "cq1");
+       new RelativeKey(prevKey, key).write(out);
+       new Value(new byte[0]).write(out);
+       prevKey = key;
+     }
+     
+     out.close();
+     final byte[] data = baos.toByteArray();
+     
+     CacheEntry ce = new MyCacheEntry(data);
+ 
+     ABlockReader cacheBlock = new CachableBlockFile.CachedBlockRead(ce, data);
+     BlockIndex blockIndex = null;
+     
+     for (int i = 0; i < 257; i++)
 -      blockIndex = BlockIndex.getIndex(cacheBlock, new IndexEntry(prevKey, num, 0, 0, 0));
++      blockIndex = BlockIndex.getIndex(cacheBlock, new IndexEntry(prevKey, new BlockStats(Long.MAX_VALUE, Long.MAX_VALUE, new ColumnVisibility(), num), 0, 0, 0, RFile.RINDEX_VER_7));
+     
+     Assert.assertSame(null, blockIndex.seekBlock(new Key(RFileTest.nf("", 0), "cf1", "cq1"), cacheBlock));
+     Assert.assertSame(null, blockIndex.seekBlock(new Key(RFileTest.nf("", 1), "cf1", "cq1"), cacheBlock));
+     
+     for (int i = 2; i < 6; i++) {
+       Key seekKey = new Key(RFileTest.nf("", i), "cf1", "cq1");
+       BlockIndexEntry bie = blockIndex.seekBlock(seekKey, cacheBlock);
+       
+       Assert.assertTrue(bie.getPrevKey().compareTo(seekKey) < 0);
+ 
+       RelativeKey rk = new RelativeKey();
+       rk.setPrevKey(bie.getPrevKey());
+       rk.readFields(cacheBlock);
+       
+       Assert.assertTrue(rk.getKey().compareTo(seekKey) <= 0);
+     }
+ 
+   }
+ }

http://git-wip-us.apache.org/repos/asf/accumulo/blob/58fcad6e/core/src/test/java/org/apache/accumulo/core/file/rfile/MultiLevelIndexTest.java
----------------------------------------------------------------------
diff --cc core/src/test/java/org/apache/accumulo/core/file/rfile/MultiLevelIndexTest.java
index 25bea00,00405d1..a248bd7
--- a/core/src/test/java/org/apache/accumulo/core/file/rfile/MultiLevelIndexTest.java
+++ b/core/src/test/java/org/apache/accumulo/core/file/rfile/MultiLevelIndexTest.java
@@@ -37,13 -36,9 +37,13 @@@ import org.apache.accumulo.core.util.Ca
  import org.apache.hadoop.fs.FSDataInputStream;
  import org.apache.hadoop.fs.FSDataOutputStream;
  import org.apache.hadoop.fs.FileSystem;
 +import org.junit.Test;
 +
- import static junit.framework.Assert.*;
++import static org.junit.Assert.*;
  
 -public class MultiLevelIndexTest extends TestCase {
 +public class MultiLevelIndexTest {
    
 +  @Test
    public void test1() throws Exception {
      
      runTest(500, 1);

http://git-wip-us.apache.org/repos/asf/accumulo/blob/58fcad6e/core/src/test/java/org/apache/accumulo/core/file/rfile/RFileTest.java
----------------------------------------------------------------------
diff --cc core/src/test/java/org/apache/accumulo/core/file/rfile/RFileTest.java
index 71f5c6c,a23b6cc..6620649
--- a/core/src/test/java/org/apache/accumulo/core/file/rfile/RFileTest.java
+++ b/core/src/test/java/org/apache/accumulo/core/file/rfile/RFileTest.java
@@@ -16,6 -16,10 +16,8 @@@
   */
  package org.apache.accumulo.core.file.rfile;
  
 -import static org.junit.Assert.assertEquals;
 -import static org.junit.Assert.assertFalse;
 -import static org.junit.Assert.assertTrue;
++import static org.junit.Assert.*;
+ 
  import java.io.ByteArrayInputStream;
  import java.io.ByteArrayOutputStream;
  import java.io.DataInputStream;

http://git-wip-us.apache.org/repos/asf/accumulo/blob/58fcad6e/core/src/test/java/org/apache/accumulo/core/file/rfile/TimestampFilterTest.java
----------------------------------------------------------------------
diff --cc core/src/test/java/org/apache/accumulo/core/file/rfile/TimestampFilterTest.java
index f72a42b,0000000..731b225
mode 100644,000000..100644
--- a/core/src/test/java/org/apache/accumulo/core/file/rfile/TimestampFilterTest.java
+++ b/core/src/test/java/org/apache/accumulo/core/file/rfile/TimestampFilterTest.java
@@@ -1,99 -1,0 +1,97 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements.  See the NOTICE file distributed with
 + * this work for additional information regarding copyright ownership.
 + * The ASF licenses this file to You under the Apache License, Version 2.0
 + * (the "License"); you may not use this file except in compliance with
 + * the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.accumulo.core.file.rfile;
 +
 +import static org.junit.Assert.*;
 +
 +import java.io.ByteArrayInputStream;
 +import java.io.ByteArrayOutputStream;
 +import java.util.Collections;
 +import java.util.Map.Entry;
 +import java.util.Random;
 +import java.util.TreeMap;
 +
- import org.apache.accumulo.core.conf.AccumuloConfiguration;
 +import org.apache.accumulo.core.data.Key;
 +import org.apache.accumulo.core.data.Range;
 +import org.apache.accumulo.core.data.Value;
 +import org.apache.accumulo.core.file.blockfile.impl.CachableBlockFile;
 +import org.apache.accumulo.core.file.rfile.RFileTest.SeekableByteArrayInputStream;
 +import org.apache.accumulo.core.iterators.Predicate;
 +import org.apache.accumulo.core.iterators.predicates.TimestampRangePredicate;
- import org.apache.accumulo.core.iterators.system.ColumnFamilySkippingIterator;
 +import org.apache.accumulo.core.iterators.system.GenericFilterer;
- import org.apache.accumulo.core.security.ColumnVisibility;
 +import org.apache.hadoop.conf.Configuration;
 +import org.apache.hadoop.fs.FSDataInputStream;
 +import org.apache.hadoop.fs.FSDataOutputStream;
 +import org.apache.hadoop.fs.FileSystem;
 +import org.junit.Test;
 +
 +public class TimestampFilterTest {
 +  
++  @SuppressWarnings("unchecked")
 +  @Test
 +  public void testRFileTimestampFiltering() throws Exception {
 +    Predicate<Key,Value> timeRange = new TimestampRangePredicate(73, 117);
 +    int expected = 0;
 +    Random r = new Random();
 +    Configuration conf = new Configuration();
 +    ByteArrayOutputStream baos = new ByteArrayOutputStream();
 +    FSDataOutputStream dos = new FSDataOutputStream(baos, new FileSystem.Statistics("a"));
 +    CachableBlockFile.Writer _cbw = new CachableBlockFile.Writer(dos, "gz", conf);
 +    RFile.Writer writer = new RFile.Writer(_cbw, 1000, 1000);
 +    writer.startDefaultLocalityGroup();
 +    byte [] row = new byte[10];
 +    byte [] colFam = new byte[10];
 +    byte [] colQual = new byte[10];
 +    Value value = new Value(new byte[0]);
 +    byte [] colVis = new byte[0];
 +    TreeMap<Key,Value> inputBuffer = new TreeMap<Key,Value>();
 +    for(int i = 0; i < 100000; i++)
 +    {
 +      r.nextBytes(row);
 +      r.nextBytes(colFam);
 +      r.nextBytes(colQual);
 +      Key k = new Key(row,colFam,colQual,colVis,(long)i);
 +      if(timeRange.evaluate(k, value))
 +        expected++;
 +      inputBuffer.put(k, value);
 +    }
 +    for(Entry<Key,Value> e:inputBuffer.entrySet())
 +    {
 +      writer.append(e.getKey(), e.getValue());
 +    }
 +    writer.close();
 +
 +    // scan the RFile to bring back keys in a given timestamp range
 +    byte[] data = baos.toByteArray();
 +    ByteArrayInputStream bais = new SeekableByteArrayInputStream(data);
 +    FSDataInputStream in = new FSDataInputStream(bais);
 +    CachableBlockFile.Reader _cbr = new CachableBlockFile.Reader(in, data.length, conf);
 +    RFile.Reader reader = new RFile.Reader(_cbr);
 +    GenericFilterer filterer = new GenericFilterer(reader);
 +    int count = 0;
 +    filterer.applyFilter(timeRange,true);
 +    filterer.seek(new Range(), Collections.EMPTY_SET, false);
 +    while(filterer.hasTop())
 +    {
 +      count++;
 +      assertTrue(timeRange.evaluate(filterer.getTopKey(),filterer.getTopValue()));
 +      filterer.next();
 +    }
 +    assertEquals(expected, count);
 +  }
 +  
 +}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/58fcad6e/core/src/test/java/org/apache/accumulo/core/iterators/user/FilterTest.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/accumulo/blob/58fcad6e/core/src/test/java/org/apache/accumulo/core/iterators/user/IndexedDocIteratorTest.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/accumulo/blob/58fcad6e/core/src/test/java/org/apache/accumulo/core/security/ColumnVisibilityTest.java
----------------------------------------------------------------------
diff --cc core/src/test/java/org/apache/accumulo/core/security/ColumnVisibilityTest.java
index d463f42,6dfc8e5..3a90009
--- a/core/src/test/java/org/apache/accumulo/core/security/ColumnVisibilityTest.java
+++ b/core/src/test/java/org/apache/accumulo/core/security/ColumnVisibilityTest.java
@@@ -81,51 -82,13 +81,64 @@@ public class ColumnVisibilityTest 
    @Test
    public void testNormalization() {
      normalized("a", "a", "(a)", "a", "b|a", "a|b", "(b)|a", "a|b", "(b|(a|c))&x", "x&(a|b|c)", "(((a)))", "a");
 +    normalized("a|a", "a", "a|(a&a)", "a", "(a&b)|(b&a)", "a&b");
 +    normalized("a|(a|(a|b))","a|b");
 +    normalized("a|(a|(a|a))","a");
+     final String normForm = "a&b&c";
 -    normalized("b&c&a", normForm, "c&b&a", normForm, "a&(b&c)", normForm, "(a&c)&b", normForm);
++    normalized("b&c&a", normForm);
++    normalized("c&b&a", normForm);
++    normalized("a&(b&c)", normForm);
++    normalized("(a&c)&b", normForm);
++    final String normForm2 = "a|b|c";
++    normalized("b|c|a", normForm2);
++    normalized("c|b|a", normForm2);
++    normalized("a|(b|c)", normForm2);
++    normalized("(a|c)|b", normForm2);
+ 
+     // this an expression that's basically `expr | expr`
+     normalized("(d&c&b&a)|(b&c&a&d)", "a&b&c&d");
    }
    
 +  public void aOrBEqualC(String a, String b, String c)
 +  {
 +    ColumnVisibility cvA = new ColumnVisibility(a.getBytes());
 +    ColumnVisibility cvB = new ColumnVisibility(b.getBytes());
 +    ColumnVisibility cvC = cvA.or(cvB);
 +    assertArrayEquals(cvC.getExpression(), c.getBytes());
 +    // check that we didn't disturb the original ColumnVisibilities
 +    assertArrayEquals(cvA.getExpression(), a.getBytes());
 +    assertArrayEquals(cvB.getExpression(), b.getBytes());
 +  }
 +  
 +  @Test
 +  public void testDisjunction() {
 +    aOrBEqualC("a", "b", "a|b");
 +    aOrBEqualC("c|(a&b)", "b", "b|c|(a&b)");
 +    aOrBEqualC("c|(a&b)", "a|c","a|c|(a&b)");
 +    aOrBEqualC("a&b","c&d","(a&b)|(c&d)");
 +    aOrBEqualC("a","","");
 +  }
 +  
 +  public void aAndBEqualC(String a, String b, String c)
 +  {
 +    ColumnVisibility cvA = new ColumnVisibility(a.getBytes());
 +    ColumnVisibility cvB = new ColumnVisibility(b.getBytes());
 +    ColumnVisibility cvC = cvA.and(cvB);
 +    assertArrayEquals(cvC.getExpression(), c.getBytes());
 +    // check that we didn't disturb the original ColumnVisibilities
 +    assertArrayEquals(cvA.getExpression(), a.getBytes());
 +    assertArrayEquals(cvB.getExpression(), b.getBytes());
 +  }
 +  
 +  @Test
 +  public void testConjunction() {
 +    aAndBEqualC("a", "b", "a&b");
 +    aAndBEqualC("a&b", "c", "a&b&c");
 +    aAndBEqualC("a&(b|(c&d))", "e&(b|(c&d))","a&e&(b|(c&d))");
 +    aAndBEqualC("a|b","c|d","(a|b)&(c|d)");
 +    aAndBEqualC("a","","a");
 +  }
 +
    @Test
    public void testDanglingOperators() {
      shouldThrow("a|b&");

http://git-wip-us.apache.org/repos/asf/accumulo/blob/58fcad6e/core/src/test/java/org/apache/accumulo/core/security/VisibilityEvaluatorTest.java
----------------------------------------------------------------------
diff --cc core/src/test/java/org/apache/accumulo/core/security/VisibilityEvaluatorTest.java
index 7612e15,64aeeeb..d9b2ff0
--- a/core/src/test/java/org/apache/accumulo/core/security/VisibilityEvaluatorTest.java
+++ b/core/src/test/java/org/apache/accumulo/core/security/VisibilityEvaluatorTest.java
@@@ -52,9 -58,9 +58,9 @@@ public class VisibilityEvaluatorTest 
      // test missing separators; these should throw an exception
      for (String marking : new String[] {"one(five)", "(five)one", "(one)(two)", "a|(b(c))"}) {
        try {
 -        ct.evaluate(new ColumnVisibility(marking));
 +        new ColumnVisibility(marking).evaluate(auths);
          fail(marking + " failed to throw");
-       } catch (Throwable e) {
+       } catch (BadArgumentException e) {
          // all is good
        }
      }
@@@ -62,9 -68,9 +68,9 @@@
      // test unexpected separator
      for (String marking : new String[] {"&(five)", "|(five)", "(five)&", "five|", "a|(b)&", "(&five)", "(five|)"}) {
        try {
 -        ct.evaluate(new ColumnVisibility(marking));
 +        new ColumnVisibility(marking).evaluate(auths);
          fail(marking + " failed to throw");
-       } catch (Throwable e) {
+       } catch (BadArgumentException e) {
          // all is good
        }
      }
@@@ -72,11 -78,49 +78,47 @@@
      // test mismatched parentheses
      for (String marking : new String[] {"(", ")", "(a&b", "b|a)"}) {
        try {
 -        ct.evaluate(new ColumnVisibility(marking));
 +        new ColumnVisibility(marking).evaluate(auths);
          fail(marking + " failed to throw");
-       } catch (Throwable e) {
+       } catch (BadArgumentException e) {
          // all is good
        }
      }
    }
+   
+   @Test
 -  public void testQuotedExpressions() throws VisibilityParseException {
 -    VisibilityEvaluator ct = new VisibilityEvaluator(new Authorizations("A#C", "A\"C", "A\\C", "AC"));
++  public void testQuotedExpressions() {
++    Authorizations auths = new Authorizations("A#C", "A\"C", "A\\C", "AC");
+     
 -    assertTrue(ct.evaluate(new ColumnVisibility(quote("A#C") + "|" + quote("A?C"))));
 -    assertTrue(ct.evaluate(new ColumnVisibility(new ColumnVisibility(quote("A#C") + "|" + quote("A?C")).flatten())));
 -    assertTrue(ct.evaluate(new ColumnVisibility(quote("A\"C") + "&" + quote("A\\C"))));
 -    assertTrue(ct.evaluate(new ColumnVisibility(new ColumnVisibility(quote("A\"C") + "&" + quote("A\\C")).flatten())));
 -    assertTrue(ct.evaluate(new ColumnVisibility("(" + quote("A\"C") + "|B)&(" + quote("A#C") + "|D)")));
 -    
 -    assertFalse(ct.evaluate(new ColumnVisibility(quote("A#C") + "&B")));
 -    
 -    assertTrue(ct.evaluate(new ColumnVisibility(quote("A#C"))));
 -    assertTrue(ct.evaluate(new ColumnVisibility("(" + quote("A#C") + ")")));
++    assertTrue((new ColumnVisibility(quote("A#C") + "|" + quote("A?C"))).evaluate(auths));
++    assertTrue((new ColumnVisibility(new ColumnVisibility(quote("A#C") + "|" + quote("A?C")).getExpression())).evaluate(auths));
++    assertTrue((new ColumnVisibility(quote("A\"C") + "&" + quote("A\\C"))).evaluate(auths));
++    assertTrue((new ColumnVisibility(new ColumnVisibility(quote("A\"C") + "&" + quote("A\\C")).getExpression())).evaluate(auths));
++    assertTrue((new ColumnVisibility("(" + quote("A\"C") + "|B)&(" + quote("A#C") + "|D)")).evaluate(auths));
++    assertFalse((new ColumnVisibility(quote("A#C") + "&B")).evaluate(auths));
++    assertTrue((new ColumnVisibility(quote("A#C"))).evaluate(auths));
++    assertTrue((new ColumnVisibility("(" + quote("A#C") + ")")).evaluate(auths));
+   }
+   
+   @Test
+   public void testQuote() {
+     assertEquals("\"A#C\"", quote("A#C"));
+     assertEquals("\"A\\\"C\"", quote("A\"C"));
+     assertEquals("\"A\\\"\\\\C\"", quote("A\"\\C"));
+     assertEquals("ACS", quote("ACS"));
+     assertEquals("\"九\"", quote("九"));
+     assertEquals("\"五十\"", quote("五十"));
+   }
+   
+   @Test
 -  public void testNonAscii() throws VisibilityParseException, UnsupportedEncodingException {
 -    VisibilityEvaluator ct = new VisibilityEvaluator(new Authorizations(Charset.forName("UTF-8"), "五", "六", "八", "九", "五十"));
++  public void testNonAscii() throws UnsupportedEncodingException {
++    Authorizations auths = new Authorizations(Charset.forName("UTF-8"), "五", "六", "八", "九", "五十");
+     
 -    assertTrue(ct.evaluate(new ColumnVisibility(quote("五") + "|" + quote("四"), "UTF-8")));
 -    assertFalse(ct.evaluate(new ColumnVisibility(quote("五") + "&" + quote("四"), "UTF-8")));
 -    assertTrue(ct.evaluate(new ColumnVisibility(quote("五") + "&(" + quote("四") + "|" + quote("九") + ")", "UTF-8")));
 -    assertTrue(ct.evaluate(new ColumnVisibility("\"五\"&(\"四\"|\"五十\")", "UTF-8")));
 -    assertFalse(ct.evaluate(new ColumnVisibility(quote("五") + "&(" + quote("四") + "|" + quote("三") + ")", "UTF-8")));
 -    assertFalse(ct.evaluate(new ColumnVisibility("\"五\"&(\"四\"|\"三\")", "UTF-8")));
++    assertTrue((new ColumnVisibility(quote("五") + "|" + quote("四"), "UTF-8")).evaluate(auths));
++    assertFalse((new ColumnVisibility(quote("五") + "&" + quote("四"), "UTF-8")).evaluate(auths));
++    assertTrue((new ColumnVisibility(quote("五") + "&(" + quote("四") + "|" + quote("九") + ")", "UTF-8")).evaluate(auths));
++    assertTrue((new ColumnVisibility("\"五\"&(\"四\"|\"五十\")", "UTF-8")).evaluate(auths));
++    assertFalse((new ColumnVisibility(quote("五") + "&(" + quote("四") + "|" + quote("三") + ")", "UTF-8")).evaluate(auths));
++    assertFalse((new ColumnVisibility("\"五\"&(\"四\"|\"三\")", "UTF-8")).evaluate(auths));
+   }
  }

http://git-wip-us.apache.org/repos/asf/accumulo/blob/58fcad6e/docs/examples/README.bloom
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/accumulo/blob/58fcad6e/docs/examples/README.mapred
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/accumulo/blob/58fcad6e/docs/examples/README.maxmutation
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/accumulo/blob/58fcad6e/examples/instamo/pom.xml
----------------------------------------------------------------------
diff --cc examples/instamo/pom.xml
index 0000000,6ba714c..b50cb79
mode 000000,100644..100644
--- a/examples/instamo/pom.xml
+++ b/examples/instamo/pom.xml
@@@ -1,0 -1,117 +1,117 @@@
+ <?xml version="1.0"?>
+ <project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
+   <modelVersion>4.0.0</modelVersion>
+   <groupId>org.apache.accumulo</groupId>
+   <artifactId>instamo</artifactId>
+   <packaging>jar</packaging>
 -  <version>1.5.0-SNAPSHOT</version>
++  <version>ACCUMULO-652-SNAPSHOT</version>
+   <name>instamo</name>
+   <url>http://maven.apache.org</url>
+   <properties>
 -    <accumulo.version>1.5.0-SNAPSHOT</accumulo.version>
++    <accumulo.version>ACCUMULO-652-SNAPSHOT</accumulo.version>
+     <hadoop-one.version>1.0.4</hadoop-one.version>
+     <hadoop-two.version>2.0.2-alpha</hadoop-two.version>
+     <maclass>org.apache.accumulo.instamo.MapReduceExample</maclass>
+   </properties>
+   <profiles>
+     <!-- profile for building against Hadoop 1.0.x
+     Activate by not specifying hadoop.profile -->
+     <profile>
+       <id>hadoop-1.0</id>
+       <activation>
+         <property>
+           <name>!hadoop.profile</name>
+         </property>
+       </activation>
+       <properties>
+         <hadoop.version>${hadoop-one.version}</hadoop.version>
+       </properties>
+       <dependencies>
+         <dependency>
+           <groupId>org.apache.hadoop</groupId>
+           <artifactId>hadoop-core</artifactId>
+           <version>${hadoop.version}</version>
+         </dependency>
+       </dependencies>
+     </profile>
+     <!-- profile for building against Hadoop 2.0.x
+     Activate using: mvn -Dhadoop.profile=2.0 -->
+     <profile>
+       <id>hadoop-2.0</id>
+       <activation>
+         <property>
+           <name>hadoop.profile</name>
+           <value>2.0</value>
+         </property>
+       </activation>
+       <properties>
+         <hadoop.version>${hadoop-two.version}</hadoop.version>
+       </properties>
+       <dependencies>
+         <dependency>
+           <groupId>org.apache.hadoop</groupId>
+           <artifactId>hadoop-client</artifactId>
+           <version>${hadoop.version}</version>
+         </dependency>
+       </dependencies>
+     </profile>
+   </profiles>
+   <dependencies>
+     <dependency>
+       <groupId>junit</groupId>
+       <artifactId>junit</artifactId>
+       <version>4.11</version>
+       <scope>test</scope>
+     </dependency>
+     <dependency>
+       <groupId>org.apache.accumulo</groupId>
+       <artifactId>accumulo-core</artifactId>
+       <version>${accumulo.version}</version>
+     </dependency>
+     <dependency>
+       <groupId>org.apache.accumulo</groupId>
+       <artifactId>accumulo-server</artifactId>
+       <version>${accumulo.version}</version>
+     </dependency>
+     <dependency>
+       <groupId>org.apache.zookeeper</groupId>
+       <artifactId>zookeeper</artifactId>
+       <version>3.3.4</version>
+     </dependency>
+     <dependency>
+       <groupId>commons-io</groupId>
+       <artifactId>commons-io</artifactId>
+       <version>2.4</version>
+     </dependency>
+     <dependency>
+     	<groupId>org.apache.accumulo</groupId>
+     	<artifactId>accumulo-test</artifactId>
+     	<version>${accumulo.version}</version>
+     </dependency>
+   </dependencies>
+   <build>
+     <plugins>
+       <plugin>
+         <artifactId>maven-compiler-plugin</artifactId>
+         <version>2.0.2</version>
+         <configuration>
+           <source>1.6</source>
+           <target>1.6</target>
+         </configuration>
+       </plugin>
+       <plugin>
+         <groupId>org.codehaus.mojo</groupId>
+         <artifactId>exec-maven-plugin</artifactId>
+         <version>1.2.1</version>
+         <configuration>
+           <executable>java</executable>
+           <arguments>
+             <argument>-classpath</argument>
+             <classpath/>
+             <argument>${maclass}</argument>
+           </arguments>
+         </configuration>
+       </plugin>
+     </plugins>
+   </build>
+ </project>

http://git-wip-us.apache.org/repos/asf/accumulo/blob/58fcad6e/examples/pom.xml
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/accumulo/blob/58fcad6e/examples/simple/pom.xml
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/accumulo/blob/58fcad6e/fate/pom.xml
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/accumulo/blob/58fcad6e/pom.xml
----------------------------------------------------------------------
diff --cc pom.xml
index a8fe066,54c1fbb..c76e612
--- a/pom.xml
+++ b/pom.xml
@@@ -549,8 -618,13 +618,13 @@@
        </dependency>
        <dependency>
          <groupId>org.apache.accumulo</groupId>
+         <artifactId>accumulo-test</artifactId>
 -        <version>1.5.0-SNAPSHOT</version>
++        <version>ACCUMULO-652-SNAPSHOT</version>
+       </dependency>
+       <dependency>
+         <groupId>org.apache.accumulo</groupId>
          <artifactId>examples-simple</artifactId>
 -        <version>1.5.0-SNAPSHOT</version>
 +        <version>ACCUMULO-652-SNAPSHOT</version>
        </dependency>
        <dependency>
          <groupId>org.apache.accumulo</groupId>


[13/15] ACCUMULO-652 merged changes from trunk

Posted by el...@apache.org.
http://git-wip-us.apache.org/repos/asf/accumulo/blob/58fcad6e/proxy/pom.xml
----------------------------------------------------------------------
diff --cc proxy/pom.xml
index 0000000,d886c7c..3d015b3
mode 000000,100644..100644
--- a/proxy/pom.xml
+++ b/proxy/pom.xml
@@@ -1,0 -1,150 +1,150 @@@
+ <?xml version="1.0"?>
+ <!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+ 
+       http://www.apache.org/licenses/LICENSE-2.0
+ 
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+ -->
+ <project xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd" xmlns="http://maven.apache.org/POM/4.0.0"
+   xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
+   <modelVersion>4.0.0</modelVersion>
+   <parent>
+     <artifactId>accumulo</artifactId>
+     <groupId>org.apache.accumulo</groupId>
 -    <version>1.5.0-SNAPSHOT</version>
++    <version>ACCUMULO-652-SNAPSHOT</version>
+   </parent>
+   <artifactId>accumulo-proxy</artifactId>
+   <name>accumulo-proxy</name>
+   <url>http://maven.apache.org</url>
+   <properties>
+     <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
+   </properties>
+   <build>
+     <pluginManagement>
+       <plugins>
+         <plugin>
+           <artifactId>maven-jar-plugin</artifactId>
+           <configuration>
+             <outputDirectory>../lib</outputDirectory>
+           </configuration>
+         </plugin>
+       </plugins>
+     </pluginManagement>
+     <plugins>
+       <plugin>
+         <groupId>org.codehaus.mojo</groupId>
+         <artifactId>exec-maven-plugin</artifactId>
+         <executions>
+           <execution>
+             <id>generate-thrift</id>
+             <phase>generate-sources</phase>
+             <goals>
+               <goal>exec</goal>
+             </goals>
+             <configuration>
+               <executable>${basedir}/src/main/scripts/generate-thrift.sh</executable>
+             </configuration>
+           </execution>
+         </executions>
+       </plugin>
+     </plugins>
+   </build>
+   <dependencies>
+     <dependency>
+       <groupId>org.apache.accumulo</groupId>
+       <artifactId>accumulo-core</artifactId>
+       <scope>compile</scope>
+     </dependency>
+     <dependency>
+       <groupId>org.apache.zookeeper</groupId>
+       <artifactId>zookeeper</artifactId>
+       <scope>compile</scope>
+     </dependency>
+     <dependency>
+       <groupId>junit</groupId>
+       <artifactId>junit</artifactId>
+       <scope>test</scope>
+     </dependency>
+     <dependency>
+       <groupId>org.apache.thrift</groupId>
+       <artifactId>libthrift</artifactId>
+       <scope>compile</scope>
+     </dependency>
+     <dependency>
+       <groupId>com.google.guava</groupId>
+       <artifactId>guava</artifactId>
+       <version>13.0.1</version>
+       <scope>compile</scope>
+     </dependency>
+     <dependency>
+       <groupId>commons-cli</groupId>
+       <artifactId>commons-cli</artifactId>
+       <scope>compile</scope>
+     </dependency>
+     <dependency>
+       <groupId>com.beust</groupId>
+       <artifactId>jcommander</artifactId>
+     </dependency>
+     <dependency>
+     	<groupId>org.apache.accumulo</groupId>
+     	<artifactId>instamo</artifactId>
 -    	<version>1.5.0-SNAPSHOT</version>
++    	<version>ACCUMULO-652-SNAPSHOT</version>
+     	<scope>test</scope>
+     </dependency>
+     <dependency>
+     	<groupId>org.apache.accumulo</groupId>
+     	<artifactId>examples-simple</artifactId>
+     	<scope>test</scope>
+     </dependency>
+   </dependencies>
+ 
+   <profiles>
+     <!-- profile for building against Hadoop 1.0.x
+     Activate by not specifying hadoop.profile -->
+     <profile>
+       <id>hadoop-1.0</id>
+       <activation>
+         <property>
+           <name>!hadoop.profile</name>
+         </property>
+       </activation>
+       <dependencies>
+         <dependency>
+           <groupId>org.apache.hadoop</groupId>
+           <artifactId>hadoop-core</artifactId>
+         </dependency>
+       </dependencies>
+     </profile>
+     <!-- profile for building against Hadoop 2.0.x
+     Activate using: mvn -Dhadoop.profile=2.0 -->
+     <profile>
+       <id>hadoop-2.0</id>
+       <activation>
+         <property>
+           <name>hadoop.profile</name>
+           <value>2.0</value>
+         </property>
+       </activation>
+       <dependencies>
+         <dependency>
+           <groupId>org.apache.hadoop</groupId>
+           <artifactId>hadoop-client</artifactId>
+         </dependency>
+         <dependency>
+           <groupId>org.apache.avro</groupId>
+           <artifactId>avro</artifactId>
+         </dependency>
+       </dependencies>
+     </profile>
+   </profiles>
+ </project>

http://git-wip-us.apache.org/repos/asf/accumulo/blob/58fcad6e/proxy/src/main/java/org/apache/accumulo/proxy/thrift/IOException.java
----------------------------------------------------------------------
diff --cc proxy/src/main/java/org/apache/accumulo/proxy/thrift/IOException.java
index 0000000,0000000..574c5be
new file mode 100644
--- /dev/null
+++ b/proxy/src/main/java/org/apache/accumulo/proxy/thrift/IOException.java
@@@ -1,0 -1,0 +1,402 @@@
++/*
++ * Licensed to the Apache Software Foundation (ASF) under one or more
++ * contributor license agreements.  See the NOTICE file distributed with
++ * this work for additional information regarding copyright ownership.
++ * The ASF licenses this file to You under the Apache License, Version 2.0
++ * (the "License"); you may not use this file except in compliance with
++ * the License.  You may obtain a copy of the License at
++ *
++ *     http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++/**
++ * Autogenerated by Thrift Compiler (0.9.0)
++ *
++ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
++ *  @generated
++ */
++package org.apache.accumulo.proxy.thrift;
++
++import org.apache.thrift.scheme.IScheme;
++import org.apache.thrift.scheme.SchemeFactory;
++import org.apache.thrift.scheme.StandardScheme;
++
++import org.apache.thrift.scheme.TupleScheme;
++import org.apache.thrift.protocol.TTupleProtocol;
++import org.apache.thrift.protocol.TProtocolException;
++import org.apache.thrift.EncodingUtils;
++import org.apache.thrift.TException;
++import java.util.List;
++import java.util.ArrayList;
++import java.util.Map;
++import java.util.HashMap;
++import java.util.EnumMap;
++import java.util.Set;
++import java.util.HashSet;
++import java.util.EnumSet;
++import java.util.Collections;
++import java.util.BitSet;
++import java.nio.ByteBuffer;
++import java.util.Arrays;
++import org.slf4j.Logger;
++import org.slf4j.LoggerFactory;
++
++@SuppressWarnings("all") public class IOException extends TException implements org.apache.thrift.TBase<IOException, IOException._Fields>, java.io.Serializable, Cloneable {
++  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("IOException");
++
++  private static final org.apache.thrift.protocol.TField MSG_FIELD_DESC = new org.apache.thrift.protocol.TField("msg", org.apache.thrift.protocol.TType.STRING, (short)1);
++
++  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
++  static {
++    schemes.put(StandardScheme.class, new IOExceptionStandardSchemeFactory());
++    schemes.put(TupleScheme.class, new IOExceptionTupleSchemeFactory());
++  }
++
++  public String msg; // required
++
++  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
++  @SuppressWarnings("all") public enum _Fields implements org.apache.thrift.TFieldIdEnum {
++    MSG((short)1, "msg");
++
++    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
++
++    static {
++      for (_Fields field : EnumSet.allOf(_Fields.class)) {
++        byName.put(field.getFieldName(), field);
++      }
++    }
++
++    /**
++     * Find the _Fields constant that matches fieldId, or null if its not found.
++     */
++    public static _Fields findByThriftId(int fieldId) {
++      switch(fieldId) {
++        case 1: // MSG
++          return MSG;
++        default:
++          return null;
++      }
++    }
++
++    /**
++     * Find the _Fields constant that matches fieldId, throwing an exception
++     * if it is not found.
++     */
++    public static _Fields findByThriftIdOrThrow(int fieldId) {
++      _Fields fields = findByThriftId(fieldId);
++      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
++      return fields;
++    }
++
++    /**
++     * Find the _Fields constant that matches name, or null if its not found.
++     */
++    public static _Fields findByName(String name) {
++      return byName.get(name);
++    }
++
++    private final short _thriftId;
++    private final String _fieldName;
++
++    _Fields(short thriftId, String fieldName) {
++      _thriftId = thriftId;
++      _fieldName = fieldName;
++    }
++
++    public short getThriftFieldId() {
++      return _thriftId;
++    }
++
++    public String getFieldName() {
++      return _fieldName;
++    }
++  }
++
++  // isset id assignments
++  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
++  static {
++    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
++    tmpMap.put(_Fields.MSG, new org.apache.thrift.meta_data.FieldMetaData("msg", org.apache.thrift.TFieldRequirementType.DEFAULT, 
++        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
++    metaDataMap = Collections.unmodifiableMap(tmpMap);
++    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(IOException.class, metaDataMap);
++  }
++
++  public IOException() {
++  }
++
++  public IOException(
++    String msg)
++  {
++    this();
++    this.msg = msg;
++  }
++
++  /**
++   * Performs a deep copy on <i>other</i>.
++   */
++  public IOException(IOException other) {
++    if (other.isSetMsg()) {
++      this.msg = other.msg;
++    }
++  }
++
++  public IOException deepCopy() {
++    return new IOException(this);
++  }
++
++  @Override
++  public void clear() {
++    this.msg = null;
++  }
++
++  public String getMsg() {
++    return this.msg;
++  }
++
++  public IOException setMsg(String msg) {
++    this.msg = msg;
++    return this;
++  }
++
++  public void unsetMsg() {
++    this.msg = null;
++  }
++
++  /** Returns true if field msg is set (has been assigned a value) and false otherwise */
++  public boolean isSetMsg() {
++    return this.msg != null;
++  }
++
++  public void setMsgIsSet(boolean value) {
++    if (!value) {
++      this.msg = null;
++    }
++  }
++
++  public void setFieldValue(_Fields field, Object value) {
++    switch (field) {
++    case MSG:
++      if (value == null) {
++        unsetMsg();
++      } else {
++        setMsg((String)value);
++      }
++      break;
++
++    }
++  }
++
++  public Object getFieldValue(_Fields field) {
++    switch (field) {
++    case MSG:
++      return getMsg();
++
++    }
++    throw new IllegalStateException();
++  }
++
++  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
++  public boolean isSet(_Fields field) {
++    if (field == null) {
++      throw new IllegalArgumentException();
++    }
++
++    switch (field) {
++    case MSG:
++      return isSetMsg();
++    }
++    throw new IllegalStateException();
++  }
++
++  @Override
++  public boolean equals(Object that) {
++    if (that == null)
++      return false;
++    if (that instanceof IOException)
++      return this.equals((IOException)that);
++    return false;
++  }
++
++  public boolean equals(IOException that) {
++    if (that == null)
++      return false;
++
++    boolean this_present_msg = true && this.isSetMsg();
++    boolean that_present_msg = true && that.isSetMsg();
++    if (this_present_msg || that_present_msg) {
++      if (!(this_present_msg && that_present_msg))
++        return false;
++      if (!this.msg.equals(that.msg))
++        return false;
++    }
++
++    return true;
++  }
++
++  @Override
++  public int hashCode() {
++    return 0;
++  }
++
++  public int compareTo(IOException other) {
++    if (!getClass().equals(other.getClass())) {
++      return getClass().getName().compareTo(other.getClass().getName());
++    }
++
++    int lastComparison = 0;
++    IOException typedOther = (IOException)other;
++
++    lastComparison = Boolean.valueOf(isSetMsg()).compareTo(typedOther.isSetMsg());
++    if (lastComparison != 0) {
++      return lastComparison;
++    }
++    if (isSetMsg()) {
++      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.msg, typedOther.msg);
++      if (lastComparison != 0) {
++        return lastComparison;
++      }
++    }
++    return 0;
++  }
++
++  public _Fields fieldForId(int fieldId) {
++    return _Fields.findByThriftId(fieldId);
++  }
++
++  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
++    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
++  }
++
++  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
++    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
++  }
++
++  @Override
++  public String toString() {
++    StringBuilder sb = new StringBuilder("IOException(");
++    boolean first = true;
++
++    sb.append("msg:");
++    if (this.msg == null) {
++      sb.append("null");
++    } else {
++      sb.append(this.msg);
++    }
++    first = false;
++    sb.append(")");
++    return sb.toString();
++  }
++
++  public void validate() throws org.apache.thrift.TException {
++    // check for required fields
++    // check for sub-struct validity
++  }
++
++  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
++    try {
++      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
++    } catch (org.apache.thrift.TException te) {
++      throw new java.io.IOException(te);
++    }
++  }
++
++  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
++    try {
++      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
++    } catch (org.apache.thrift.TException te) {
++      throw new java.io.IOException(te);
++    }
++  }
++
++  private static class IOExceptionStandardSchemeFactory implements SchemeFactory {
++    public IOExceptionStandardScheme getScheme() {
++      return new IOExceptionStandardScheme();
++    }
++  }
++
++  private static class IOExceptionStandardScheme extends StandardScheme<IOException> {
++
++    public void read(org.apache.thrift.protocol.TProtocol iprot, IOException struct) throws org.apache.thrift.TException {
++      org.apache.thrift.protocol.TField schemeField;
++      iprot.readStructBegin();
++      while (true)
++      {
++        schemeField = iprot.readFieldBegin();
++        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
++          break;
++        }
++        switch (schemeField.id) {
++          case 1: // MSG
++            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
++              struct.msg = iprot.readString();
++              struct.setMsgIsSet(true);
++            } else { 
++              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
++            }
++            break;
++          default:
++            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
++        }
++        iprot.readFieldEnd();
++      }
++      iprot.readStructEnd();
++
++      // check for required fields of primitive type, which can't be checked in the validate method
++      struct.validate();
++    }
++
++    public void write(org.apache.thrift.protocol.TProtocol oprot, IOException struct) throws org.apache.thrift.TException {
++      struct.validate();
++
++      oprot.writeStructBegin(STRUCT_DESC);
++      if (struct.msg != null) {
++        oprot.writeFieldBegin(MSG_FIELD_DESC);
++        oprot.writeString(struct.msg);
++        oprot.writeFieldEnd();
++      }
++      oprot.writeFieldStop();
++      oprot.writeStructEnd();
++    }
++
++  }
++
++  private static class IOExceptionTupleSchemeFactory implements SchemeFactory {
++    public IOExceptionTupleScheme getScheme() {
++      return new IOExceptionTupleScheme();
++    }
++  }
++
++  private static class IOExceptionTupleScheme extends TupleScheme<IOException> {
++
++    @Override
++    public void write(org.apache.thrift.protocol.TProtocol prot, IOException struct) throws org.apache.thrift.TException {
++      TTupleProtocol oprot = (TTupleProtocol) prot;
++      BitSet optionals = new BitSet();
++      if (struct.isSetMsg()) {
++        optionals.set(0);
++      }
++      oprot.writeBitSet(optionals, 1);
++      if (struct.isSetMsg()) {
++        oprot.writeString(struct.msg);
++      }
++    }
++
++    @Override
++    public void read(org.apache.thrift.protocol.TProtocol prot, IOException struct) throws org.apache.thrift.TException {
++      TTupleProtocol iprot = (TTupleProtocol) prot;
++      BitSet incoming = iprot.readBitSet(1);
++      if (incoming.get(0)) {
++        struct.msg = iprot.readString();
++        struct.setMsgIsSet(true);
++      }
++    }
++  }
++
++}
++

http://git-wip-us.apache.org/repos/asf/accumulo/blob/58fcad6e/proxy/src/main/java/org/apache/accumulo/proxy/thrift/PColumn.java
----------------------------------------------------------------------
diff --cc proxy/src/main/java/org/apache/accumulo/proxy/thrift/PColumn.java
index 0000000,0000000..35a83ca
new file mode 100644
--- /dev/null
+++ b/proxy/src/main/java/org/apache/accumulo/proxy/thrift/PColumn.java
@@@ -1,0 -1,0 +1,737 @@@
++/*
++ * Licensed to the Apache Software Foundation (ASF) under one or more
++ * contributor license agreements.  See the NOTICE file distributed with
++ * this work for additional information regarding copyright ownership.
++ * The ASF licenses this file to You under the Apache License, Version 2.0
++ * (the "License"); you may not use this file except in compliance with
++ * the License.  You may obtain a copy of the License at
++ *
++ *     http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++/**
++ * Autogenerated by Thrift Compiler (0.9.0)
++ *
++ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
++ *  @generated
++ */
++package org.apache.accumulo.proxy.thrift;
++
++import org.apache.thrift.scheme.IScheme;
++import org.apache.thrift.scheme.SchemeFactory;
++import org.apache.thrift.scheme.StandardScheme;
++
++import org.apache.thrift.scheme.TupleScheme;
++import org.apache.thrift.protocol.TTupleProtocol;
++import org.apache.thrift.protocol.TProtocolException;
++import org.apache.thrift.EncodingUtils;
++import org.apache.thrift.TException;
++import java.util.List;
++import java.util.ArrayList;
++import java.util.Map;
++import java.util.HashMap;
++import java.util.EnumMap;
++import java.util.Set;
++import java.util.HashSet;
++import java.util.EnumSet;
++import java.util.Collections;
++import java.util.BitSet;
++import java.nio.ByteBuffer;
++import java.util.Arrays;
++import org.slf4j.Logger;
++import org.slf4j.LoggerFactory;
++
++@SuppressWarnings("all") public class PColumn implements org.apache.thrift.TBase<PColumn, PColumn._Fields>, java.io.Serializable, Cloneable {
++  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("PColumn");
++
++  private static final org.apache.thrift.protocol.TField COL_FAMILY_FIELD_DESC = new org.apache.thrift.protocol.TField("colFamily", org.apache.thrift.protocol.TType.STRING, (short)1);
++  private static final org.apache.thrift.protocol.TField COL_QUALIFIER_FIELD_DESC = new org.apache.thrift.protocol.TField("colQualifier", org.apache.thrift.protocol.TType.STRING, (short)2);
++  private static final org.apache.thrift.protocol.TField COL_VISIBILITY_FIELD_DESC = new org.apache.thrift.protocol.TField("colVisibility", org.apache.thrift.protocol.TType.STRING, (short)3);
++  private static final org.apache.thrift.protocol.TField TIMESTAMP_FIELD_DESC = new org.apache.thrift.protocol.TField("timestamp", org.apache.thrift.protocol.TType.I64, (short)4);
++
++  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
++  static {
++    schemes.put(StandardScheme.class, new PColumnStandardSchemeFactory());
++    schemes.put(TupleScheme.class, new PColumnTupleSchemeFactory());
++  }
++
++  public ByteBuffer colFamily; // required
++  public ByteBuffer colQualifier; // required
++  public ByteBuffer colVisibility; // optional
++  public long timestamp; // optional
++
++  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
++  @SuppressWarnings("all") public enum _Fields implements org.apache.thrift.TFieldIdEnum {
++    COL_FAMILY((short)1, "colFamily"),
++    COL_QUALIFIER((short)2, "colQualifier"),
++    COL_VISIBILITY((short)3, "colVisibility"),
++    TIMESTAMP((short)4, "timestamp");
++
++    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
++
++    static {
++      for (_Fields field : EnumSet.allOf(_Fields.class)) {
++        byName.put(field.getFieldName(), field);
++      }
++    }
++
++    /**
++     * Find the _Fields constant that matches fieldId, or null if its not found.
++     */
++    public static _Fields findByThriftId(int fieldId) {
++      switch(fieldId) {
++        case 1: // COL_FAMILY
++          return COL_FAMILY;
++        case 2: // COL_QUALIFIER
++          return COL_QUALIFIER;
++        case 3: // COL_VISIBILITY
++          return COL_VISIBILITY;
++        case 4: // TIMESTAMP
++          return TIMESTAMP;
++        default:
++          return null;
++      }
++    }
++
++    /**
++     * Find the _Fields constant that matches fieldId, throwing an exception
++     * if it is not found.
++     */
++    public static _Fields findByThriftIdOrThrow(int fieldId) {
++      _Fields fields = findByThriftId(fieldId);
++      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
++      return fields;
++    }
++
++    /**
++     * Find the _Fields constant that matches name, or null if its not found.
++     */
++    public static _Fields findByName(String name) {
++      return byName.get(name);
++    }
++
++    private final short _thriftId;
++    private final String _fieldName;
++
++    _Fields(short thriftId, String fieldName) {
++      _thriftId = thriftId;
++      _fieldName = fieldName;
++    }
++
++    public short getThriftFieldId() {
++      return _thriftId;
++    }
++
++    public String getFieldName() {
++      return _fieldName;
++    }
++  }
++
++  // isset id assignments
++  private static final int __TIMESTAMP_ISSET_ID = 0;
++  private byte __isset_bitfield = 0;
++  private _Fields optionals[] = {_Fields.COL_VISIBILITY,_Fields.TIMESTAMP};
++  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
++  static {
++    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
++    tmpMap.put(_Fields.COL_FAMILY, new org.apache.thrift.meta_data.FieldMetaData("colFamily", org.apache.thrift.TFieldRequirementType.DEFAULT, 
++        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING        , true)));
++    tmpMap.put(_Fields.COL_QUALIFIER, new org.apache.thrift.meta_data.FieldMetaData("colQualifier", org.apache.thrift.TFieldRequirementType.DEFAULT, 
++        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING        , true)));
++    tmpMap.put(_Fields.COL_VISIBILITY, new org.apache.thrift.meta_data.FieldMetaData("colVisibility", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
++        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING        , true)));
++    tmpMap.put(_Fields.TIMESTAMP, new org.apache.thrift.meta_data.FieldMetaData("timestamp", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
++        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
++    metaDataMap = Collections.unmodifiableMap(tmpMap);
++    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(PColumn.class, metaDataMap);
++  }
++
++  public PColumn() {
++  }
++
++  public PColumn(
++    ByteBuffer colFamily,
++    ByteBuffer colQualifier)
++  {
++    this();
++    this.colFamily = colFamily;
++    this.colQualifier = colQualifier;
++  }
++
++  /**
++   * Performs a deep copy on <i>other</i>.
++   */
++  public PColumn(PColumn other) {
++    __isset_bitfield = other.__isset_bitfield;
++    if (other.isSetColFamily()) {
++      this.colFamily = org.apache.thrift.TBaseHelper.copyBinary(other.colFamily);
++;
++    }
++    if (other.isSetColQualifier()) {
++      this.colQualifier = org.apache.thrift.TBaseHelper.copyBinary(other.colQualifier);
++;
++    }
++    if (other.isSetColVisibility()) {
++      this.colVisibility = org.apache.thrift.TBaseHelper.copyBinary(other.colVisibility);
++;
++    }
++    this.timestamp = other.timestamp;
++  }
++
++  public PColumn deepCopy() {
++    return new PColumn(this);
++  }
++
++  @Override
++  public void clear() {
++    this.colFamily = null;
++    this.colQualifier = null;
++    this.colVisibility = null;
++    setTimestampIsSet(false);
++    this.timestamp = 0;
++  }
++
++  public byte[] getColFamily() {
++    setColFamily(org.apache.thrift.TBaseHelper.rightSize(colFamily));
++    return colFamily == null ? null : colFamily.array();
++  }
++
++  public ByteBuffer bufferForColFamily() {
++    return colFamily;
++  }
++
++  public PColumn setColFamily(byte[] colFamily) {
++    setColFamily(colFamily == null ? (ByteBuffer)null : ByteBuffer.wrap(colFamily));
++    return this;
++  }
++
++  public PColumn setColFamily(ByteBuffer colFamily) {
++    this.colFamily = colFamily;
++    return this;
++  }
++
++  public void unsetColFamily() {
++    this.colFamily = null;
++  }
++
++  /** Returns true if field colFamily is set (has been assigned a value) and false otherwise */
++  public boolean isSetColFamily() {
++    return this.colFamily != null;
++  }
++
++  public void setColFamilyIsSet(boolean value) {
++    if (!value) {
++      this.colFamily = null;
++    }
++  }
++
++  public byte[] getColQualifier() {
++    setColQualifier(org.apache.thrift.TBaseHelper.rightSize(colQualifier));
++    return colQualifier == null ? null : colQualifier.array();
++  }
++
++  public ByteBuffer bufferForColQualifier() {
++    return colQualifier;
++  }
++
++  public PColumn setColQualifier(byte[] colQualifier) {
++    setColQualifier(colQualifier == null ? (ByteBuffer)null : ByteBuffer.wrap(colQualifier));
++    return this;
++  }
++
++  public PColumn setColQualifier(ByteBuffer colQualifier) {
++    this.colQualifier = colQualifier;
++    return this;
++  }
++
++  public void unsetColQualifier() {
++    this.colQualifier = null;
++  }
++
++  /** Returns true if field colQualifier is set (has been assigned a value) and false otherwise */
++  public boolean isSetColQualifier() {
++    return this.colQualifier != null;
++  }
++
++  public void setColQualifierIsSet(boolean value) {
++    if (!value) {
++      this.colQualifier = null;
++    }
++  }
++
++  public byte[] getColVisibility() {
++    setColVisibility(org.apache.thrift.TBaseHelper.rightSize(colVisibility));
++    return colVisibility == null ? null : colVisibility.array();
++  }
++
++  public ByteBuffer bufferForColVisibility() {
++    return colVisibility;
++  }
++
++  public PColumn setColVisibility(byte[] colVisibility) {
++    setColVisibility(colVisibility == null ? (ByteBuffer)null : ByteBuffer.wrap(colVisibility));
++    return this;
++  }
++
++  public PColumn setColVisibility(ByteBuffer colVisibility) {
++    this.colVisibility = colVisibility;
++    return this;
++  }
++
++  public void unsetColVisibility() {
++    this.colVisibility = null;
++  }
++
++  /** Returns true if field colVisibility is set (has been assigned a value) and false otherwise */
++  public boolean isSetColVisibility() {
++    return this.colVisibility != null;
++  }
++
++  public void setColVisibilityIsSet(boolean value) {
++    if (!value) {
++      this.colVisibility = null;
++    }
++  }
++
++  public long getTimestamp() {
++    return this.timestamp;
++  }
++
++  public PColumn setTimestamp(long timestamp) {
++    this.timestamp = timestamp;
++    setTimestampIsSet(true);
++    return this;
++  }
++
++  public void unsetTimestamp() {
++    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __TIMESTAMP_ISSET_ID);
++  }
++
++  /** Returns true if field timestamp is set (has been assigned a value) and false otherwise */
++  public boolean isSetTimestamp() {
++    return EncodingUtils.testBit(__isset_bitfield, __TIMESTAMP_ISSET_ID);
++  }
++
++  public void setTimestampIsSet(boolean value) {
++    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __TIMESTAMP_ISSET_ID, value);
++  }
++
++  public void setFieldValue(_Fields field, Object value) {
++    switch (field) {
++    case COL_FAMILY:
++      if (value == null) {
++        unsetColFamily();
++      } else {
++        setColFamily((ByteBuffer)value);
++      }
++      break;
++
++    case COL_QUALIFIER:
++      if (value == null) {
++        unsetColQualifier();
++      } else {
++        setColQualifier((ByteBuffer)value);
++      }
++      break;
++
++    case COL_VISIBILITY:
++      if (value == null) {
++        unsetColVisibility();
++      } else {
++        setColVisibility((ByteBuffer)value);
++      }
++      break;
++
++    case TIMESTAMP:
++      if (value == null) {
++        unsetTimestamp();
++      } else {
++        setTimestamp((Long)value);
++      }
++      break;
++
++    }
++  }
++
++  public Object getFieldValue(_Fields field) {
++    switch (field) {
++    case COL_FAMILY:
++      return getColFamily();
++
++    case COL_QUALIFIER:
++      return getColQualifier();
++
++    case COL_VISIBILITY:
++      return getColVisibility();
++
++    case TIMESTAMP:
++      return Long.valueOf(getTimestamp());
++
++    }
++    throw new IllegalStateException();
++  }
++
++  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
++  public boolean isSet(_Fields field) {
++    if (field == null) {
++      throw new IllegalArgumentException();
++    }
++
++    switch (field) {
++    case COL_FAMILY:
++      return isSetColFamily();
++    case COL_QUALIFIER:
++      return isSetColQualifier();
++    case COL_VISIBILITY:
++      return isSetColVisibility();
++    case TIMESTAMP:
++      return isSetTimestamp();
++    }
++    throw new IllegalStateException();
++  }
++
++  @Override
++  public boolean equals(Object that) {
++    if (that == null)
++      return false;
++    if (that instanceof PColumn)
++      return this.equals((PColumn)that);
++    return false;
++  }
++
++  public boolean equals(PColumn that) {
++    if (that == null)
++      return false;
++
++    boolean this_present_colFamily = true && this.isSetColFamily();
++    boolean that_present_colFamily = true && that.isSetColFamily();
++    if (this_present_colFamily || that_present_colFamily) {
++      if (!(this_present_colFamily && that_present_colFamily))
++        return false;
++      if (!this.colFamily.equals(that.colFamily))
++        return false;
++    }
++
++    boolean this_present_colQualifier = true && this.isSetColQualifier();
++    boolean that_present_colQualifier = true && that.isSetColQualifier();
++    if (this_present_colQualifier || that_present_colQualifier) {
++      if (!(this_present_colQualifier && that_present_colQualifier))
++        return false;
++      if (!this.colQualifier.equals(that.colQualifier))
++        return false;
++    }
++
++    boolean this_present_colVisibility = true && this.isSetColVisibility();
++    boolean that_present_colVisibility = true && that.isSetColVisibility();
++    if (this_present_colVisibility || that_present_colVisibility) {
++      if (!(this_present_colVisibility && that_present_colVisibility))
++        return false;
++      if (!this.colVisibility.equals(that.colVisibility))
++        return false;
++    }
++
++    boolean this_present_timestamp = true && this.isSetTimestamp();
++    boolean that_present_timestamp = true && that.isSetTimestamp();
++    if (this_present_timestamp || that_present_timestamp) {
++      if (!(this_present_timestamp && that_present_timestamp))
++        return false;
++      if (this.timestamp != that.timestamp)
++        return false;
++    }
++
++    return true;
++  }
++
++  @Override
++  public int hashCode() {
++    return 0;
++  }
++
++  public int compareTo(PColumn other) {
++    if (!getClass().equals(other.getClass())) {
++      return getClass().getName().compareTo(other.getClass().getName());
++    }
++
++    int lastComparison = 0;
++    PColumn typedOther = (PColumn)other;
++
++    lastComparison = Boolean.valueOf(isSetColFamily()).compareTo(typedOther.isSetColFamily());
++    if (lastComparison != 0) {
++      return lastComparison;
++    }
++    if (isSetColFamily()) {
++      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.colFamily, typedOther.colFamily);
++      if (lastComparison != 0) {
++        return lastComparison;
++      }
++    }
++    lastComparison = Boolean.valueOf(isSetColQualifier()).compareTo(typedOther.isSetColQualifier());
++    if (lastComparison != 0) {
++      return lastComparison;
++    }
++    if (isSetColQualifier()) {
++      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.colQualifier, typedOther.colQualifier);
++      if (lastComparison != 0) {
++        return lastComparison;
++      }
++    }
++    lastComparison = Boolean.valueOf(isSetColVisibility()).compareTo(typedOther.isSetColVisibility());
++    if (lastComparison != 0) {
++      return lastComparison;
++    }
++    if (isSetColVisibility()) {
++      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.colVisibility, typedOther.colVisibility);
++      if (lastComparison != 0) {
++        return lastComparison;
++      }
++    }
++    lastComparison = Boolean.valueOf(isSetTimestamp()).compareTo(typedOther.isSetTimestamp());
++    if (lastComparison != 0) {
++      return lastComparison;
++    }
++    if (isSetTimestamp()) {
++      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.timestamp, typedOther.timestamp);
++      if (lastComparison != 0) {
++        return lastComparison;
++      }
++    }
++    return 0;
++  }
++
++  public _Fields fieldForId(int fieldId) {
++    return _Fields.findByThriftId(fieldId);
++  }
++
++  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
++    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
++  }
++
++  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
++    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
++  }
++
++  @Override
++  public String toString() {
++    StringBuilder sb = new StringBuilder("PColumn(");
++    boolean first = true;
++
++    sb.append("colFamily:");
++    if (this.colFamily == null) {
++      sb.append("null");
++    } else {
++      org.apache.thrift.TBaseHelper.toString(this.colFamily, sb);
++    }
++    first = false;
++    if (!first) sb.append(", ");
++    sb.append("colQualifier:");
++    if (this.colQualifier == null) {
++      sb.append("null");
++    } else {
++      org.apache.thrift.TBaseHelper.toString(this.colQualifier, sb);
++    }
++    first = false;
++    if (isSetColVisibility()) {
++      if (!first) sb.append(", ");
++      sb.append("colVisibility:");
++      if (this.colVisibility == null) {
++        sb.append("null");
++      } else {
++        org.apache.thrift.TBaseHelper.toString(this.colVisibility, sb);
++      }
++      first = false;
++    }
++    if (isSetTimestamp()) {
++      if (!first) sb.append(", ");
++      sb.append("timestamp:");
++      sb.append(this.timestamp);
++      first = false;
++    }
++    sb.append(")");
++    return sb.toString();
++  }
++
++  public void validate() throws org.apache.thrift.TException {
++    // check for required fields
++    // check for sub-struct validity
++  }
++
++  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
++    try {
++      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
++    } catch (org.apache.thrift.TException te) {
++      throw new java.io.IOException(te);
++    }
++  }
++
++  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
++    try {
++      // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
++      __isset_bitfield = 0;
++      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
++    } catch (org.apache.thrift.TException te) {
++      throw new java.io.IOException(te);
++    }
++  }
++
++  private static class PColumnStandardSchemeFactory implements SchemeFactory {
++    public PColumnStandardScheme getScheme() {
++      return new PColumnStandardScheme();
++    }
++  }
++
++  private static class PColumnStandardScheme extends StandardScheme<PColumn> {
++
++    public void read(org.apache.thrift.protocol.TProtocol iprot, PColumn struct) throws org.apache.thrift.TException {
++      org.apache.thrift.protocol.TField schemeField;
++      iprot.readStructBegin();
++      while (true)
++      {
++        schemeField = iprot.readFieldBegin();
++        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
++          break;
++        }
++        switch (schemeField.id) {
++          case 1: // COL_FAMILY
++            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
++              struct.colFamily = iprot.readBinary();
++              struct.setColFamilyIsSet(true);
++            } else { 
++              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
++            }
++            break;
++          case 2: // COL_QUALIFIER
++            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
++              struct.colQualifier = iprot.readBinary();
++              struct.setColQualifierIsSet(true);
++            } else { 
++              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
++            }
++            break;
++          case 3: // COL_VISIBILITY
++            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
++              struct.colVisibility = iprot.readBinary();
++              struct.setColVisibilityIsSet(true);
++            } else { 
++              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
++            }
++            break;
++          case 4: // TIMESTAMP
++            if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
++              struct.timestamp = iprot.readI64();
++              struct.setTimestampIsSet(true);
++            } else { 
++              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
++            }
++            break;
++          default:
++            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
++        }
++        iprot.readFieldEnd();
++      }
++      iprot.readStructEnd();
++
++      // check for required fields of primitive type, which can't be checked in the validate method
++      struct.validate();
++    }
++
++    public void write(org.apache.thrift.protocol.TProtocol oprot, PColumn struct) throws org.apache.thrift.TException {
++      struct.validate();
++
++      oprot.writeStructBegin(STRUCT_DESC);
++      if (struct.colFamily != null) {
++        oprot.writeFieldBegin(COL_FAMILY_FIELD_DESC);
++        oprot.writeBinary(struct.colFamily);
++        oprot.writeFieldEnd();
++      }
++      if (struct.colQualifier != null) {
++        oprot.writeFieldBegin(COL_QUALIFIER_FIELD_DESC);
++        oprot.writeBinary(struct.colQualifier);
++        oprot.writeFieldEnd();
++      }
++      if (struct.colVisibility != null) {
++        if (struct.isSetColVisibility()) {
++          oprot.writeFieldBegin(COL_VISIBILITY_FIELD_DESC);
++          oprot.writeBinary(struct.colVisibility);
++          oprot.writeFieldEnd();
++        }
++      }
++      if (struct.isSetTimestamp()) {
++        oprot.writeFieldBegin(TIMESTAMP_FIELD_DESC);
++        oprot.writeI64(struct.timestamp);
++        oprot.writeFieldEnd();
++      }
++      oprot.writeFieldStop();
++      oprot.writeStructEnd();
++    }
++
++  }
++
++  private static class PColumnTupleSchemeFactory implements SchemeFactory {
++    public PColumnTupleScheme getScheme() {
++      return new PColumnTupleScheme();
++    }
++  }
++
++  private static class PColumnTupleScheme extends TupleScheme<PColumn> {
++
++    @Override
++    public void write(org.apache.thrift.protocol.TProtocol prot, PColumn struct) throws org.apache.thrift.TException {
++      TTupleProtocol oprot = (TTupleProtocol) prot;
++      BitSet optionals = new BitSet();
++      if (struct.isSetColFamily()) {
++        optionals.set(0);
++      }
++      if (struct.isSetColQualifier()) {
++        optionals.set(1);
++      }
++      if (struct.isSetColVisibility()) {
++        optionals.set(2);
++      }
++      if (struct.isSetTimestamp()) {
++        optionals.set(3);
++      }
++      oprot.writeBitSet(optionals, 4);
++      if (struct.isSetColFamily()) {
++        oprot.writeBinary(struct.colFamily);
++      }
++      if (struct.isSetColQualifier()) {
++        oprot.writeBinary(struct.colQualifier);
++      }
++      if (struct.isSetColVisibility()) {
++        oprot.writeBinary(struct.colVisibility);
++      }
++      if (struct.isSetTimestamp()) {
++        oprot.writeI64(struct.timestamp);
++      }
++    }
++
++    @Override
++    public void read(org.apache.thrift.protocol.TProtocol prot, PColumn struct) throws org.apache.thrift.TException {
++      TTupleProtocol iprot = (TTupleProtocol) prot;
++      BitSet incoming = iprot.readBitSet(4);
++      if (incoming.get(0)) {
++        struct.colFamily = iprot.readBinary();
++        struct.setColFamilyIsSet(true);
++      }
++      if (incoming.get(1)) {
++        struct.colQualifier = iprot.readBinary();
++        struct.setColQualifierIsSet(true);
++      }
++      if (incoming.get(2)) {
++        struct.colVisibility = iprot.readBinary();
++        struct.setColVisibilityIsSet(true);
++      }
++      if (incoming.get(3)) {
++        struct.timestamp = iprot.readI64();
++        struct.setTimestampIsSet(true);
++      }
++    }
++  }
++
++}
++

http://git-wip-us.apache.org/repos/asf/accumulo/blob/58fcad6e/proxy/src/main/java/org/apache/accumulo/proxy/thrift/PColumnUpdate.java
----------------------------------------------------------------------
diff --cc proxy/src/main/java/org/apache/accumulo/proxy/thrift/PColumnUpdate.java
index 0000000,0000000..31e5c27
new file mode 100644
--- /dev/null
+++ b/proxy/src/main/java/org/apache/accumulo/proxy/thrift/PColumnUpdate.java
@@@ -1,0 -1,0 +1,848 @@@
++/*
++ * Licensed to the Apache Software Foundation (ASF) under one or more
++ * contributor license agreements.  See the NOTICE file distributed with
++ * this work for additional information regarding copyright ownership.
++ * The ASF licenses this file to You under the Apache License, Version 2.0
++ * (the "License"); you may not use this file except in compliance with
++ * the License.  You may obtain a copy of the License at
++ *
++ *     http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++/**
++ * Autogenerated by Thrift Compiler (0.9.0)
++ *
++ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
++ *  @generated
++ */
++package org.apache.accumulo.proxy.thrift;
++
++import org.apache.thrift.scheme.IScheme;
++import org.apache.thrift.scheme.SchemeFactory;
++import org.apache.thrift.scheme.StandardScheme;
++
++import org.apache.thrift.scheme.TupleScheme;
++import org.apache.thrift.protocol.TTupleProtocol;
++import org.apache.thrift.protocol.TProtocolException;
++import org.apache.thrift.EncodingUtils;
++import org.apache.thrift.TException;
++import java.util.List;
++import java.util.ArrayList;
++import java.util.Map;
++import java.util.HashMap;
++import java.util.EnumMap;
++import java.util.Set;
++import java.util.HashSet;
++import java.util.EnumSet;
++import java.util.Collections;
++import java.util.BitSet;
++import java.nio.ByteBuffer;
++import java.util.Arrays;
++import org.slf4j.Logger;
++import org.slf4j.LoggerFactory;
++
++@SuppressWarnings("all") public class PColumnUpdate implements org.apache.thrift.TBase<PColumnUpdate, PColumnUpdate._Fields>, java.io.Serializable, Cloneable {
++  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("PColumnUpdate");
++
++  private static final org.apache.thrift.protocol.TField COL_FAMILY_FIELD_DESC = new org.apache.thrift.protocol.TField("colFamily", org.apache.thrift.protocol.TType.STRING, (short)1);
++  private static final org.apache.thrift.protocol.TField COL_QUALIFIER_FIELD_DESC = new org.apache.thrift.protocol.TField("colQualifier", org.apache.thrift.protocol.TType.STRING, (short)2);
++  private static final org.apache.thrift.protocol.TField COL_VISIBILITY_FIELD_DESC = new org.apache.thrift.protocol.TField("colVisibility", org.apache.thrift.protocol.TType.STRING, (short)3);
++  private static final org.apache.thrift.protocol.TField TIMESTAMP_FIELD_DESC = new org.apache.thrift.protocol.TField("timestamp", org.apache.thrift.protocol.TType.I64, (short)4);
++  private static final org.apache.thrift.protocol.TField VALUE_FIELD_DESC = new org.apache.thrift.protocol.TField("value", org.apache.thrift.protocol.TType.STRING, (short)5);
++
++  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
++  static {
++    schemes.put(StandardScheme.class, new PColumnUpdateStandardSchemeFactory());
++    schemes.put(TupleScheme.class, new PColumnUpdateTupleSchemeFactory());
++  }
++
++  public ByteBuffer colFamily; // required
++  public ByteBuffer colQualifier; // required
++  public ByteBuffer colVisibility; // optional
++  public long timestamp; // optional
++  public ByteBuffer value; // required
++
++  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
++  @SuppressWarnings("all") public enum _Fields implements org.apache.thrift.TFieldIdEnum {
++    COL_FAMILY((short)1, "colFamily"),
++    COL_QUALIFIER((short)2, "colQualifier"),
++    COL_VISIBILITY((short)3, "colVisibility"),
++    TIMESTAMP((short)4, "timestamp"),
++    VALUE((short)5, "value");
++
++    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
++
++    static {
++      for (_Fields field : EnumSet.allOf(_Fields.class)) {
++        byName.put(field.getFieldName(), field);
++      }
++    }
++
++    /**
++     * Find the _Fields constant that matches fieldId, or null if its not found.
++     */
++    public static _Fields findByThriftId(int fieldId) {
++      switch(fieldId) {
++        case 1: // COL_FAMILY
++          return COL_FAMILY;
++        case 2: // COL_QUALIFIER
++          return COL_QUALIFIER;
++        case 3: // COL_VISIBILITY
++          return COL_VISIBILITY;
++        case 4: // TIMESTAMP
++          return TIMESTAMP;
++        case 5: // VALUE
++          return VALUE;
++        default:
++          return null;
++      }
++    }
++
++    /**
++     * Find the _Fields constant that matches fieldId, throwing an exception
++     * if it is not found.
++     */
++    public static _Fields findByThriftIdOrThrow(int fieldId) {
++      _Fields fields = findByThriftId(fieldId);
++      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
++      return fields;
++    }
++
++    /**
++     * Find the _Fields constant that matches name, or null if its not found.
++     */
++    public static _Fields findByName(String name) {
++      return byName.get(name);
++    }
++
++    private final short _thriftId;
++    private final String _fieldName;
++
++    _Fields(short thriftId, String fieldName) {
++      _thriftId = thriftId;
++      _fieldName = fieldName;
++    }
++
++    public short getThriftFieldId() {
++      return _thriftId;
++    }
++
++    public String getFieldName() {
++      return _fieldName;
++    }
++  }
++
++  // isset id assignments
++  private static final int __TIMESTAMP_ISSET_ID = 0;
++  private byte __isset_bitfield = 0;
++  private _Fields optionals[] = {_Fields.COL_VISIBILITY,_Fields.TIMESTAMP};
++  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
++  static {
++    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
++    tmpMap.put(_Fields.COL_FAMILY, new org.apache.thrift.meta_data.FieldMetaData("colFamily", org.apache.thrift.TFieldRequirementType.DEFAULT, 
++        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING        , true)));
++    tmpMap.put(_Fields.COL_QUALIFIER, new org.apache.thrift.meta_data.FieldMetaData("colQualifier", org.apache.thrift.TFieldRequirementType.DEFAULT, 
++        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING        , true)));
++    tmpMap.put(_Fields.COL_VISIBILITY, new org.apache.thrift.meta_data.FieldMetaData("colVisibility", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
++        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING        , true)));
++    tmpMap.put(_Fields.TIMESTAMP, new org.apache.thrift.meta_data.FieldMetaData("timestamp", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
++        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
++    tmpMap.put(_Fields.VALUE, new org.apache.thrift.meta_data.FieldMetaData("value", org.apache.thrift.TFieldRequirementType.DEFAULT, 
++        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING        , true)));
++    metaDataMap = Collections.unmodifiableMap(tmpMap);
++    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(PColumnUpdate.class, metaDataMap);
++  }
++
++  public PColumnUpdate() {
++  }
++
++  public PColumnUpdate(
++    ByteBuffer colFamily,
++    ByteBuffer colQualifier,
++    ByteBuffer value)
++  {
++    this();
++    this.colFamily = colFamily;
++    this.colQualifier = colQualifier;
++    this.value = value;
++  }
++
++  /**
++   * Performs a deep copy on <i>other</i>.
++   */
++  public PColumnUpdate(PColumnUpdate other) {
++    __isset_bitfield = other.__isset_bitfield;
++    if (other.isSetColFamily()) {
++      this.colFamily = org.apache.thrift.TBaseHelper.copyBinary(other.colFamily);
++;
++    }
++    if (other.isSetColQualifier()) {
++      this.colQualifier = org.apache.thrift.TBaseHelper.copyBinary(other.colQualifier);
++;
++    }
++    if (other.isSetColVisibility()) {
++      this.colVisibility = org.apache.thrift.TBaseHelper.copyBinary(other.colVisibility);
++;
++    }
++    this.timestamp = other.timestamp;
++    if (other.isSetValue()) {
++      this.value = org.apache.thrift.TBaseHelper.copyBinary(other.value);
++;
++    }
++  }
++
++  public PColumnUpdate deepCopy() {
++    return new PColumnUpdate(this);
++  }
++
++  @Override
++  public void clear() {
++    this.colFamily = null;
++    this.colQualifier = null;
++    this.colVisibility = null;
++    setTimestampIsSet(false);
++    this.timestamp = 0;
++    this.value = null;
++  }
++
++  public byte[] getColFamily() {
++    setColFamily(org.apache.thrift.TBaseHelper.rightSize(colFamily));
++    return colFamily == null ? null : colFamily.array();
++  }
++
++  public ByteBuffer bufferForColFamily() {
++    return colFamily;
++  }
++
++  public PColumnUpdate setColFamily(byte[] colFamily) {
++    setColFamily(colFamily == null ? (ByteBuffer)null : ByteBuffer.wrap(colFamily));
++    return this;
++  }
++
++  public PColumnUpdate setColFamily(ByteBuffer colFamily) {
++    this.colFamily = colFamily;
++    return this;
++  }
++
++  public void unsetColFamily() {
++    this.colFamily = null;
++  }
++
++  /** Returns true if field colFamily is set (has been assigned a value) and false otherwise */
++  public boolean isSetColFamily() {
++    return this.colFamily != null;
++  }
++
++  public void setColFamilyIsSet(boolean value) {
++    if (!value) {
++      this.colFamily = null;
++    }
++  }
++
++  public byte[] getColQualifier() {
++    setColQualifier(org.apache.thrift.TBaseHelper.rightSize(colQualifier));
++    return colQualifier == null ? null : colQualifier.array();
++  }
++
++  public ByteBuffer bufferForColQualifier() {
++    return colQualifier;
++  }
++
++  public PColumnUpdate setColQualifier(byte[] colQualifier) {
++    setColQualifier(colQualifier == null ? (ByteBuffer)null : ByteBuffer.wrap(colQualifier));
++    return this;
++  }
++
++  public PColumnUpdate setColQualifier(ByteBuffer colQualifier) {
++    this.colQualifier = colQualifier;
++    return this;
++  }
++
++  public void unsetColQualifier() {
++    this.colQualifier = null;
++  }
++
++  /** Returns true if field colQualifier is set (has been assigned a value) and false otherwise */
++  public boolean isSetColQualifier() {
++    return this.colQualifier != null;
++  }
++
++  public void setColQualifierIsSet(boolean value) {
++    if (!value) {
++      this.colQualifier = null;
++    }
++  }
++
++  public byte[] getColVisibility() {
++    setColVisibility(org.apache.thrift.TBaseHelper.rightSize(colVisibility));
++    return colVisibility == null ? null : colVisibility.array();
++  }
++
++  public ByteBuffer bufferForColVisibility() {
++    return colVisibility;
++  }
++
++  public PColumnUpdate setColVisibility(byte[] colVisibility) {
++    setColVisibility(colVisibility == null ? (ByteBuffer)null : ByteBuffer.wrap(colVisibility));
++    return this;
++  }
++
++  public PColumnUpdate setColVisibility(ByteBuffer colVisibility) {
++    this.colVisibility = colVisibility;
++    return this;
++  }
++
++  public void unsetColVisibility() {
++    this.colVisibility = null;
++  }
++
++  /** Returns true if field colVisibility is set (has been assigned a value) and false otherwise */
++  public boolean isSetColVisibility() {
++    return this.colVisibility != null;
++  }
++
++  public void setColVisibilityIsSet(boolean value) {
++    if (!value) {
++      this.colVisibility = null;
++    }
++  }
++
++  public long getTimestamp() {
++    return this.timestamp;
++  }
++
++  public PColumnUpdate setTimestamp(long timestamp) {
++    this.timestamp = timestamp;
++    setTimestampIsSet(true);
++    return this;
++  }
++
++  public void unsetTimestamp() {
++    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __TIMESTAMP_ISSET_ID);
++  }
++
++  /** Returns true if field timestamp is set (has been assigned a value) and false otherwise */
++  public boolean isSetTimestamp() {
++    return EncodingUtils.testBit(__isset_bitfield, __TIMESTAMP_ISSET_ID);
++  }
++
++  public void setTimestampIsSet(boolean value) {
++    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __TIMESTAMP_ISSET_ID, value);
++  }
++
++  public byte[] getValue() {
++    setValue(org.apache.thrift.TBaseHelper.rightSize(value));
++    return value == null ? null : value.array();
++  }
++
++  public ByteBuffer bufferForValue() {
++    return value;
++  }
++
++  public PColumnUpdate setValue(byte[] value) {
++    setValue(value == null ? (ByteBuffer)null : ByteBuffer.wrap(value));
++    return this;
++  }
++
++  public PColumnUpdate setValue(ByteBuffer value) {
++    this.value = value;
++    return this;
++  }
++
++  public void unsetValue() {
++    this.value = null;
++  }
++
++  /** Returns true if field value is set (has been assigned a value) and false otherwise */
++  public boolean isSetValue() {
++    return this.value != null;
++  }
++
++  public void setValueIsSet(boolean value) {
++    if (!value) {
++      this.value = null;
++    }
++  }
++
++  public void setFieldValue(_Fields field, Object value) {
++    switch (field) {
++    case COL_FAMILY:
++      if (value == null) {
++        unsetColFamily();
++      } else {
++        setColFamily((ByteBuffer)value);
++      }
++      break;
++
++    case COL_QUALIFIER:
++      if (value == null) {
++        unsetColQualifier();
++      } else {
++        setColQualifier((ByteBuffer)value);
++      }
++      break;
++
++    case COL_VISIBILITY:
++      if (value == null) {
++        unsetColVisibility();
++      } else {
++        setColVisibility((ByteBuffer)value);
++      }
++      break;
++
++    case TIMESTAMP:
++      if (value == null) {
++        unsetTimestamp();
++      } else {
++        setTimestamp((Long)value);
++      }
++      break;
++
++    case VALUE:
++      if (value == null) {
++        unsetValue();
++      } else {
++        setValue((ByteBuffer)value);
++      }
++      break;
++
++    }
++  }
++
++  public Object getFieldValue(_Fields field) {
++    switch (field) {
++    case COL_FAMILY:
++      return getColFamily();
++
++    case COL_QUALIFIER:
++      return getColQualifier();
++
++    case COL_VISIBILITY:
++      return getColVisibility();
++
++    case TIMESTAMP:
++      return Long.valueOf(getTimestamp());
++
++    case VALUE:
++      return getValue();
++
++    }
++    throw new IllegalStateException();
++  }
++
++  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
++  public boolean isSet(_Fields field) {
++    if (field == null) {
++      throw new IllegalArgumentException();
++    }
++
++    switch (field) {
++    case COL_FAMILY:
++      return isSetColFamily();
++    case COL_QUALIFIER:
++      return isSetColQualifier();
++    case COL_VISIBILITY:
++      return isSetColVisibility();
++    case TIMESTAMP:
++      return isSetTimestamp();
++    case VALUE:
++      return isSetValue();
++    }
++    throw new IllegalStateException();
++  }
++
++  @Override
++  public boolean equals(Object that) {
++    if (that == null)
++      return false;
++    if (that instanceof PColumnUpdate)
++      return this.equals((PColumnUpdate)that);
++    return false;
++  }
++
++  public boolean equals(PColumnUpdate that) {
++    if (that == null)
++      return false;
++
++    boolean this_present_colFamily = true && this.isSetColFamily();
++    boolean that_present_colFamily = true && that.isSetColFamily();
++    if (this_present_colFamily || that_present_colFamily) {
++      if (!(this_present_colFamily && that_present_colFamily))
++        return false;
++      if (!this.colFamily.equals(that.colFamily))
++        return false;
++    }
++
++    boolean this_present_colQualifier = true && this.isSetColQualifier();
++    boolean that_present_colQualifier = true && that.isSetColQualifier();
++    if (this_present_colQualifier || that_present_colQualifier) {
++      if (!(this_present_colQualifier && that_present_colQualifier))
++        return false;
++      if (!this.colQualifier.equals(that.colQualifier))
++        return false;
++    }
++
++    boolean this_present_colVisibility = true && this.isSetColVisibility();
++    boolean that_present_colVisibility = true && that.isSetColVisibility();
++    if (this_present_colVisibility || that_present_colVisibility) {
++      if (!(this_present_colVisibility && that_present_colVisibility))
++        return false;
++      if (!this.colVisibility.equals(that.colVisibility))
++        return false;
++    }
++
++    boolean this_present_timestamp = true && this.isSetTimestamp();
++    boolean that_present_timestamp = true && that.isSetTimestamp();
++    if (this_present_timestamp || that_present_timestamp) {
++      if (!(this_present_timestamp && that_present_timestamp))
++        return false;
++      if (this.timestamp != that.timestamp)
++        return false;
++    }
++
++    boolean this_present_value = true && this.isSetValue();
++    boolean that_present_value = true && that.isSetValue();
++    if (this_present_value || that_present_value) {
++      if (!(this_present_value && that_present_value))
++        return false;
++      if (!this.value.equals(that.value))
++        return false;
++    }
++
++    return true;
++  }
++
++  @Override
++  public int hashCode() {
++    return 0;
++  }
++
++  public int compareTo(PColumnUpdate other) {
++    if (!getClass().equals(other.getClass())) {
++      return getClass().getName().compareTo(other.getClass().getName());
++    }
++
++    int lastComparison = 0;
++    PColumnUpdate typedOther = (PColumnUpdate)other;
++
++    lastComparison = Boolean.valueOf(isSetColFamily()).compareTo(typedOther.isSetColFamily());
++    if (lastComparison != 0) {
++      return lastComparison;
++    }
++    if (isSetColFamily()) {
++      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.colFamily, typedOther.colFamily);
++      if (lastComparison != 0) {
++        return lastComparison;
++      }
++    }
++    lastComparison = Boolean.valueOf(isSetColQualifier()).compareTo(typedOther.isSetColQualifier());
++    if (lastComparison != 0) {
++      return lastComparison;
++    }
++    if (isSetColQualifier()) {
++      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.colQualifier, typedOther.colQualifier);
++      if (lastComparison != 0) {
++        return lastComparison;
++      }
++    }
++    lastComparison = Boolean.valueOf(isSetColVisibility()).compareTo(typedOther.isSetColVisibility());
++    if (lastComparison != 0) {
++      return lastComparison;
++    }
++    if (isSetColVisibility()) {
++      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.colVisibility, typedOther.colVisibility);
++      if (lastComparison != 0) {
++        return lastComparison;
++      }
++    }
++    lastComparison = Boolean.valueOf(isSetTimestamp()).compareTo(typedOther.isSetTimestamp());
++    if (lastComparison != 0) {
++      return lastComparison;
++    }
++    if (isSetTimestamp()) {
++      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.timestamp, typedOther.timestamp);
++      if (lastComparison != 0) {
++        return lastComparison;
++      }
++    }
++    lastComparison = Boolean.valueOf(isSetValue()).compareTo(typedOther.isSetValue());
++    if (lastComparison != 0) {
++      return lastComparison;
++    }
++    if (isSetValue()) {
++      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.value, typedOther.value);
++      if (lastComparison != 0) {
++        return lastComparison;
++      }
++    }
++    return 0;
++  }
++
++  public _Fields fieldForId(int fieldId) {
++    return _Fields.findByThriftId(fieldId);
++  }
++
++  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
++    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
++  }
++
++  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
++    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
++  }
++
++  @Override
++  public String toString() {
++    StringBuilder sb = new StringBuilder("PColumnUpdate(");
++    boolean first = true;
++
++    sb.append("colFamily:");
++    if (this.colFamily == null) {
++      sb.append("null");
++    } else {
++      org.apache.thrift.TBaseHelper.toString(this.colFamily, sb);
++    }
++    first = false;
++    if (!first) sb.append(", ");
++    sb.append("colQualifier:");
++    if (this.colQualifier == null) {
++      sb.append("null");
++    } else {
++      org.apache.thrift.TBaseHelper.toString(this.colQualifier, sb);
++    }
++    first = false;
++    if (isSetColVisibility()) {
++      if (!first) sb.append(", ");
++      sb.append("colVisibility:");
++      if (this.colVisibility == null) {
++        sb.append("null");
++      } else {
++        org.apache.thrift.TBaseHelper.toString(this.colVisibility, sb);
++      }
++      first = false;
++    }
++    if (isSetTimestamp()) {
++      if (!first) sb.append(", ");
++      sb.append("timestamp:");
++      sb.append(this.timestamp);
++      first = false;
++    }
++    if (!first) sb.append(", ");
++    sb.append("value:");
++    if (this.value == null) {
++      sb.append("null");
++    } else {
++      org.apache.thrift.TBaseHelper.toString(this.value, sb);
++    }
++    first = false;
++    sb.append(")");
++    return sb.toString();
++  }
++
++  public void validate() throws org.apache.thrift.TException {
++    // check for required fields
++    // check for sub-struct validity
++  }
++
++  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
++    try {
++      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
++    } catch (org.apache.thrift.TException te) {
++      throw new java.io.IOException(te);
++    }
++  }
++
++  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
++    try {
++      // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
++      __isset_bitfield = 0;
++      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
++    } catch (org.apache.thrift.TException te) {
++      throw new java.io.IOException(te);
++    }
++  }
++
++  private static class PColumnUpdateStandardSchemeFactory implements SchemeFactory {
++    public PColumnUpdateStandardScheme getScheme() {
++      return new PColumnUpdateStandardScheme();
++    }
++  }
++
++  private static class PColumnUpdateStandardScheme extends StandardScheme<PColumnUpdate> {
++
++    public void read(org.apache.thrift.protocol.TProtocol iprot, PColumnUpdate struct) throws org.apache.thrift.TException {
++      org.apache.thrift.protocol.TField schemeField;
++      iprot.readStructBegin();
++      while (true)
++      {
++        schemeField = iprot.readFieldBegin();
++        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
++          break;
++        }
++        switch (schemeField.id) {
++          case 1: // COL_FAMILY
++            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
++              struct.colFamily = iprot.readBinary();
++              struct.setColFamilyIsSet(true);
++            } else { 
++              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
++            }
++            break;
++          case 2: // COL_QUALIFIER
++            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
++              struct.colQualifier = iprot.readBinary();
++              struct.setColQualifierIsSet(true);
++            } else { 
++              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
++            }
++            break;
++          case 3: // COL_VISIBILITY
++            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
++              struct.colVisibility = iprot.readBinary();
++              struct.setColVisibilityIsSet(true);
++            } else { 
++              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
++            }
++            break;
++          case 4: // TIMESTAMP
++            if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
++              struct.timestamp = iprot.readI64();
++              struct.setTimestampIsSet(true);
++            } else { 
++              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
++            }
++            break;
++          case 5: // VALUE
++            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
++              struct.value = iprot.readBinary();
++              struct.setValueIsSet(true);
++            } else { 
++              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
++            }
++            break;
++          default:
++            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
++        }
++        iprot.readFieldEnd();
++      }
++      iprot.readStructEnd();
++
++      // check for required fields of primitive type, which can't be checked in the validate method
++      struct.validate();
++    }
++
++    public void write(org.apache.thrift.protocol.TProtocol oprot, PColumnUpdate struct) throws org.apache.thrift.TException {
++      struct.validate();
++
++      oprot.writeStructBegin(STRUCT_DESC);
++      if (struct.colFamily != null) {
++        oprot.writeFieldBegin(COL_FAMILY_FIELD_DESC);
++        oprot.writeBinary(struct.colFamily);
++        oprot.writeFieldEnd();
++      }
++      if (struct.colQualifier != null) {
++        oprot.writeFieldBegin(COL_QUALIFIER_FIELD_DESC);
++        oprot.writeBinary(struct.colQualifier);
++        oprot.writeFieldEnd();
++      }
++      if (struct.colVisibility != null) {
++        if (struct.isSetColVisibility()) {
++          oprot.writeFieldBegin(COL_VISIBILITY_FIELD_DESC);
++          oprot.writeBinary(struct.colVisibility);
++          oprot.writeFieldEnd();
++        }
++      }
++      if (struct.isSetTimestamp()) {
++        oprot.writeFieldBegin(TIMESTAMP_FIELD_DESC);
++        oprot.writeI64(struct.timestamp);
++        oprot.writeFieldEnd();
++      }
++      if (struct.value != null) {
++        oprot.writeFieldBegin(VALUE_FIELD_DESC);
++        oprot.writeBinary(struct.value);
++        oprot.writeFieldEnd();
++      }
++      oprot.writeFieldStop();
++      oprot.writeStructEnd();
++    }
++
++  }
++
++  private static class PColumnUpdateTupleSchemeFactory implements SchemeFactory {
++    public PColumnUpdateTupleScheme getScheme() {
++      return new PColumnUpdateTupleScheme();
++    }
++  }
++
++  private static class PColumnUpdateTupleScheme extends TupleScheme<PColumnUpdate> {
++
++    @Override
++    public void write(org.apache.thrift.protocol.TProtocol prot, PColumnUpdate struct) throws org.apache.thrift.TException {
++      TTupleProtocol oprot = (TTupleProtocol) prot;
++      BitSet optionals = new BitSet();
++      if (struct.isSetColFamily()) {
++        optionals.set(0);
++      }
++      if (struct.isSetColQualifier()) {
++        optionals.set(1);
++      }
++      if (struct.isSetColVisibility()) {
++        optionals.set(2);
++      }
++      if (struct.isSetTimestamp()) {
++        optionals.set(3);
++      }
++      if (struct.isSetValue()) {
++        optionals.set(4);
++      }
++      oprot.writeBitSet(optionals, 5);
++      if (struct.isSetColFamily()) {
++        oprot.writeBinary(struct.colFamily);
++      }
++      if (struct.isSetColQualifier()) {
++        oprot.writeBinary(struct.colQualifier);
++      }
++      if (struct.isSetColVisibility()) {
++        oprot.writeBinary(struct.colVisibility);
++      }
++      if (struct.isSetTimestamp()) {
++        oprot.writeI64(struct.timestamp);
++      }
++      if (struct.isSetValue()) {
++        oprot.writeBinary(struct.value);
++      }
++    }
++
++    @Override
++    public void read(org.apache.thrift.protocol.TProtocol prot, PColumnUpdate struct) throws org.apache.thrift.TException {
++      TTupleProtocol iprot = (TTupleProtocol) prot;
++      BitSet incoming = iprot.readBitSet(5);
++      if (incoming.get(0)) {
++        struct.colFamily = iprot.readBinary();
++        struct.setColFamilyIsSet(true);
++      }
++      if (incoming.get(1)) {
++        struct.colQualifier = iprot.readBinary();
++        struct.setColQualifierIsSet(true);
++      }
++      if (incoming.get(2)) {
++        struct.colVisibility = iprot.readBinary();
++        struct.setColVisibilityIsSet(true);
++      }
++      if (incoming.get(3)) {
++        struct.timestamp = iprot.readI64();
++        struct.setTimestampIsSet(true);
++      }
++      if (incoming.get(4)) {
++        struct.value = iprot.readBinary();
++        struct.setValueIsSet(true);
++      }
++    }
++  }
++
++}
++


[03/15] git commit: ACCUMULO-652 initial mods to RFile to keep track of extra block statistics

Posted by el...@apache.org.
ACCUMULO-652 initial mods to RFile to keep track of extra block statistics

git-svn-id: https://svn.apache.org/repos/asf/accumulo/branches/ACCUMULO-652@1354475 13f79535-47bb-0310-9956-ffa450edef68


Project: http://git-wip-us.apache.org/repos/asf/accumulo/repo
Commit: http://git-wip-us.apache.org/repos/asf/accumulo/commit/3fcd07de
Tree: http://git-wip-us.apache.org/repos/asf/accumulo/tree/3fcd07de
Diff: http://git-wip-us.apache.org/repos/asf/accumulo/diff/3fcd07de

Branch: refs/heads/ACCUMULO-652
Commit: 3fcd07de50699ae829ddbe892579126837306ab4
Parents: fd77a56
Author: Adam Fuchs <af...@apache.org>
Authored: Wed Jun 27 12:48:16 2012 +0000
Committer: Adam Fuchs <af...@apache.org>
Committed: Wed Jun 27 12:48:16 2012 +0000

----------------------------------------------------------------------
 .../core/file/rfile/MultiLevelIndex.java        | 457 ++++++++++---------
 .../apache/accumulo/core/file/rfile/RFile.java  | 183 +++++++-
 .../accumulo/core/iterators/Filterer.java       |  24 +
 .../accumulo/core/iterators/Predicate.java      |  24 +
 .../predicates/TimestampRangePredicate.java     |  54 +++
 .../core/iterators/system/HeapIterator.java     |  14 +-
 .../core/iterators/system/VisibilityFilter.java |  21 +-
 .../core/security/ColumnVisibility.java         | 342 +++++++++-----
 .../core/security/VisibilityConstraint.java     |   9 +-
 .../core/file/rfile/MultiLevelIndexTest.java    |  16 +-
 .../accumulo/core/file/rfile/RFileTest.java     |  45 +-
 .../core/file/rfile/TimestampFilterTest.java    |  98 ++++
 .../iterators/user/IndexedDocIteratorTest.java  |  14 +-
 .../core/security/ColumnVisibilityTest.java     |  60 ++-
 .../core/security/VisibilityEvaluatorTest.java  |  20 +-
 .../examples/wikisearch/parser/EventFields.java |   6 +-
 16 files changed, 983 insertions(+), 404 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/accumulo/blob/3fcd07de/core/src/main/java/org/apache/accumulo/core/file/rfile/MultiLevelIndex.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/file/rfile/MultiLevelIndex.java b/core/src/main/java/org/apache/accumulo/core/file/rfile/MultiLevelIndex.java
index b973cc3..e2b4b15 100644
--- a/core/src/main/java/org/apache/accumulo/core/file/rfile/MultiLevelIndex.java
+++ b/core/src/main/java/org/apache/accumulo/core/file/rfile/MultiLevelIndex.java
@@ -27,10 +27,11 @@ import java.util.AbstractList;
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.Comparator;
+import java.util.Iterator;
 import java.util.List;
-import java.util.ListIterator;
 import java.util.Map;
 import java.util.RandomAccess;
+import java.util.Stack;
 
 import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.file.blockfile.ABlockReader;
@@ -38,37 +39,58 @@ import org.apache.accumulo.core.file.blockfile.ABlockWriter;
 import org.apache.accumulo.core.file.blockfile.BlockFileReader;
 import org.apache.accumulo.core.file.blockfile.BlockFileWriter;
 import org.apache.accumulo.core.file.rfile.bcfile.Utils;
+import org.apache.accumulo.core.iterators.predicates.TimestampRangePredicate;
+import org.apache.accumulo.core.security.ColumnVisibility;
 import org.apache.hadoop.io.WritableComparable;
 
 public class MultiLevelIndex {
   
   public static class IndexEntry implements WritableComparable<IndexEntry> {
     private Key key;
+    private long minTimestamp;
+    private long maxTimestamp;
+    private ColumnVisibility minimumVisibility = null;
     private int entries;
     private long offset;
     private long compressedSize;
     private long rawSize;
-    private boolean newFormat;
+    private int format;
     
-    IndexEntry(Key k, int e, long offset, long compressedSize, long rawSize) {
+    IndexEntry(Key k, long minTimestamp, long maxTimestamp, ColumnVisibility minimumVisibility, int e, long offset, long compressedSize, long rawSize, int version) {
       this.key = k;
+      this.minTimestamp = minTimestamp;
+      this.maxTimestamp = maxTimestamp;
+      this.minimumVisibility = minimumVisibility;
       this.entries = e;
       this.offset = offset;
       this.compressedSize = compressedSize;
       this.rawSize = rawSize;
-      newFormat = true;
+      format = version;
     }
     
-    public IndexEntry(boolean newFormat) {
-      this.newFormat = newFormat;
+    public IndexEntry(int format) {
+      this.format = format;
     }
     
     @Override
     public void readFields(DataInput in) throws IOException {
       key = new Key();
       key.readFields(in);
+      if(format == RFile.RINDEX_VER_7)
+      {
+        minTimestamp = in.readLong();
+        maxTimestamp = in.readLong();
+        byte[] visibility = new byte[in.readInt()];
+        in.readFully(visibility);
+        minimumVisibility = new ColumnVisibility(visibility);
+      }
+      else
+      {
+        minTimestamp = Long.MIN_VALUE;
+        maxTimestamp = Long.MAX_VALUE;
+      }
       entries = in.readInt();
-      if (newFormat) {
+      if (format == RFile.RINDEX_VER_6 || format == RFile.RINDEX_VER_7) {
         offset = Utils.readVLong(in);
         compressedSize = Utils.readVLong(in);
         rawSize = Utils.readVLong(in);
@@ -82,8 +104,16 @@ public class MultiLevelIndex {
     @Override
     public void write(DataOutput out) throws IOException {
       key.write(out);
+      if(format == RFile.RINDEX_VER_7)
+      {
+        out.writeLong(minTimestamp);
+        out.writeLong(maxTimestamp);
+        byte[] visibility = minimumVisibility.getExpression();
+        out.writeInt(visibility.length);
+        out.write(visibility);
+      }
       out.writeInt(entries);
-      if (newFormat) {
+      if (format == RFile.RINDEX_VER_6 || format == RFile.RINDEX_VER_7) {
         Utils.writeVLong(out, offset);
         Utils.writeVLong(out, compressedSize);
         Utils.writeVLong(out, rawSize);
@@ -121,12 +151,12 @@ public class MultiLevelIndex {
     
     private int[] offsets;
     private byte[] data;
-    private boolean newFormat;
+    private int format;
     
-    SerializedIndex(int[] offsets, byte[] data, boolean newFormat) {
+    SerializedIndex(int[] offsets, byte[] data, int format) {
       this.offsets = offsets;
       this.data = data;
-      this.newFormat = newFormat;
+      this.format = format;
     }
     
     @Override
@@ -140,7 +170,7 @@ public class MultiLevelIndex {
       ByteArrayInputStream bais = new ByteArrayInputStream(data, offsets[index], len);
       DataInputStream dis = new DataInputStream(bais);
       
-      IndexEntry ie = new IndexEntry(newFormat);
+      IndexEntry ie = new IndexEntry(format);
       try {
         ie.readFields(dis);
       } catch (IOException e) {
@@ -203,6 +233,10 @@ public class MultiLevelIndex {
     private ByteArrayOutputStream indexBytes;
     private DataOutputStream indexOut;
     
+    private long minTimestamp = Long.MAX_VALUE;
+    private long maxTimestamp = Long.MIN_VALUE;
+    private ColumnVisibility minimumVisibility = null;
+    
     private ArrayList<Integer> offsets;
     private int level;
     private int offset;
@@ -212,8 +246,6 @@ public class MultiLevelIndex {
     private boolean hasNext;
     
     public IndexBlock(int level, int totalAdded) {
-      // System.out.println("IndexBlock("+level+","+levelCount+","+totalAdded+")");
-      
       this.level = level;
       this.offset = totalAdded;
       
@@ -224,9 +256,17 @@ public class MultiLevelIndex {
     
     public IndexBlock() {}
     
-    public void add(Key key, int value, long offset, long compressedSize, long rawSize) throws IOException {
+    public void add(Key key, long minTimestamp, long maxTimestamp, ColumnVisibility minimumVisibility, int value, long offset, long compressedSize, long rawSize, int version) throws IOException {
       offsets.add(indexOut.size());
-      new IndexEntry(key, value, offset, compressedSize, rawSize).write(indexOut);
+      if (this.minTimestamp > minTimestamp)
+        this.minTimestamp = minTimestamp;
+      if (this.maxTimestamp < maxTimestamp)
+        this.maxTimestamp = maxTimestamp;
+      if(this.minimumVisibility == null)
+        this.minimumVisibility = minimumVisibility;
+      else
+        this.minimumVisibility = this.minimumVisibility.or(minimumVisibility);
+      new IndexEntry(key, minTimestamp, maxTimestamp, minimumVisibility, value, offset, compressedSize, rawSize, version).write(indexOut);
     }
     
     int getSize() {
@@ -252,7 +292,7 @@ public class MultiLevelIndex {
     
     public void readFields(DataInput in, int version) throws IOException {
       
-      if (version == RFile.RINDEX_VER_6) {
+      if (version == RFile.RINDEX_VER_6 || version == RFile.RINDEX_VER_7) {
         level = in.readInt();
         offset = in.readInt();
         hasNext = in.readBoolean();
@@ -267,7 +307,7 @@ public class MultiLevelIndex {
         byte[] serializedIndex = new byte[indexSize];
         in.readFully(serializedIndex);
         
-        index = new SerializedIndex(offsets, serializedIndex, true);
+        index = new SerializedIndex(offsets, serializedIndex, version);
         keyIndex = new KeyIndex(offsets, serializedIndex);
       } else if (version == RFile.RINDEX_VER_3) {
         level = 0;
@@ -281,7 +321,7 @@ public class MultiLevelIndex {
         ArrayList<Integer> oal = new ArrayList<Integer>();
         
         for (int i = 0; i < size; i++) {
-          IndexEntry ie = new IndexEntry(false);
+          IndexEntry ie = new IndexEntry(version);
           oal.add(dos.size());
           ie.readFields(in);
           ie.write(dos);
@@ -295,7 +335,7 @@ public class MultiLevelIndex {
         }
         
         byte[] serializedIndex = baos.toByteArray();
-        index = new SerializedIndex(oia, serializedIndex, false);
+        index = new SerializedIndex(oia, serializedIndex, version);
         keyIndex = new KeyIndex(oia, serializedIndex);
       } else if (version == RFile.RINDEX_VER_4) {
         level = 0;
@@ -312,7 +352,7 @@ public class MultiLevelIndex {
         byte[] indexData = new byte[size];
         in.readFully(indexData);
         
-        index = new SerializedIndex(offsets, indexData, false);
+        index = new SerializedIndex(offsets, indexData, version);
         keyIndex = new KeyIndex(offsets, indexData);
       } else {
         throw new RuntimeException("Unexpected version " + version);
@@ -356,12 +396,14 @@ public class MultiLevelIndex {
     private DataOutputStream buffer;
     private int buffered;
     private ByteArrayOutputStream baos;
+    private final int version;
     
     public BufferedWriter(Writer writer) {
       this.writer = writer;
       baos = new ByteArrayOutputStream(1 << 20);
       buffer = new DataOutputStream(baos);
       buffered = 0;
+      version = RFile.RINDEX_VER_7;
     }
     
     private void flush() throws IOException {
@@ -369,10 +411,10 @@ public class MultiLevelIndex {
       
       DataInputStream dis = new DataInputStream(new ByteArrayInputStream(baos.toByteArray()));
       
-      IndexEntry ie = new IndexEntry(true);
+      IndexEntry ie = new IndexEntry(version);
       for (int i = 0; i < buffered; i++) {
         ie.readFields(dis);
-        writer.add(ie.getKey(), ie.getNumEntries(), ie.getOffset(), ie.getCompressedSize(), ie.getRawSize());
+        writer.add(ie.getKey(), ie.minTimestamp, ie.maxTimestamp, ie.minimumVisibility, ie.getNumEntries(), ie.getOffset(), ie.getCompressedSize(), ie.getRawSize(), ie.format);
       }
       
       buffered = 0;
@@ -381,18 +423,18 @@ public class MultiLevelIndex {
       
     }
     
-    public void add(Key key, int data, long offset, long compressedSize, long rawSize) throws IOException {
+    public void add(Key key, long minTimestamp, long maxTimestamp, ColumnVisibility minimumVisibility, int data, long offset, long compressedSize, long rawSize, int version) throws IOException {
       if (buffer.size() > (10 * 1 << 20)) {
         flush();
       }
       
-      new IndexEntry(key, data, offset, compressedSize, rawSize).write(buffer);
+      new IndexEntry(key, minTimestamp, maxTimestamp, minimumVisibility, data, offset, compressedSize, rawSize, version).write(buffer);
       buffered++;
     }
     
-    public void addLast(Key key, int data, long offset, long compressedSize, long rawSize) throws IOException {
+    public void addLast(Key key, long minTimestamp, long maxTimestamp, ColumnVisibility minimumVisibility, int data, long offset, long compressedSize, long rawSize, int version) throws IOException {
       flush();
-      writer.addLast(key, data, offset, compressedSize, rawSize);
+      writer.addLast(key, minTimestamp, maxTimestamp, minimumVisibility, data, offset, compressedSize, rawSize, version);
     }
     
     public void close(DataOutput out) throws IOException {
@@ -417,30 +459,26 @@ public class MultiLevelIndex {
       levels = new ArrayList<IndexBlock>();
     }
     
-    private void add(int level, Key key, int data, long offset, long compressedSize, long rawSize) throws IOException {
+    private void add(int level, Key key, long minTimestamp, long maxTimestamp, ColumnVisibility minimumVisibility, int data, long offset, long compressedSize, long rawSize, boolean last, int version)
+        throws IOException {
       if (level == levels.size()) {
         levels.add(new IndexBlock(level, 0));
       }
       
       IndexBlock iblock = levels.get(level);
       
-      iblock.add(key, data, offset, compressedSize, rawSize);
-    }
-    
-    private void flush(int level, Key lastKey, boolean last) throws IOException {
+      iblock.add(key, minTimestamp, maxTimestamp, minimumVisibility, data, offset, compressedSize, rawSize, version);
       
       if (last && level == levels.size() - 1)
         return;
       
-      IndexBlock iblock = levels.get(level);
       if ((iblock.getSize() > threshold && iblock.offsets.size() > 1) || last) {
         ABlockWriter out = blockFileWriter.prepareDataBlock();
         iblock.setHasNext(!last);
         iblock.write(out);
         out.close();
         
-        add(level + 1, lastKey, 0, out.getStartPos(), out.getCompressedSize(), out.getRawSize());
-        flush(level + 1, lastKey, last);
+        add(level + 1, key, iblock.minTimestamp, iblock.maxTimestamp, iblock.minimumVisibility, 0, out.getStartPos(), out.getCompressedSize(), out.getRawSize(), last, version);
         
         if (last)
           levels.set(level, null);
@@ -449,19 +487,17 @@ public class MultiLevelIndex {
       }
     }
     
-    public void add(Key key, int data, long offset, long compressedSize, long rawSize) throws IOException {
+    public void add(Key key, long minTimestamp, long maxTimestamp, ColumnVisibility minimumVisibility, int data, long offset, long compressedSize, long rawSize, int version) throws IOException {
       totalAdded++;
-      add(0, key, data, offset, compressedSize, rawSize);
-      flush(0, key, false);
+      add(0, key, minTimestamp, maxTimestamp, minimumVisibility, data, offset, compressedSize, rawSize, false, version);
     }
     
-    public void addLast(Key key, int data, long offset, long compressedSize, long rawSize) throws IOException {
+    public void addLast(Key key, long minTimestamp, long maxTimestamp, ColumnVisibility minimumVisibility, int data, long offset, long compressedSize, long rawSize, int version) throws IOException {
       if (addedLast)
         throw new IllegalStateException("already added last");
       
       totalAdded++;
-      add(0, key, data, offset, compressedSize, rawSize);
-      flush(0, key, true);
+      add(0, key, minTimestamp, maxTimestamp, minimumVisibility, data, offset, compressedSize, rawSize, true, version);
       addedLast = true;
       
     }
@@ -487,215 +523,196 @@ public class MultiLevelIndex {
     private int version;
     private int size;
     
-    public class Node {
-      
-      private Node parent;
-      private IndexBlock indexBlock;
-      private int currentPos;
-      
-      Node(Node parent, IndexBlock iBlock) {
-        this.parent = parent;
-        this.indexBlock = iBlock;
-      }
+    class StackEntry {
+      public final IndexBlock block;
+      public int offset;
       
-      Node(IndexBlock rootInfo) {
-        this.parent = null;
-        this.indexBlock = rootInfo;
-      }
-      
-      private Node lookup(Key key) throws IOException {
-        int pos = Collections.binarySearch(indexBlock.getKeyIndex(), key, new Comparator<Key>() {
-          @Override
-          public int compare(Key o1, Key o2) {
-            return o1.compareTo(o2);
-          }
-        });
-        
-        if (pos < 0)
-          pos = (pos * -1) - 1;
-        
-        if (pos == indexBlock.getIndex().size()) {
-          if (parent != null)
-            throw new IllegalStateException();
-          this.currentPos = pos;
-          return this;
-        }
-        
-        this.currentPos = pos;
-        
-        if (indexBlock.getLevel() == 0) {
-          return this;
-        }
-        
-        IndexEntry ie = indexBlock.getIndex().get(pos);
-        Node child = new Node(this, getIndexBlock(ie));
-        return child.lookup(key);
-      }
-      
-      private Node getLast() throws IOException {
-        currentPos = indexBlock.getIndex().size() - 1;
-        if (indexBlock.getLevel() == 0)
-          return this;
-        
-        IndexEntry ie = indexBlock.getIndex().get(currentPos);
-        Node child = new Node(this, getIndexBlock(ie));
-        return child.getLast();
-      }
-      
-      private Node getFirst() throws IOException {
-        currentPos = 0;
-        if (indexBlock.getLevel() == 0)
-          return this;
-        
-        IndexEntry ie = indexBlock.getIndex().get(currentPos);
-        Node child = new Node(this, getIndexBlock(ie));
-        return child.getFirst();
-      }
-      
-      private Node getPrevious() throws IOException {
-        if (currentPos == 0)
-          return parent.getPrevious();
-        
-        currentPos--;
-        
-        IndexEntry ie = indexBlock.getIndex().get(currentPos);
-        Node child = new Node(this, getIndexBlock(ie));
-        return child.getLast();
-        
-      }
-      
-      private Node getNext() throws IOException {
-        if (currentPos == indexBlock.getIndex().size() - 1)
-          return parent.getNext();
-        
-        currentPos++;
-        
-        IndexEntry ie = indexBlock.getIndex().get(currentPos);
-        Node child = new Node(this, getIndexBlock(ie));
-        return child.getFirst();
-        
-      }
-      
-      Node getNextNode() throws IOException {
-        return parent.getNext();
-      }
-      
-      Node getPreviousNode() throws IOException {
-        return parent.getPrevious();
+      public StackEntry(IndexBlock block, int offset) {
+        this.block = block;
+        this.offset = offset;
       }
     }
     
-    public class IndexIterator implements ListIterator<IndexEntry> {
-      
-      private Node node;
-      private ListIterator<IndexEntry> liter;
+    class IndexIterator implements Iterator<IndexEntry> {
+      private Stack<StackEntry> position = new Stack<StackEntry>();
+      private final TimestampRangePredicate timestampFilter;
       
-      private Node getPrevNode() {
+      private IndexIterator(TimestampRangePredicate timestampFilter, Key lookupKey) {
+        this.timestampFilter = timestampFilter;
         try {
-          return node.getPreviousNode();
+          seek(lookupKey);
         } catch (IOException e) {
           throw new RuntimeException(e);
         }
       }
       
-      private Node getNextNode() {
-        try {
-          return node.getNextNode();
-        } catch (IOException e) {
-          throw new RuntimeException(e);
+      private final boolean checkFilterIndexEntry(IndexEntry ie) {
+        if(timestampFilter == null)
+        if (timestampFilter != null && (ie.maxTimestamp < timestampFilter.startTimestamp || ie.minTimestamp > timestampFilter.endTimestamp)) {
+          return false;
+        }
+        return true;
+      }
+      
+      private void seek(Key lookupKey) throws IOException {
+        StackEntry top = new StackEntry(rootBlock, -1);
+        position.add(top);
+        while (true) {
+          top = position.peek();
+          // go down the tree
+          int pos = Collections.binarySearch(top.block.getKeyIndex(), lookupKey, new Comparator<Key>() {
+            @Override
+            public int compare(Key o1, Key o2) {
+              return o1.compareTo(o2);
+            }
+          });
+          
+          
+          if (pos < 0) {
+            pos = (pos * -1) - 1;
+          } else if (pos < top.block.getKeyIndex().size()) {
+            // the exact key was found, so we want to go back to the first identical match
+            while (pos > 0 && top.block.getKeyIndex().get(pos - 1).equals(lookupKey)) {
+              pos--;
+            }
+          }
+          
+
+          IndexEntry ie = null;
+          List<IndexEntry> index = top.block.getIndex();
+          
+          if(pos > 0)
+          {
+            // look backwards to find any initial previousEntry that might match the timestamp range such that no entry within the given timestamp range is between the seeked key and the previousKey
+            previousEntry = index.get(pos-1);
+            // TODO: find the offset for this block
+            previousIndex = Integer.MIN_VALUE;
+          }
+          
+          while (pos < index.size()) {
+            ie = index.get(pos);
+            // filter on timestampRange by skipping forward until a block passes the predicate
+            if (checkFilterIndexEntry(ie))
+              break;
+            pos++;
+          }
+          
+          
+          if (pos == index.size()) {
+            position.pop();
+            goToNext();
+            return;
+          } else {
+            if (top.block.level == 0) {
+              // found a matching index entry
+              top.offset = pos - 1;
+              return;
+            } else {
+              top.offset = pos;
+              position.add(new StackEntry(getIndexBlock(ie), 0));
+            }
+          }
         }
       }
       
-      public IndexIterator() {
-        node = null;
-      }
-      
-      public IndexIterator(Node node) {
-        this.node = node;
-        liter = node.indexBlock.getIndex().listIterator(node.currentPos);
-      }
-      
-      @Override
-      public boolean hasNext() {
-        if (node == null)
-          return false;
-        
-        if (!liter.hasNext()) {
-          return node.indexBlock.hasNext();
-        } else {
-          return true;
+      private void goToNext() throws IOException {
+        int numSkippedBlocks = 0;
+        // traverse the index tree forwards
+        while (position.isEmpty() == false) {
+          StackEntry top = position.peek();
+          top.offset++;
+          List<IndexEntry> index = top.block.getIndex();
+          while (top.offset < index.size()) {
+            if (checkFilterIndexEntry(index.get(top.offset)))
+              break;
+            numSkippedBlocks++;
+            top.offset++;
+          }
+          if (top.offset == index.size()) {
+            // go up
+            position.pop();
+          } else {
+            if (top.block.level == 0) {
+              // success!
+              return;
+            }
+            // go down
+            position.add(new StackEntry(getIndexBlock(index.get(top.offset)), -1));
+          }
         }
-        
       }
       
-      public IndexEntry peekPrevious() {
-        IndexEntry ret = previous();
-        next();
-        return ret;
-      }
+      IndexEntry nextEntry = null;
+      IndexEntry previousEntry = null;
+      int nextIndex = -1;
+      int previousIndex = -1;
       
-      public IndexEntry peek() {
-        IndexEntry ret = next();
-        previous();
-        return ret;
+      private void prepNext() {
+        if (nextEntry == null) {
+          try {
+            goToNext();
+          } catch (IOException e) {
+            throw new RuntimeException(e);
+          }
+          if (position.isEmpty())
+            return;
+          StackEntry e = position.peek();
+          nextEntry = e.block.getIndex().get(e.offset);
+          nextIndex = e.block.getOffset() + e.offset;
+        }
       }
       
-      @Override
-      public IndexEntry next() {
-        if (!liter.hasNext()) {
-          node = getNextNode();
-          liter = node.indexBlock.getIndex().listIterator();
-        }
+      public boolean hasNext() {
+        if (nextEntry == null)
+          prepNext();
+        return nextEntry != null;
         
-        return liter.next();
       }
       
-      @Override
+      // initially, previous key is last key of the previous block
       public boolean hasPrevious() {
-        if (node == null)
-          return false;
-        
-        if (!liter.hasPrevious()) {
-          return node.indexBlock.getOffset() > 0;
-        } else {
-          return true;
-        }
+        return previousEntry != null;
       }
       
-      @Override
-      public IndexEntry previous() {
-        if (!liter.hasPrevious()) {
-          node = getPrevNode();
-          liter = node.indexBlock.getIndex().listIterator(node.indexBlock.getIndex().size());
-        }
-        
-        return liter.previous();
+      public int nextIndex() {
+        if (nextEntry == null)
+          prepNext();
+        return nextIndex;
       }
       
-      @Override
-      public int nextIndex() {
-        return node.indexBlock.getOffset() + liter.nextIndex();
+      public IndexEntry peek() {
+        if (nextEntry == null)
+          prepNext();
+        return nextEntry;
       }
       
-      @Override
-      public int previousIndex() {
-        return node.indexBlock.getOffset() + liter.previousIndex();
+      private int blocksReturned = 0;
+      
+      public IndexEntry next() {
+        prepNext();
+        previousEntry = nextEntry;
+        nextEntry = null;
+        previousIndex = nextIndex;
+        nextIndex = -1;
+        return previousEntry;
       }
       
-      @Override
-      public void remove() {
-        throw new UnsupportedOperationException();
+      public IndexEntry peekPrevious() {
+        return previousEntry;
       }
       
+      /*
+       * (non-Javadoc)
+       * 
+       * @see java.util.Iterator#remove()
+       */
       @Override
-      public void set(IndexEntry e) {
+      public void remove() {
         throw new UnsupportedOperationException();
-        
       }
       
-      @Override
-      public void add(IndexEntry e) {
-        throw new UnsupportedOperationException();
+      public int previousIndex() {
+        return previousIndex;
       }
       
     }
@@ -714,16 +731,15 @@ public class MultiLevelIndex {
       return iblock;
     }
     
-    public IndexIterator lookup(Key key) throws IOException {
-      Node node = new Node(rootBlock);
-      return new IndexIterator(node.lookup(key));
+    IndexIterator lookup(Key key) throws IOException {
+      return new IndexIterator(timestampRange, key);
     }
     
     public void readFields(DataInput in) throws IOException {
       
       size = 0;
       
-      if (version == RFile.RINDEX_VER_6) {
+      if (version == RFile.RINDEX_VER_6 || version == RFile.RINDEX_VER_7) {
         size = in.readInt();
       }
       
@@ -769,6 +785,15 @@ public class MultiLevelIndex {
     public Key getLastKey() {
       return rootBlock.getIndex().get(rootBlock.getIndex().size() - 1).getKey();
     }
+    
+    TimestampRangePredicate timestampRange;
+    
+    /**
+     * @param r
+     */
+    public void setTimestampRange(TimestampRangePredicate r) {
+      this.timestampRange = r;
+    }
   }
   
 }

http://git-wip-us.apache.org/repos/asf/accumulo/blob/3fcd07de/core/src/main/java/org/apache/accumulo/core/file/rfile/RFile.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/file/rfile/RFile.java b/core/src/main/java/org/apache/accumulo/core/file/rfile/RFile.java
index c2eac1d..06000f8 100644
--- a/core/src/main/java/org/apache/accumulo/core/file/rfile/RFile.java
+++ b/core/src/main/java/org/apache/accumulo/core/file/rfile/RFile.java
@@ -56,10 +56,14 @@ import org.apache.accumulo.core.file.rfile.MultiLevelIndex.IndexEntry;
 import org.apache.accumulo.core.file.rfile.MultiLevelIndex.Reader.IndexIterator;
 import org.apache.accumulo.core.file.rfile.RelativeKey.MByteSequence;
 import org.apache.accumulo.core.file.rfile.bcfile.MetaBlockDoesNotExist;
+import org.apache.accumulo.core.iterators.Filterer;
 import org.apache.accumulo.core.iterators.IterationInterruptedException;
 import org.apache.accumulo.core.iterators.IteratorEnvironment;
+import org.apache.accumulo.core.iterators.Predicate;
 import org.apache.accumulo.core.iterators.SortedKeyValueIterator;
+import org.apache.accumulo.core.iterators.predicates.TimestampRangePredicate;
 import org.apache.accumulo.core.iterators.system.HeapIterator;
+import org.apache.accumulo.core.security.ColumnVisibility;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FileSystem;
@@ -77,6 +81,7 @@ public class RFile {
   private RFile() {}
   
   private static final int RINDEX_MAGIC = 0x20637474;
+  static final int RINDEX_VER_7 = 7;
   static final int RINDEX_VER_6 = 6;
   static final int RINDEX_VER_4 = 4;
   static final int RINDEX_VER_3 = 3;
@@ -301,6 +306,11 @@ public class RFile {
     private int indexBlockSize;
     private int entries = 0;
     
+    // some aggregate stats to keep on a per-block basis
+    private long minTimestamp = Long.MAX_VALUE;
+    private long maxTimestamp = Long.MIN_VALUE;
+    private ColumnVisibility minimumVisibility = null;
+    
     private ArrayList<LocalityGroupMetadata> localityGroups = new ArrayList<LocalityGroupMetadata>();
     private LocalityGroupMetadata currentLocalityGroup = null;
     private int nextBlock = 0;
@@ -337,7 +347,7 @@ public class RFile {
       ABlockWriter mba = fileWriter.prepareMetaBlock("RFile.index");
       
       mba.writeInt(RINDEX_MAGIC);
-      mba.writeInt(RINDEX_VER_6);
+      mba.writeInt(RINDEX_VER_7);
       
       if (currentLocalityGroup != null)
         localityGroups.add(currentLocalityGroup);
@@ -368,8 +378,28 @@ public class RFile {
       }
     }
     
+    private void updateBlockStats(Key key, Value value)
+    {
+      if(minTimestamp > key.getTimestamp())
+        minTimestamp = key.getTimestamp();
+      if(maxTimestamp < key.getTimestamp())
+        maxTimestamp = key.getTimestamp();
+      if(minimumVisibility == null)
+        minimumVisibility = new ColumnVisibility(key.getColumnVisibility());
+      else
+        minimumVisibility = minimumVisibility.or(new ColumnVisibility(key.getColumnVisibility()));
+      entries++;
+    }
+    
+    private void clearBlockStats()
+    {
+      minTimestamp = Long.MAX_VALUE;
+      maxTimestamp = Long.MIN_VALUE;
+      minimumVisibility = null;      
+      entries = 0;
+    }
+    
     public void append(Key key, Value value) throws IOException {
-      
       if (dataClosed) {
         throw new IllegalStateException("Cannont append, data closed");
       }
@@ -395,7 +425,8 @@ public class RFile {
       
       rk.write(blockWriter);
       value.write(blockWriter);
-      entries++;
+      updateBlockStats(key,value);
+      
       
       prevKey = new Key(key);
       lastKeyInBlock = prevKey;
@@ -406,13 +437,13 @@ public class RFile {
       blockWriter.close();
       
       if (lastBlock)
-        currentLocalityGroup.indexWriter.addLast(key, entries, blockWriter.getStartPos(), blockWriter.getCompressedSize(), blockWriter.getRawSize());
+        currentLocalityGroup.indexWriter.addLast(key, minTimestamp, maxTimestamp, minimumVisibility, entries, blockWriter.getStartPos(), blockWriter.getCompressedSize(), blockWriter.getRawSize(), RINDEX_VER_7);
       else
-        currentLocalityGroup.indexWriter.add(key, entries, blockWriter.getStartPos(), blockWriter.getCompressedSize(), blockWriter.getRawSize());
+        currentLocalityGroup.indexWriter.add(key, minTimestamp, maxTimestamp, minimumVisibility, entries, blockWriter.getStartPos(), blockWriter.getCompressedSize(), blockWriter.getRawSize(), RINDEX_VER_7);
       
+      clearBlockStats();
       blockWriter = null;
       lastKeyInBlock = null;
-      entries = 0;
       nextBlock++;
     }
     
@@ -475,7 +506,7 @@ public class RFile {
     }
   }
   
-  private static class LocalityGroupReader implements FileSKVIterator {
+  private static class LocalityGroupReader implements FileSKVIterator, Filterer<Key,Value> {
     
     private BlockFileReader reader;
     private MultiLevelIndex.Reader index;
@@ -578,7 +609,7 @@ public class RFile {
           return;
         }
       }
-      
+
       prevKey = rk.getKey();
       rk.readFields(currBlock);
       val.readFields(currBlock);
@@ -650,14 +681,15 @@ public class RFile {
       boolean reseek = true;
       
       if (range.afterEndKey(firstKey)) {
-        // range is before first key in rfile, so there is nothing to do
+        // range is before first key in this locality group, so there is nothing to do
         reset();
         reseek = false;
       }
       
-      if (rk != null) {
+      // always reseek if the filter changed since the last seek
+      if (filterChanged == false && rk != null) {
         if (range.beforeStartKey(prevKey) && range.afterEndKey(getTopKey())) {
-          // range is between the two keys in the file where the last range seeked to stopped, so there is
+          // range is between the two keys in the locality group where the last range seeked to stopped, so there is
           // nothing to do
           reseek = false;
         }
@@ -702,12 +734,6 @@ public class RFile {
           // past the last key
         } else {
           
-          // if the index contains the same key multiple times, then go to the
-          // earliest index entry containing the key
-          while (iiter.hasPrevious() && iiter.peekPrevious().getKey().equals(iiter.peek().getKey())) {
-            iiter.previous();
-          }
-          
           if (iiter.hasPrevious())
             prevKey = new Key(iiter.peekPrevious().getKey()); // initially prevKey is the last key of the prev block
           else
@@ -771,9 +797,35 @@ public class RFile {
     public void setInterruptFlag(AtomicBoolean flag) {
       this.interruptFlag = flag;
     }
+    
+    private TimestampRangePredicate timestampRange;
+    private boolean filterChanged = false;
+
+    /* (non-Javadoc)
+     * @see org.apache.accumulo.core.iterators.Filterer#applyFilter(org.apache.accumulo.core.iterators.Predicate)
+     */
+    @Override
+    public void applyFilter(Predicate<Key,Value> filter) {
+      // TODO support general filters
+      if(filter instanceof TimestampRangePredicate)
+      {
+        filterChanged = true;
+        TimestampRangePredicate p = (TimestampRangePredicate)filter;
+        // intersect with previous timestampRange
+        if(timestampRange != null)
+          timestampRange = new TimestampRangePredicate(Math.max(p.startTimestamp, timestampRange.startTimestamp), Math.min(p.endTimestamp, timestampRange.endTimestamp));
+        else
+          timestampRange = p;
+        index.setTimestampRange(timestampRange);
+      }
+      else
+      {
+        throw new RuntimeException("yikes, not yet implemented");
+      }
+    }
   }
   
-  public static class Reader extends HeapIterator implements FileSKVIterator {
+  public static class Reader extends HeapIterator implements FileSKVIterator, Filterer<Key,Value> {
     
     private static final Collection<ByteSequence> EMPTY_CF_SET = Collections.emptySet();
     
@@ -799,7 +851,7 @@ public class RFile {
       
       if (magic != RINDEX_MAGIC)
         throw new IOException("Did not see expected magic number, saw " + magic);
-      if (ver != RINDEX_VER_6 && ver != RINDEX_VER_4 && ver != RINDEX_VER_3)
+      if (ver != RINDEX_VER_7 && ver != RINDEX_VER_6 && ver != RINDEX_VER_4 && ver != RINDEX_VER_3)
         throw new IOException("Did not see expected version, saw " + ver);
       
       int size = mb.readInt();
@@ -947,6 +999,9 @@ public class RFile {
     @Override
     public void seek(Range range, Collection<ByteSequence> columnFamilies, boolean inclusive) throws IOException {
       
+      topKey = null;
+      topValue = null;
+      
       clear();
       
       numLGSeeked = 0;
@@ -1001,6 +1056,8 @@ public class RFile {
         }
         
         if (include) {
+          if(timestampFilter != null)
+            lgr.applyFilter(timestampFilter);
           lgr.seek(range, EMPTY_CF_SET, false);
           addSource(lgr);
           numLGSeeked++;
@@ -1047,6 +1104,94 @@ public class RFile {
         lgr.setInterruptFlag(interruptFlag);
       }
     }
+    
+    ArrayList<Predicate<Key,Value>> filters = new ArrayList<Predicate<Key,Value>>();
+    
+    TimestampRangePredicate timestampFilter = null;
+    
+    Key topKey;
+    Value topValue;
+    
+    /* (non-Javadoc)
+     * @see org.apache.accumulo.core.iterators.system.HeapIterator#hasTop()
+     */
+    @Override
+    public boolean hasTop() {
+      if(topKey == null)
+      {
+        while(super.hasTop())
+        {
+          topKey = super.getTopKey();
+          topValue = super.getTopValue();
+          // check all the filters to see if we found a valid key/value pair
+          boolean keep = true;
+          for(Predicate<Key,Value> filter: filters)
+          {
+            if(!filter.evaluate(topKey, topValue))
+            {
+              keep = false;
+              try {
+                super.next();
+              } catch (IOException e) {
+                throw new RuntimeException(e);
+              }
+              break;
+            }
+          }
+          if(keep == true)
+            return true;
+        }
+        // ran out of key/value pairs
+        topKey = null;
+        topValue = null;
+        return false;
+      }
+      else
+      {
+        return true;
+      }
+    }
+
+    /* (non-Javadoc)
+     * @see org.apache.accumulo.core.iterators.system.HeapIterator#next()
+     */
+    @Override
+    public void next() throws IOException {
+      topKey = null;
+      topValue = null;
+      super.next();
+    }
+
+    /* (non-Javadoc)
+     * @see org.apache.accumulo.core.iterators.system.HeapIterator#getTopKey()
+     */
+    @Override
+    public Key getTopKey() {
+      if(topKey == null)
+        hasTop();
+      return topKey;
+    }
+    
+    /* (non-Javadoc)
+     * @see org.apache.accumulo.core.iterators.system.HeapIterator#getTopValue()
+     */
+    @Override
+    public Value getTopValue() {
+      if(topValue == null)
+        hasTop();
+      return topValue;
+    }
+    
+    /* (non-Javadoc)
+     * @see org.apache.accumulo.core.iterators.Filterer#applyFilter(org.apache.accumulo.core.iterators.Predicate)
+     */
+    @Override
+    public void applyFilter(Predicate<Key,Value> filter) {
+      filters.add(filter);
+      // the HeapIterator will pass this filter on to its children, a collection of LocalityGroupReaders
+      if(filter instanceof TimestampRangePredicate)
+        this.timestampFilter = (TimestampRangePredicate)filter;
+    }
   }
   
   public static void main(String[] args) throws Exception {

http://git-wip-us.apache.org/repos/asf/accumulo/blob/3fcd07de/core/src/main/java/org/apache/accumulo/core/iterators/Filterer.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/iterators/Filterer.java b/core/src/main/java/org/apache/accumulo/core/iterators/Filterer.java
new file mode 100644
index 0000000..bda3665
--- /dev/null
+++ b/core/src/main/java/org/apache/accumulo/core/iterators/Filterer.java
@@ -0,0 +1,24 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.core.iterators;
+
+/**
+ * 
+ */
+public interface Filterer<K,V> {
+  public void applyFilter(Predicate<K,V> filter);
+}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/3fcd07de/core/src/main/java/org/apache/accumulo/core/iterators/Predicate.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/iterators/Predicate.java b/core/src/main/java/org/apache/accumulo/core/iterators/Predicate.java
new file mode 100644
index 0000000..99a6e8b
--- /dev/null
+++ b/core/src/main/java/org/apache/accumulo/core/iterators/Predicate.java
@@ -0,0 +1,24 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.core.iterators;
+
+/**
+ * Predicate<K,V> supports a single method that is used to evaluate an input (K,V) pair as true or false
+ */
+public interface Predicate<K,V> {
+  public boolean evaluate(K k, V v);
+}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/3fcd07de/core/src/main/java/org/apache/accumulo/core/iterators/predicates/TimestampRangePredicate.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/iterators/predicates/TimestampRangePredicate.java b/core/src/main/java/org/apache/accumulo/core/iterators/predicates/TimestampRangePredicate.java
new file mode 100644
index 0000000..eb5080b
--- /dev/null
+++ b/core/src/main/java/org/apache/accumulo/core/iterators/predicates/TimestampRangePredicate.java
@@ -0,0 +1,54 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.core.iterators.predicates;
+
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.iterators.Predicate;
+
+/**
+ * TimestampRangePredicate is used to determine whether a Key/Value pair falls within a timestamp range
+ */
+public class TimestampRangePredicate implements Predicate<Key,Value> {
+
+  public final long startTimestamp;
+  public final long endTimestamp;
+  
+  
+  /**
+   * @param startTimestamp - inclusive first allowable timestamp
+   * @param endTimestamp - inclusive last allowable timestamp
+   */
+  public TimestampRangePredicate(long startTimestamp, long endTimestamp) {
+    super();
+    this.startTimestamp = startTimestamp;
+    this.endTimestamp = endTimestamp;
+  }
+
+  /* (non-Javadoc)
+   * @see org.apache.accumulo.core.iterators.Predicate#evaluate(java.lang.Object, java.lang.Object)
+   */
+  /**
+   * Returns true if and only if the key's timestamp falls within the inclusive range [startTimestamp, endTimestamp]
+   */
+  @Override
+  public boolean evaluate(Key k, Value v) {
+    long timestamp = k.getTimestamp();
+    return timestamp >= startTimestamp && timestamp <= endTimestamp;
+  }
+  
+}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/3fcd07de/core/src/main/java/org/apache/accumulo/core/iterators/system/HeapIterator.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/iterators/system/HeapIterator.java b/core/src/main/java/org/apache/accumulo/core/iterators/system/HeapIterator.java
index e54f37c..72aa3e7 100644
--- a/core/src/main/java/org/apache/accumulo/core/iterators/system/HeapIterator.java
+++ b/core/src/main/java/org/apache/accumulo/core/iterators/system/HeapIterator.java
@@ -17,9 +17,12 @@
 package org.apache.accumulo.core.iterators.system;
 
 import java.io.IOException;
+import java.util.ArrayList;
 
 import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.iterators.Filterer;
+import org.apache.accumulo.core.iterators.Predicate;
 import org.apache.accumulo.core.iterators.SortedKeyValueIterator;
 import org.apache.commons.collections.buffer.PriorityBuffer;
 
@@ -55,28 +58,29 @@ public abstract class HeapIterator implements SortedKeyValueIterator<Key,Value>
   }
   
   @Override
-  final public Key getTopKey() {
+  public Key getTopKey() {
     return currentIter.getTopKey();
   }
   
   @Override
-  final public Value getTopValue() {
+  public Value getTopValue() {
     return currentIter.getTopValue();
   }
   
   @Override
-  final public boolean hasTop() {
+  public boolean hasTop() {
     return heap.size() > 0;
   }
   
   @Override
-  final public void next() throws IOException {
+  public void next() throws IOException {
     switch (heap.size()) {
       case 0:
         throw new IllegalStateException("Called next() when there is no top");
       case 1:
         // optimization for case when heap contains one entry,
         // avoids remove and add
+        // TODO apply the filters
         currentIter.next();
         if (!currentIter.hasTop()) {
           heap.remove();
@@ -85,6 +89,7 @@ public abstract class HeapIterator implements SortedKeyValueIterator<Key,Value>
         break;
       default:
         Index idx = (Index) heap.remove();
+        // TODO apply the filters
         idx.iter.next();
         if (idx.iter.hasTop()) {
           heap.add(idx);
@@ -111,5 +116,4 @@ public abstract class HeapIterator implements SortedKeyValueIterator<Key,Value>
     else
       currentIter = null;
   }
-  
 }

http://git-wip-us.apache.org/repos/asf/accumulo/blob/3fcd07de/core/src/main/java/org/apache/accumulo/core/iterators/system/VisibilityFilter.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/iterators/system/VisibilityFilter.java b/core/src/main/java/org/apache/accumulo/core/iterators/system/VisibilityFilter.java
index a41f7be..a4391c0 100644
--- a/core/src/main/java/org/apache/accumulo/core/iterators/system/VisibilityFilter.java
+++ b/core/src/main/java/org/apache/accumulo/core/iterators/system/VisibilityFilter.java
@@ -23,26 +23,22 @@ import org.apache.accumulo.core.iterators.IteratorEnvironment;
 import org.apache.accumulo.core.iterators.SortedKeyValueIterator;
 import org.apache.accumulo.core.security.Authorizations;
 import org.apache.accumulo.core.security.ColumnVisibility;
-import org.apache.accumulo.core.security.VisibilityEvaluator;
-import org.apache.accumulo.core.security.VisibilityParseException;
 import org.apache.accumulo.core.util.TextUtil;
 import org.apache.commons.collections.map.LRUMap;
 import org.apache.hadoop.io.Text;
 import org.apache.log4j.Logger;
 
 public class VisibilityFilter extends Filter {
-  private VisibilityEvaluator ve;
+  private Authorizations auths;
   private Text defaultVisibility;
   private LRUMap cache;
   private Text tmpVis;
   
   private static final Logger log = Logger.getLogger(VisibilityFilter.class);
   
-  public VisibilityFilter() {}
-  
   public VisibilityFilter(SortedKeyValueIterator<Key,Value> iterator, Authorizations authorizations, byte[] defaultVisibility) {
     setSource(iterator);
-    this.ve = new VisibilityEvaluator(authorizations);
+    this.auths = authorizations;
     this.defaultVisibility = new Text(defaultVisibility);
     this.cache = new LRUMap(1000);
     this.tmpVis = new Text();
@@ -50,7 +46,7 @@ public class VisibilityFilter extends Filter {
   
   @Override
   public SortedKeyValueIterator<Key,Value> deepCopy(IteratorEnvironment env) {
-    return new VisibilityFilter(getSource().deepCopy(env), ve.getAuthorizations(), TextUtil.getBytes(defaultVisibility));
+    return new VisibilityFilter(getSource().deepCopy(env), auths, TextUtil.getBytes(defaultVisibility));
   }
   
   @Override
@@ -66,13 +62,8 @@ public class VisibilityFilter extends Filter {
     if (b != null)
       return b;
     
-    try {
-      Boolean bb = ve.evaluate(new ColumnVisibility(testVis));
-      cache.put(new Text(testVis), bb);
-      return bb;
-    } catch (VisibilityParseException e) {
-      log.error("Parse Error", e);
-      return false;
-    }
+    Boolean bb = new ColumnVisibility(testVis).evaluate(auths);
+    cache.put(new Text(testVis), bb);
+    return bb;
   }
 }

http://git-wip-us.apache.org/repos/asf/accumulo/blob/3fcd07de/core/src/main/java/org/apache/accumulo/core/security/ColumnVisibility.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/security/ColumnVisibility.java b/core/src/main/java/org/apache/accumulo/core/security/ColumnVisibility.java
index 5a825f2..1b72b33 100644
--- a/core/src/main/java/org/apache/accumulo/core/security/ColumnVisibility.java
+++ b/core/src/main/java/org/apache/accumulo/core/security/ColumnVisibility.java
@@ -16,16 +16,16 @@
  */
 package org.apache.accumulo.core.security;
 
-import java.util.ArrayList;
+import java.io.ByteArrayOutputStream;
 import java.util.Arrays;
-import java.util.Collections;
-import java.util.Comparator;
-import java.util.List;
+import java.util.Iterator;
+import java.util.TreeSet;
 
+import org.apache.accumulo.core.data.ArrayByteSequence;
+import org.apache.accumulo.core.data.ByteSequence;
 import org.apache.accumulo.core.util.BadArgumentException;
 import org.apache.accumulo.core.util.TextUtil;
 import org.apache.hadoop.io.Text;
-import org.apache.hadoop.io.WritableComparator;
 
 /**
  * Validate the column visibility is a valid expression and set the visibility for a Mutation. See {@link ColumnVisibility#ColumnVisibility(byte[])} for the
@@ -33,117 +33,190 @@ import org.apache.hadoop.io.WritableComparator;
  */
 public class ColumnVisibility {
   
-  Node node = null;
-  private byte[] expression;
-  
-  /**
-   * Accessor for the underlying byte string.
-   * 
-   * @return byte array representation of a visibility expression
-   */
-  public byte[] getExpression() {
-    return expression;
-  }
+  private Node node = null;
   
   public static enum NodeType {
     TERM, OR, AND,
   }
-  
-  public static class Node {
-    public final static List<Node> EMPTY = Collections.emptyList();
-    NodeType type;
-    int start = 0;
-    int end = 0;
-    List<Node> children = EMPTY;
+
+  private static abstract class Node implements Comparable<Node> {
+    protected final NodeType type;
     
-    public Node(NodeType type) {
+    public Node(NodeType type)
+    {
       this.type = type;
     }
-    
-    public Node(int start, int end) {
-      this.type = NodeType.TERM;
-      this.start = start;
-      this.end = end;
+
+    public byte[] generate() {
+      ByteArrayOutputStream baos = new ByteArrayOutputStream();
+      generate(baos,false);
+      return baos.toByteArray();
     }
     
-    public void add(Node child) {
-      if (children == EMPTY)
-        children = new ArrayList<Node>();
-      
-      children.add(child);
-    }
+    public abstract boolean evaluate(Authorizations auths);
+    
+    protected abstract void generate(ByteArrayOutputStream baos, boolean parens);
+  }
+  
+  private static class TermNode extends Node {
+    
+    final ByteSequence bs;
     
-    public NodeType getType() {
-      return type;
+    public TermNode(final ByteSequence bs) {
+      super(NodeType.TERM);
+      this.bs = bs;
     }
     
-    public List<Node> getChildren() {
-      return children;
+    public boolean evaluate(Authorizations auths)
+    {
+      return auths.contains(bs);
+    }
+
+
+    protected void generate(ByteArrayOutputStream baos, boolean parens)
+    {
+      baos.write(bs.getBackingArray(), bs.offset(), bs.length());
     }
     
-    public int getTermStart() {
-      return start;
+    @Override
+    public boolean equals(Object other) {
+      if(other instanceof TermNode)
+      {
+        return bs.compareTo(((TermNode)other).bs) == 0;
+      }
+      return false;
     }
     
-    public int getTermEnd() {
-      return end;
+    @Override
+    public int compareTo(Node o) {
+      if(o.type == NodeType.TERM)
+      {
+        return bs.compareTo(((TermNode)o).bs);
+      }
+      return type.ordinal() - o.type.ordinal();
     }
   }
   
-  public static class NodeComparator implements Comparator<Node> {
+  private abstract static class AggregateNode extends Node {
+
+    /**
+     * @param type
+     */
+    public AggregateNode(NodeType type) {
+      super(type);
+    }
     
-    byte[] text;
+    protected TreeSet<Node> children = new TreeSet<Node>();
     
-    NodeComparator(byte[] text) {
-      this.text = text;
+    protected abstract byte getOperator();
+    
+    @Override
+    protected void generate(ByteArrayOutputStream baos, boolean parens) {
+      if(parens)
+        baos.write('(');
+      boolean first = true;
+      for(Node child:children)
+      {
+        if(!first)
+          baos.write(getOperator());
+        child.generate(baos, true);
+        first = false;
+      }
+      if(parens)
+        baos.write(')');
     }
     
     @Override
-    public int compare(Node a, Node b) {
-      int diff = a.type.ordinal() - b.type.ordinal();
-      if (diff != 0)
-        return diff;
-      switch (a.type) {
-        case TERM:
-          return WritableComparator.compareBytes(text, a.start, a.end - a.start, text, b.start, b.end - b.start);
-        case OR:
-        case AND:
-          diff = a.children.size() - b.children.size();
-          if (diff != 0)
-            return diff;
-          for (int i = 0; i < a.children.size(); i++) {
-            diff = compare(a.children.get(i), b.children.get(i));
-            if (diff != 0)
-              return diff;
-          }
+    public int compareTo(Node o) {
+      int ordinalDiff = type.ordinal() - o.type.ordinal();
+      if(ordinalDiff != 0)
+        return ordinalDiff;
+      AggregateNode other = (AggregateNode)o;
+      int childCountDifference = children.size() - other.children.size();
+      if(childCountDifference != 0)
+        return childCountDifference;
+      Iterator<Node> otherChildren = other.children.iterator();
+      for(Node n1:children)
+      {
+        int comp = n1.compareTo(otherChildren.next());
+        if(comp != 0)
+          return comp;
       }
       return 0;
     }
+
   }
   
-  static private void flatten(Node root, byte[] expression, StringBuilder out) {
-    if (root.type == NodeType.TERM)
-      out.append(new String(expression, root.start, root.end - root.start));
-    else {
-      String sep = "";
-      Collections.sort(root.children, new NodeComparator(expression));
-      for (Node c : root.children) {
-        out.append(sep);
-        boolean parens = (c.type != NodeType.TERM && root.type != c.type);
-        if (parens)
-          out.append("(");
-        flatten(c, expression, out);
-        if (parens)
-          out.append(")");
-        sep = root.type == NodeType.AND ? "&" : "|";
-      }
+  private static class OrNode extends AggregateNode {
+
+    public OrNode() {
+      super(NodeType.OR);
+    }
+
+    @Override
+    public boolean evaluate(Authorizations auths) {
+      for(Node child:children)
+        if(child.evaluate(auths))
+          return true;
+      return false;
+    }
+
+    @Override
+    protected byte getOperator() {
+      return '|';
     }
+    
+  }
+  
+  private static class AndNode extends AggregateNode {
+
+    public AndNode()
+    {
+      super(NodeType.AND);
+    }
+    
+    @Override
+    public boolean evaluate(Authorizations auths) {
+      for(Node child:children)
+        if(!child.evaluate(auths))
+          return false;
+      return true;
+    }
+
+    @Override
+    protected byte getOperator() {
+      return '&';
+    }
+    
   }
+
+  private byte[] expression = null;
   
+  /**
+   * @deprecated
+   * @see org.apache.accumulo.core.security.ColumnVisibility#getExpression()
+   */
   public byte[] flatten() {
-    StringBuilder builder = new StringBuilder();
-    flatten(node, expression, builder);
-    return builder.toString().getBytes();
+    return getExpression();
+  }
+  
+  /**
+   * Generate the byte[] that represents this ColumnVisibility.
+   * @return a byte[] representation of this visibility
+   */
+  public byte[] getExpression(){
+    if(expression != null)
+      return expression;
+    expression = _flatten();
+    return expression;
+  }
+  
+  private static final byte[] emptyExpression = new byte[0];
+  
+  private byte[] _flatten() {
+    if(node == null)
+      return emptyExpression;
+    return node.generate();
   }
   
   private static class ColumnVisibilityParser {
@@ -170,7 +243,7 @@ public class ColumnVisibility {
       if (start != end) {
         if (expr != null)
           throw new BadArgumentException("expression needs | or &", new String(expression), start);
-        return new Node(start, end);
+        return new TermNode(new ArrayByteSequence(expression, start, end - start));
       }
       if (expr == null)
         throw new BadArgumentException("empty term", new String(expression), start);
@@ -189,9 +262,9 @@ public class ColumnVisibility {
               if (!result.type.equals(NodeType.AND))
                 throw new BadArgumentException("cannot mix & and |", new String(expression), index - 1);
             } else {
-              result = new Node(NodeType.AND);
+              result = new AndNode();
             }
-            result.add(expr);
+            ((AggregateNode)result).children.add(expr);
             expr = null;
             termStart = index;
             break;
@@ -202,9 +275,9 @@ public class ColumnVisibility {
               if (!result.type.equals(NodeType.OR))
                 throw new BadArgumentException("cannot mix | and &", new String(expression), index - 1);
             } else {
-              result = new Node(NodeType.OR);
+              result = new OrNode();
             }
-            result.add(expr);
+            ((AggregateNode)result).children.add(expr);
             expr = null;
             termStart = index;
             break;
@@ -225,11 +298,21 @@ public class ColumnVisibility {
             if (result == null)
               return child;
             if (result.type == child.type)
-              for (Node c : child.children)
-                result.add(c);
+            {
+              AggregateNode parenNode = (AggregateNode)child;
+              for (Node c : parenNode.children)
+                ((AggregateNode)result).children.add(c);
+            }
             else
-              result.add(child);
-            result.end = index - 1;
+              ((AggregateNode)result).children.add(child);
+            if (result.type != NodeType.TERM)
+            {
+              AggregateNode resultNode = (AggregateNode)result;
+              if (resultNode.children.size() == 1)
+                return resultNode.children.first();
+              if (resultNode.children.size() < 2)
+                throw new BadArgumentException("missing term", new String(expression), index);
+            }
             return result;
           }
           default: {
@@ -241,12 +324,24 @@ public class ColumnVisibility {
       }
       Node child = processTerm(termStart, index, expr, expression);
       if (result != null)
-        result.add(child);
+      {
+        if(result.type == child.type)
+        {
+          ((AggregateNode)result).children.addAll(((AggregateNode)child).children);
+        }
+        else
+          ((AggregateNode)result).children.add(child);
+      }
       else
         result = child;
       if (result.type != NodeType.TERM)
-        if (result.children.size() < 2)
+      {
+        AggregateNode resultNode = (AggregateNode)result;
+        if (resultNode.children.size() == 1)
+          return resultNode.children.first();
+        if (resultNode.children.size() < 2)
           throw new BadArgumentException("missing term", new String(expression), index);
+      }
       return result;
     }
   }
@@ -256,14 +351,12 @@ public class ColumnVisibility {
       ColumnVisibilityParser p = new ColumnVisibilityParser();
       node = p.parse(expression);
     }
-    this.expression = expression;
   }
   
   /**
    * Empty visibility. Normally, elements with empty visibility can be seen by everyone. Though, one could change this behavior with filters.
    */
   public ColumnVisibility() {
-    expression = new byte[0];
   }
   
   /**
@@ -279,6 +372,10 @@ public class ColumnVisibility {
     this(TextUtil.getBytes(expression));
   }
   
+  private ColumnVisibility(Node node) {
+    this.node = node;
+  }
+  
   /**
    * Set the column visibility for a Mutation.
    * 
@@ -313,7 +410,7 @@ public class ColumnVisibility {
   
   @Override
   public String toString() {
-    return "[" + new String(expression) + "]";
+    return "[" + new String(this.getExpression()) + "]";
   }
   
   /**
@@ -329,16 +426,55 @@ public class ColumnVisibility {
   /**
    * Compares two ColumnVisibilities for string equivalence, not as a meaningful comparison of terms and conditions.
    */
-  public boolean equals(ColumnVisibility otherLe) {
-    return Arrays.equals(expression, otherLe.expression);
-  }
+//  public boolean equals(ColumnVisibility otherLe) {
+//    return Arrays.equals(expression, otherLe.expression);
+//  }
   
   @Override
   public int hashCode() {
-    return Arrays.hashCode(expression);
+    return Arrays.hashCode(getExpression());
   }
   
-  public Node getParseTree() {
-    return node;
+  public boolean evaluate(Authorizations auths) {
+    if(node == null)
+      return true;
+    return node.evaluate(auths);
   }
+  
+  public ColumnVisibility or(ColumnVisibility other)
+  {
+    if(node == null)
+      return this;
+    if(other.node == null)
+      return other;
+    OrNode orNode = new OrNode();
+    if(other.node instanceof OrNode)
+      orNode.children.addAll(((OrNode)other.node).children);
+    else
+      orNode.children.add(other.node);
+    if(node instanceof OrNode)
+      orNode.children.addAll(((OrNode)node).children);
+    else
+      orNode.children.add(node);
+    return new ColumnVisibility(orNode);
+  }
+  
+  public ColumnVisibility and(ColumnVisibility other)
+  {
+    if(node == null)
+      return other;
+    if(other.node == null)
+      return this;
+    AndNode andNode = new AndNode();
+    if(other.node instanceof AndNode)
+      andNode.children.addAll(((AndNode)other.node).children);
+    else
+      andNode.children.add(other.node);
+    if(node instanceof AndNode)
+      andNode.children.addAll(((AndNode)node).children);
+    else
+      andNode.children.add(node);
+    return new ColumnVisibility(andNode);
+  }
+
 }

http://git-wip-us.apache.org/repos/asf/accumulo/blob/3fcd07de/core/src/main/java/org/apache/accumulo/core/security/VisibilityConstraint.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/security/VisibilityConstraint.java b/core/src/main/java/org/apache/accumulo/core/security/VisibilityConstraint.java
index c8b33ba..1df543f 100644
--- a/core/src/main/java/org/apache/accumulo/core/security/VisibilityConstraint.java
+++ b/core/src/main/java/org/apache/accumulo/core/security/VisibilityConstraint.java
@@ -47,7 +47,7 @@ public class VisibilityConstraint implements Constraint {
     if (updates.size() > 1)
       ok = new HashSet<String>();
     
-    VisibilityEvaluator ve = null;
+    Authorizations auths = env.getAuthorizations();
     
     for (ColumnUpdate update : updates) {
       
@@ -59,16 +59,11 @@ public class VisibilityConstraint implements Constraint {
         
         try {
           
-          if (ve == null)
-            ve = new VisibilityEvaluator(env.getAuthorizations());
-          
-          if (!ve.evaluate(new ColumnVisibility(cv)))
+          if (!new ColumnVisibility(cv).evaluate(auths))
             return Collections.singletonList(new Short((short) 2));
           
         } catch (BadArgumentException bae) {
           return Collections.singletonList(new Short((short) 1));
-        } catch (VisibilityParseException e) {
-          return Collections.singletonList(new Short((short) 1));
         }
         
         if (ok != null)

http://git-wip-us.apache.org/repos/asf/accumulo/blob/3fcd07de/core/src/test/java/org/apache/accumulo/core/file/rfile/MultiLevelIndexTest.java
----------------------------------------------------------------------
diff --git a/core/src/test/java/org/apache/accumulo/core/file/rfile/MultiLevelIndexTest.java b/core/src/test/java/org/apache/accumulo/core/file/rfile/MultiLevelIndexTest.java
index c5e2501..3da616d 100644
--- a/core/src/test/java/org/apache/accumulo/core/file/rfile/MultiLevelIndexTest.java
+++ b/core/src/test/java/org/apache/accumulo/core/file/rfile/MultiLevelIndexTest.java
@@ -32,6 +32,7 @@ import org.apache.accumulo.core.file.rfile.MultiLevelIndex.Reader;
 import org.apache.accumulo.core.file.rfile.MultiLevelIndex.Reader.IndexIterator;
 import org.apache.accumulo.core.file.rfile.MultiLevelIndex.Writer;
 import org.apache.accumulo.core.file.rfile.RFileTest.SeekableByteArrayInputStream;
+import org.apache.accumulo.core.security.ColumnVisibility;
 import org.apache.accumulo.core.util.CachedConfiguration;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
@@ -58,9 +59,9 @@ public class MultiLevelIndexTest extends TestCase {
     BufferedWriter mliw = new BufferedWriter(new Writer(_cbw, maxBlockSize));
     
     for (int i = 0; i < num; i++)
-      mliw.add(new Key(String.format("%05d000", i)), i, 0, 0, 0);
+      mliw.add(new Key(String.format("%05d000", i)), 0l, 0l, new ColumnVisibility(), i, 0, 0, 0, RFile.RINDEX_VER_7);
     
-    mliw.addLast(new Key(String.format("%05d000", num)), num, 0, 0, 0);
+    mliw.addLast(new Key(String.format("%05d000", num)), 0l, 0l, new ColumnVisibility(), num, 0, 0, 0, RFile.RINDEX_VER_7);
     
     ABlockWriter root = _cbw.prepareMetaBlock("root");
     mliw.close(root);
@@ -75,7 +76,7 @@ public class MultiLevelIndexTest extends TestCase {
     FSDataInputStream in = new FSDataInputStream(bais);
     CachableBlockFile.Reader _cbr = new CachableBlockFile.Reader(in, data.length, CachedConfiguration.getInstance());
     
-    Reader reader = new Reader(_cbr, RFile.RINDEX_VER_6);
+    Reader reader = new Reader(_cbr, RFile.RINDEX_VER_7);
     BlockRead rootIn = _cbr.getMetaBlock("root");
     reader.readFields(rootIn);
     rootIn.close();
@@ -90,15 +91,6 @@ public class MultiLevelIndexTest extends TestCase {
     
     assertEquals(num + 1, count);
     
-    while (liter.hasPrevious()) {
-      count--;
-      assertEquals(count, liter.previousIndex());
-      assertEquals(count, liter.peekPrevious().getNumEntries());
-      assertEquals(count, liter.previous().getNumEntries());
-    }
-    
-    assertEquals(0, count);
-    
     // go past the end
     liter = reader.lookup(new Key(String.format("%05d000", num + 1)));
     assertFalse(liter.hasNext());

http://git-wip-us.apache.org/repos/asf/accumulo/blob/3fcd07de/core/src/test/java/org/apache/accumulo/core/file/rfile/RFileTest.java
----------------------------------------------------------------------
diff --git a/core/src/test/java/org/apache/accumulo/core/file/rfile/RFileTest.java b/core/src/test/java/org/apache/accumulo/core/file/rfile/RFileTest.java
index ed7cf7b..71f5c6c 100644
--- a/core/src/test/java/org/apache/accumulo/core/file/rfile/RFileTest.java
+++ b/core/src/test/java/org/apache/accumulo/core/file/rfile/RFileTest.java
@@ -24,8 +24,10 @@ import java.io.IOException;
 import java.io.InputStream;
 import java.util.ArrayList;
 import java.util.Collection;
+import java.util.Collections;
 import java.util.HashSet;
 import java.util.Iterator;
+import java.util.Random;
 import java.util.Set;
 
 import junit.framework.TestCase;
@@ -51,8 +53,11 @@ import org.apache.hadoop.fs.Seekable;
 import org.apache.hadoop.io.Text;
 import org.apache.log4j.Level;
 import org.apache.log4j.Logger;
+import org.junit.Test;
 
-public class RFileTest extends TestCase {
+import static org.junit.Assert.*;
+
+public class RFileTest {
   
   private static final Collection<ByteSequence> EMPTY_COL_FAMS = new ArrayList<ByteSequence>();
   
@@ -206,6 +211,7 @@ public class RFileTest extends TestCase {
     return String.format(prefix + "%06d", i);
   }
   
+  @Test
   public void test1() throws IOException {
     
     // test an emprt file
@@ -224,6 +230,7 @@ public class RFileTest extends TestCase {
     trf.closeReader();
   }
   
+  @Test
   public void test2() throws IOException {
     
     // test an rfile with one entry
@@ -260,6 +267,7 @@ public class RFileTest extends TestCase {
     trf.closeReader();
   }
   
+  @Test
   public void test3() throws IOException {
     
     // test an rfile with multiple rows having multiple columns
@@ -403,6 +411,7 @@ public class RFileTest extends TestCase {
     assertFalse(evi.hasNext());
   }
   
+  @Test
   public void test4() throws IOException {
     TestRFile trf = new TestRFile();
     
@@ -445,6 +454,7 @@ public class RFileTest extends TestCase {
     }
   }
   
+  @Test
   public void test5() throws IOException {
     
     TestRFile trf = new TestRFile();
@@ -473,6 +483,7 @@ public class RFileTest extends TestCase {
     trf.closeReader();
   }
   
+  @Test
   public void test6() throws IOException {
     
     TestRFile trf = new TestRFile();
@@ -505,6 +516,7 @@ public class RFileTest extends TestCase {
     trf.closeReader();
   }
   
+  @Test
   public void test7() throws IOException {
     // these test excercise setting the end key of a range
     
@@ -556,6 +568,7 @@ public class RFileTest extends TestCase {
     trf.reader.close();
   }
   
+  @Test
   public void test8() throws IOException {
     TestRFile trf = new TestRFile();
     
@@ -672,6 +685,7 @@ public class RFileTest extends TestCase {
     return cfs;
   }
   
+  @Test
   public void test9() throws IOException {
     TestRFile trf = new TestRFile();
     
@@ -813,6 +827,7 @@ public class RFileTest extends TestCase {
     
   }
   
+  @Test
   public void test10() throws IOException {
     
     // test empty locality groups
@@ -941,6 +956,7 @@ public class RFileTest extends TestCase {
     trf.closeReader();
   }
   
+  @Test
   public void test11() throws IOException {
     // test locality groups with more than two entries
     
@@ -1045,6 +1061,7 @@ public class RFileTest extends TestCase {
     trf.closeReader();
   }
   
+  @Test
   public void test12() throws IOException {
     // test inserting column fams not in locality groups
     
@@ -1076,6 +1093,7 @@ public class RFileTest extends TestCase {
     
   }
   
+  @Test
   public void test13() throws IOException {
     // test inserting column fam in default loc group that was in
     // previous locality group
@@ -1117,6 +1135,7 @@ public class RFileTest extends TestCase {
     
   }
   
+  @Test
   public void test14() throws IOException {
     // test starting locality group after default locality group was started
     
@@ -1142,6 +1161,7 @@ public class RFileTest extends TestCase {
     trf.writer.close();
   }
   
+  @Test
   public void test16() throws IOException {
     TestRFile trf = new TestRFile();
     
@@ -1160,6 +1180,7 @@ public class RFileTest extends TestCase {
     trf.closeWriter();
   }
   
+  @Test
   public void test17() throws IOException {
     // add alot of the same keys to rfile that cover multiple blocks...
     // this should cause the keys in the index to be exactly the same...
@@ -1298,6 +1319,7 @@ public class RFileTest extends TestCase {
     assertEquals(nonExcluded, colFamsSeen);
   }
   
+  @Test
   public void test18() throws IOException {
     // test writing more column families to default LG than it will track
     
@@ -1349,6 +1371,7 @@ public class RFileTest extends TestCase {
     trf.closeReader();
   }
   
+  @Test
   public void test19() throws IOException {
     // test RFile metastore
     TestRFile trf = new TestRFile();
@@ -1401,6 +1424,7 @@ public class RFileTest extends TestCase {
     trf.closeReader();
   }
   
+  @Test
   public void testOldVersions() throws Exception {
     runVersionTest(3);
     runVersionTest(4);
@@ -1459,4 +1483,23 @@ public class RFileTest extends TestCase {
     
     reader.close();
   }
+  
+  @Test
+  public void testSingleKeyBlocks() throws IOException
+  {
+    byte[] bytes = new byte[2000];
+    Random r = new Random();
+    r.nextBytes(bytes);
+    TestRFile trf = new TestRFile();
+    trf.openWriter(false);
+    Value vBig = new Value(bytes);
+    trf.writer.startNewLocalityGroup("one", Collections.singleton((ByteSequence)(new ArrayByteSequence("one"))));
+    trf.writer.append(new Key("r1","one"), vBig);
+    trf.writer.append(new Key("r2","one"), vBig);
+    trf.writer.startNewLocalityGroup("two", Collections.singleton((ByteSequence)(new ArrayByteSequence("two"))));
+    trf.writer.append(new Key("r1","two"), vBig);
+    trf.writer.append(new Key("r2","two"), vBig);
+    trf.writer.close();
+    
+  }
 }

http://git-wip-us.apache.org/repos/asf/accumulo/blob/3fcd07de/core/src/test/java/org/apache/accumulo/core/file/rfile/TimestampFilterTest.java
----------------------------------------------------------------------
diff --git a/core/src/test/java/org/apache/accumulo/core/file/rfile/TimestampFilterTest.java b/core/src/test/java/org/apache/accumulo/core/file/rfile/TimestampFilterTest.java
new file mode 100644
index 0000000..c58f924
--- /dev/null
+++ b/core/src/test/java/org/apache/accumulo/core/file/rfile/TimestampFilterTest.java
@@ -0,0 +1,98 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.core.file.rfile;
+
+import static org.junit.Assert.*;
+
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
+import java.util.Collections;
+import java.util.Map.Entry;
+import java.util.Random;
+import java.util.TreeMap;
+
+import org.apache.accumulo.core.conf.AccumuloConfiguration;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.Range;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.file.blockfile.impl.CachableBlockFile;
+import org.apache.accumulo.core.file.rfile.RFileTest.SeekableByteArrayInputStream;
+import org.apache.accumulo.core.iterators.Predicate;
+import org.apache.accumulo.core.iterators.predicates.TimestampRangePredicate;
+import org.apache.accumulo.core.iterators.system.ColumnFamilySkippingIterator;
+import org.apache.accumulo.core.security.ColumnVisibility;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.junit.Test;
+
+public class TimestampFilterTest {
+  
+  @Test
+  public void testRFileTimestampFiltering() throws Exception {
+    // TODO create an RFile with increasing timestamp and random key order
+    Predicate<Key,Value> timeRange = new TimestampRangePredicate(100, 110);
+    int expected = 0;
+    Random r = new Random();
+    Configuration conf = new Configuration();
+    ByteArrayOutputStream baos = new ByteArrayOutputStream();
+    FSDataOutputStream dos = new FSDataOutputStream(baos, new FileSystem.Statistics("a"));
+    CachableBlockFile.Writer _cbw = new CachableBlockFile.Writer(dos, "gz", conf);
+    RFile.Writer writer = new RFile.Writer(_cbw, 1000, 1000);
+    writer.startDefaultLocalityGroup();
+    byte [] row = new byte[10];
+    byte [] colFam = new byte[10];
+    byte [] colQual = new byte[10];
+    Value value = new Value(new byte[0]);
+    byte [] colVis = new byte[0];
+    TreeMap<Key,Value> inputBuffer = new TreeMap<Key,Value>();
+    for(int i = 0; i < 100000; i++)
+    {
+      r.nextBytes(row);
+      r.nextBytes(colFam);
+      r.nextBytes(colQual);
+      Key k = new Key(row,colFam,colQual,colVis,(long)i);
+      if(timeRange.evaluate(k, value))
+        expected++;
+      inputBuffer.put(k, value);
+    }
+    for(Entry<Key,Value> e:inputBuffer.entrySet())
+    {
+      writer.append(e.getKey(), e.getValue());
+    }
+    writer.close();
+
+    // scan the RFile to bring back keys in a given timestamp range
+    byte[] data = baos.toByteArray();
+    ByteArrayInputStream bais = new SeekableByteArrayInputStream(data);
+    FSDataInputStream in = new FSDataInputStream(bais);
+    CachableBlockFile.Reader _cbr = new CachableBlockFile.Reader(in, data.length, conf);
+    RFile.Reader reader = new RFile.Reader(_cbr);
+    int count = 0;
+    reader.applyFilter(timeRange);
+    reader.seek(new Range(), Collections.EMPTY_SET, false);
+    while(reader.hasTop())
+    {
+      count++;
+      assertTrue(timeRange.evaluate(reader.getTopKey(),reader.getTopValue()));
+      reader.next();
+    }
+    assertEquals(expected, count);
+  }
+  
+}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/3fcd07de/core/src/test/java/org/apache/accumulo/core/iterators/user/IndexedDocIteratorTest.java
----------------------------------------------------------------------
diff --git a/core/src/test/java/org/apache/accumulo/core/iterators/user/IndexedDocIteratorTest.java b/core/src/test/java/org/apache/accumulo/core/iterators/user/IndexedDocIteratorTest.java
index 5508a4d..4681a61 100644
--- a/core/src/test/java/org/apache/accumulo/core/iterators/user/IndexedDocIteratorTest.java
+++ b/core/src/test/java/org/apache/accumulo/core/iterators/user/IndexedDocIteratorTest.java
@@ -24,8 +24,6 @@ import java.util.Map.Entry;
 import java.util.Random;
 import java.util.TreeMap;
 
-import junit.framework.TestCase;
-
 import org.apache.accumulo.core.client.IteratorSetting;
 import org.apache.accumulo.core.data.ByteSequence;
 import org.apache.accumulo.core.data.Key;
@@ -40,8 +38,12 @@ import org.apache.accumulo.core.iterators.system.MultiIterator;
 import org.apache.hadoop.io.Text;
 import org.apache.log4j.Level;
 import org.apache.log4j.Logger;
+import org.junit.Before;
+import org.junit.Test;
+
+import static org.junit.Assert.*;
 
-public class IndexedDocIteratorTest extends TestCase {
+public class IndexedDocIteratorTest {
   
   private static final Logger log = Logger.getLogger(IndexedDocIteratorTest.class);
   
@@ -171,7 +173,7 @@ public class IndexedDocIteratorTest extends TestCase {
   
   public void testNull() {}
   
-  @Override
+  @Before
   public void setUp() {
     Logger.getRootLogger().setLevel(Level.ERROR);
   }
@@ -179,6 +181,7 @@ public class IndexedDocIteratorTest extends TestCase {
   private static final int NUM_ROWS = 5;
   private static final int NUM_DOCIDS = 200;
   
+  @Test
   public void test1() throws IOException {
     columnFamilies = new Text[2];
     columnFamilies[0] = new Text("CC");
@@ -216,6 +219,7 @@ public class IndexedDocIteratorTest extends TestCase {
     cleanup();
   }
   
+  @Test
   public void test2() throws IOException {
     columnFamilies = new Text[3];
     columnFamilies[0] = new Text("A");
@@ -250,6 +254,7 @@ public class IndexedDocIteratorTest extends TestCase {
     cleanup();
   }
   
+  @Test
   public void test3() throws IOException {
     columnFamilies = new Text[6];
     columnFamilies[0] = new Text("C");
@@ -292,6 +297,7 @@ public class IndexedDocIteratorTest extends TestCase {
     cleanup();
   }
   
+  @Test
   public void test4() throws IOException {
     columnFamilies = new Text[3];
     boolean[] notFlags = new boolean[3];

http://git-wip-us.apache.org/repos/asf/accumulo/blob/3fcd07de/core/src/test/java/org/apache/accumulo/core/security/ColumnVisibilityTest.java
----------------------------------------------------------------------
diff --git a/core/src/test/java/org/apache/accumulo/core/security/ColumnVisibilityTest.java b/core/src/test/java/org/apache/accumulo/core/security/ColumnVisibilityTest.java
index df1863a..d463f42 100644
--- a/core/src/test/java/org/apache/accumulo/core/security/ColumnVisibilityTest.java
+++ b/core/src/test/java/org/apache/accumulo/core/security/ColumnVisibilityTest.java
@@ -16,8 +16,7 @@
  */
 package org.apache.accumulo.core.security;
 
-import static org.junit.Assert.assertArrayEquals;
-import static org.junit.Assert.fail;
+import static org.junit.Assert.*;
 
 import org.junit.Test;
 
@@ -64,13 +63,6 @@ public class ColumnVisibilityTest {
     shouldThrow("a*b");
   }
   
-  public void normalized(String... values) {
-    for (int i = 0; i < values.length; i += 2) {
-      ColumnVisibility cv = new ColumnVisibility(values[i].getBytes());
-      assertArrayEquals(cv.flatten(), values[i + 1].getBytes());
-    }
-  }
-  
   @Test
   public void testComplexCompound() {
     shouldNotThrow("(a|b)&(x|y)");
@@ -79,11 +71,61 @@ public class ColumnVisibilityTest {
     shouldNotThrow("(one&two)|(foo&bar)", "(one|foo)&three", "one|foo|bar", "(one|foo)|bar", "((one|foo)|bar)&two");
   }
   
+  public void normalized(String... values) {
+    for (int i = 0; i < values.length; i += 2) {
+      ColumnVisibility cv = new ColumnVisibility(values[i].getBytes());
+      assertArrayEquals(cv.getExpression(), values[i + 1].getBytes());
+    }
+  }
+  
   @Test
   public void testNormalization() {
     normalized("a", "a", "(a)", "a", "b|a", "a|b", "(b)|a", "a|b", "(b|(a|c))&x", "x&(a|b|c)", "(((a)))", "a");
+    normalized("a|a", "a", "a|(a&a)", "a", "(a&b)|(b&a)", "a&b");
+    normalized("a|(a|(a|b))","a|b");
+    normalized("a|(a|(a|a))","a");
+  }
+  
+  public void aOrBEqualC(String a, String b, String c)
+  {
+    ColumnVisibility cvA = new ColumnVisibility(a.getBytes());
+    ColumnVisibility cvB = new ColumnVisibility(b.getBytes());
+    ColumnVisibility cvC = cvA.or(cvB);
+    assertArrayEquals(cvC.getExpression(), c.getBytes());
+    // check that we didn't disturb the original ColumnVisibilities
+    assertArrayEquals(cvA.getExpression(), a.getBytes());
+    assertArrayEquals(cvB.getExpression(), b.getBytes());
+  }
+  
+  @Test
+  public void testDisjunction() {
+    aOrBEqualC("a", "b", "a|b");
+    aOrBEqualC("c|(a&b)", "b", "b|c|(a&b)");
+    aOrBEqualC("c|(a&b)", "a|c","a|c|(a&b)");
+    aOrBEqualC("a&b","c&d","(a&b)|(c&d)");
+    aOrBEqualC("a","","");
   }
   
+  public void aAndBEqualC(String a, String b, String c)
+  {
+    ColumnVisibility cvA = new ColumnVisibility(a.getBytes());
+    ColumnVisibility cvB = new ColumnVisibility(b.getBytes());
+    ColumnVisibility cvC = cvA.and(cvB);
+    assertArrayEquals(cvC.getExpression(), c.getBytes());
+    // check that we didn't disturb the original ColumnVisibilities
+    assertArrayEquals(cvA.getExpression(), a.getBytes());
+    assertArrayEquals(cvB.getExpression(), b.getBytes());
+  }
+  
+  @Test
+  public void testConjunction() {
+    aAndBEqualC("a", "b", "a&b");
+    aAndBEqualC("a&b", "c", "a&b&c");
+    aAndBEqualC("a&(b|(c&d))", "e&(b|(c&d))","a&e&(b|(c&d))");
+    aAndBEqualC("a|b","c|d","(a|b)&(c|d)");
+    aAndBEqualC("a","","a");
+  }
+
   @Test
   public void testDanglingOperators() {
     shouldThrow("a|b&");

http://git-wip-us.apache.org/repos/asf/accumulo/blob/3fcd07de/core/src/test/java/org/apache/accumulo/core/security/VisibilityEvaluatorTest.java
----------------------------------------------------------------------
diff --git a/core/src/test/java/org/apache/accumulo/core/security/VisibilityEvaluatorTest.java b/core/src/test/java/org/apache/accumulo/core/security/VisibilityEvaluatorTest.java
index b5c2455..7612e15 100644
--- a/core/src/test/java/org/apache/accumulo/core/security/VisibilityEvaluatorTest.java
+++ b/core/src/test/java/org/apache/accumulo/core/security/VisibilityEvaluatorTest.java
@@ -26,33 +26,33 @@ import org.junit.Test;
 public class VisibilityEvaluatorTest {
   
   @Test
-  public void testVisibilityEvaluator() throws VisibilityParseException {
-    VisibilityEvaluator ct = new VisibilityEvaluator(ByteArraySet.fromStrings("one", "two", "three", "four"));
+  public void testVisibilityEvaluator() {
+    Authorizations auths = new Authorizations(ByteArraySet.fromStrings("one", "two", "three", "four"));
     
     // test for and
-    assertTrue("'and' test", ct.evaluate(new ColumnVisibility("one&two")));
+    assertTrue("'and' test", new ColumnVisibility("one&two").evaluate(auths));
     
     // test for or
-    assertTrue("'or' test", ct.evaluate(new ColumnVisibility("foor|four")));
+    assertTrue("'or' test", new ColumnVisibility("foor|four").evaluate(auths));
     
     // test for and and or
-    assertTrue("'and' and 'or' test", ct.evaluate(new ColumnVisibility("(one&two)|(foo&bar)")));
+    assertTrue("'and' and 'or' test", new ColumnVisibility("(one&two)|(foo&bar)").evaluate(auths));
     
     // test for false negatives
     for (String marking : new String[] {"one", "one|five", "five|one", "(one)", "(one&two)|(foo&bar)", "(one|foo)&three", "one|foo|bar", "(one|foo)|bar",
         "((one|foo)|bar)&two"}) {
-      assertTrue(marking, ct.evaluate(new ColumnVisibility(marking)));
+      assertTrue(marking, new ColumnVisibility(marking).evaluate(auths));
     }
     
     // test for false positives
     for (String marking : new String[] {"five", "one&five", "five&one", "((one|foo)|bar)&goober"}) {
-      assertFalse(marking, ct.evaluate(new ColumnVisibility(marking)));
+      assertFalse(marking, new ColumnVisibility(marking).evaluate(auths));
     }
     
     // test missing separators; these should throw an exception
     for (String marking : new String[] {"one(five)", "(five)one", "(one)(two)", "a|(b(c))"}) {
       try {
-        ct.evaluate(new ColumnVisibility(marking));
+        new ColumnVisibility(marking).evaluate(auths);
         fail(marking + " failed to throw");
       } catch (Throwable e) {
         // all is good
@@ -62,7 +62,7 @@ public class VisibilityEvaluatorTest {
     // test unexpected separator
     for (String marking : new String[] {"&(five)", "|(five)", "(five)&", "five|", "a|(b)&", "(&five)", "(five|)"}) {
       try {
-        ct.evaluate(new ColumnVisibility(marking));
+        new ColumnVisibility(marking).evaluate(auths);
         fail(marking + " failed to throw");
       } catch (Throwable e) {
         // all is good
@@ -72,7 +72,7 @@ public class VisibilityEvaluatorTest {
     // test mismatched parentheses
     for (String marking : new String[] {"(", ")", "(a&b", "b|a)"}) {
       try {
-        ct.evaluate(new ColumnVisibility(marking));
+        new ColumnVisibility(marking).evaluate(auths);
         fail(marking + " failed to throw");
       } catch (Throwable e) {
         // all is good


[06/15] git commit: ACCUMULO-652 limited size of minimumVisibility and consolidated code for block stats

Posted by el...@apache.org.
ACCUMULO-652 limited size of minimumVisibility and consolidated code for block stats

git-svn-id: https://svn.apache.org/repos/asf/accumulo/branches/ACCUMULO-652@1354746 13f79535-47bb-0310-9956-ffa450edef68


Project: http://git-wip-us.apache.org/repos/asf/accumulo/repo
Commit: http://git-wip-us.apache.org/repos/asf/accumulo/commit/8a3ddeb3
Tree: http://git-wip-us.apache.org/repos/asf/accumulo/tree/8a3ddeb3
Diff: http://git-wip-us.apache.org/repos/asf/accumulo/diff/8a3ddeb3

Branch: refs/heads/ACCUMULO-652
Commit: 8a3ddeb36643d8c77403e0fefb38d43f573c47b0
Parents: 064403b
Author: Adam Fuchs <af...@apache.org>
Authored: Wed Jun 27 21:41:31 2012 +0000
Committer: Adam Fuchs <af...@apache.org>
Committed: Wed Jun 27 21:41:31 2012 +0000

----------------------------------------------------------------------
 .../core/file/rfile/MultiLevelIndex.java        | 83 ++++++--------------
 .../apache/accumulo/core/file/rfile/RFile.java  | 43 +++-------
 .../core/file/rfile/MultiLevelIndexTest.java    |  7 +-
 3 files changed, 38 insertions(+), 95 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/accumulo/blob/8a3ddeb3/core/src/main/java/org/apache/accumulo/core/file/rfile/MultiLevelIndex.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/file/rfile/MultiLevelIndex.java b/core/src/main/java/org/apache/accumulo/core/file/rfile/MultiLevelIndex.java
index e2b4b15..4163894 100644
--- a/core/src/main/java/org/apache/accumulo/core/file/rfile/MultiLevelIndex.java
+++ b/core/src/main/java/org/apache/accumulo/core/file/rfile/MultiLevelIndex.java
@@ -47,21 +47,15 @@ public class MultiLevelIndex {
   
   public static class IndexEntry implements WritableComparable<IndexEntry> {
     private Key key;
-    private long minTimestamp;
-    private long maxTimestamp;
-    private ColumnVisibility minimumVisibility = null;
-    private int entries;
+    private BlockStats blockStats;
     private long offset;
     private long compressedSize;
     private long rawSize;
     private int format;
     
-    IndexEntry(Key k, long minTimestamp, long maxTimestamp, ColumnVisibility minimumVisibility, int e, long offset, long compressedSize, long rawSize, int version) {
+    IndexEntry(Key k, BlockStats blockStats, long offset, long compressedSize, long rawSize, int version) {
       this.key = k;
-      this.minTimestamp = minTimestamp;
-      this.maxTimestamp = maxTimestamp;
-      this.minimumVisibility = minimumVisibility;
-      this.entries = e;
+      this.blockStats = blockStats;
       this.offset = offset;
       this.compressedSize = compressedSize;
       this.rawSize = rawSize;
@@ -76,20 +70,8 @@ public class MultiLevelIndex {
     public void readFields(DataInput in) throws IOException {
       key = new Key();
       key.readFields(in);
-      if(format == RFile.RINDEX_VER_7)
-      {
-        minTimestamp = in.readLong();
-        maxTimestamp = in.readLong();
-        byte[] visibility = new byte[in.readInt()];
-        in.readFully(visibility);
-        minimumVisibility = new ColumnVisibility(visibility);
-      }
-      else
-      {
-        minTimestamp = Long.MIN_VALUE;
-        maxTimestamp = Long.MAX_VALUE;
-      }
-      entries = in.readInt();
+      blockStats = new BlockStats(format);
+      blockStats.readFields(in);
       if (format == RFile.RINDEX_VER_6 || format == RFile.RINDEX_VER_7) {
         offset = Utils.readVLong(in);
         compressedSize = Utils.readVLong(in);
@@ -104,15 +86,7 @@ public class MultiLevelIndex {
     @Override
     public void write(DataOutput out) throws IOException {
       key.write(out);
-      if(format == RFile.RINDEX_VER_7)
-      {
-        out.writeLong(minTimestamp);
-        out.writeLong(maxTimestamp);
-        byte[] visibility = minimumVisibility.getExpression();
-        out.writeInt(visibility.length);
-        out.write(visibility);
-      }
-      out.writeInt(entries);
+      blockStats.write(out);
       if (format == RFile.RINDEX_VER_6 || format == RFile.RINDEX_VER_7) {
         Utils.writeVLong(out, offset);
         Utils.writeVLong(out, compressedSize);
@@ -125,7 +99,7 @@ public class MultiLevelIndex {
     }
     
     public int getNumEntries() {
-      return entries;
+      return blockStats.entries;
     }
     
     public long getOffset() {
@@ -233,9 +207,7 @@ public class MultiLevelIndex {
     private ByteArrayOutputStream indexBytes;
     private DataOutputStream indexOut;
     
-    private long minTimestamp = Long.MAX_VALUE;
-    private long maxTimestamp = Long.MIN_VALUE;
-    private ColumnVisibility minimumVisibility = null;
+    private BlockStats blockStats = new BlockStats();
     
     private ArrayList<Integer> offsets;
     private int level;
@@ -256,17 +228,10 @@ public class MultiLevelIndex {
     
     public IndexBlock() {}
     
-    public void add(Key key, long minTimestamp, long maxTimestamp, ColumnVisibility minimumVisibility, int value, long offset, long compressedSize, long rawSize, int version) throws IOException {
+    public void add(Key key, BlockStats blockStats, long offset, long compressedSize, long rawSize, int version) throws IOException {
       offsets.add(indexOut.size());
-      if (this.minTimestamp > minTimestamp)
-        this.minTimestamp = minTimestamp;
-      if (this.maxTimestamp < maxTimestamp)
-        this.maxTimestamp = maxTimestamp;
-      if(this.minimumVisibility == null)
-        this.minimumVisibility = minimumVisibility;
-      else
-        this.minimumVisibility = this.minimumVisibility.or(minimumVisibility);
-      new IndexEntry(key, minTimestamp, maxTimestamp, minimumVisibility, value, offset, compressedSize, rawSize, version).write(indexOut);
+      this.blockStats.updateBlockStats(blockStats);
+      new IndexEntry(key, blockStats, offset, compressedSize, rawSize, version).write(indexOut);
     }
     
     int getSize() {
@@ -414,7 +379,7 @@ public class MultiLevelIndex {
       IndexEntry ie = new IndexEntry(version);
       for (int i = 0; i < buffered; i++) {
         ie.readFields(dis);
-        writer.add(ie.getKey(), ie.minTimestamp, ie.maxTimestamp, ie.minimumVisibility, ie.getNumEntries(), ie.getOffset(), ie.getCompressedSize(), ie.getRawSize(), ie.format);
+        writer.add(ie.getKey(), ie.blockStats, ie.getOffset(), ie.getCompressedSize(), ie.getRawSize(), ie.format);
       }
       
       buffered = 0;
@@ -423,18 +388,18 @@ public class MultiLevelIndex {
       
     }
     
-    public void add(Key key, long minTimestamp, long maxTimestamp, ColumnVisibility minimumVisibility, int data, long offset, long compressedSize, long rawSize, int version) throws IOException {
+    public void add(Key key, BlockStats blockStats, long offset, long compressedSize, long rawSize, int version) throws IOException {
       if (buffer.size() > (10 * 1 << 20)) {
         flush();
       }
       
-      new IndexEntry(key, minTimestamp, maxTimestamp, minimumVisibility, data, offset, compressedSize, rawSize, version).write(buffer);
+      new IndexEntry(key, blockStats, offset, compressedSize, rawSize, version).write(buffer);
       buffered++;
     }
     
-    public void addLast(Key key, long minTimestamp, long maxTimestamp, ColumnVisibility minimumVisibility, int data, long offset, long compressedSize, long rawSize, int version) throws IOException {
+    public void addLast(Key key, BlockStats blockStats, long offset, long compressedSize, long rawSize, int version) throws IOException {
       flush();
-      writer.addLast(key, minTimestamp, maxTimestamp, minimumVisibility, data, offset, compressedSize, rawSize, version);
+      writer.addLast(key, blockStats, offset, compressedSize, rawSize, version);
     }
     
     public void close(DataOutput out) throws IOException {
@@ -459,7 +424,7 @@ public class MultiLevelIndex {
       levels = new ArrayList<IndexBlock>();
     }
     
-    private void add(int level, Key key, long minTimestamp, long maxTimestamp, ColumnVisibility minimumVisibility, int data, long offset, long compressedSize, long rawSize, boolean last, int version)
+    private void add(int level, Key key, BlockStats blockStats, long offset, long compressedSize, long rawSize, boolean last, int version)
         throws IOException {
       if (level == levels.size()) {
         levels.add(new IndexBlock(level, 0));
@@ -467,7 +432,7 @@ public class MultiLevelIndex {
       
       IndexBlock iblock = levels.get(level);
       
-      iblock.add(key, minTimestamp, maxTimestamp, minimumVisibility, data, offset, compressedSize, rawSize, version);
+      iblock.add(key, blockStats, offset, compressedSize, rawSize, version);
       
       if (last && level == levels.size() - 1)
         return;
@@ -478,7 +443,7 @@ public class MultiLevelIndex {
         iblock.write(out);
         out.close();
         
-        add(level + 1, key, iblock.minTimestamp, iblock.maxTimestamp, iblock.minimumVisibility, 0, out.getStartPos(), out.getCompressedSize(), out.getRawSize(), last, version);
+        add(level + 1, key, blockStats, out.getStartPos(), out.getCompressedSize(), out.getRawSize(), last, version);
         
         if (last)
           levels.set(level, null);
@@ -487,17 +452,17 @@ public class MultiLevelIndex {
       }
     }
     
-    public void add(Key key, long minTimestamp, long maxTimestamp, ColumnVisibility minimumVisibility, int data, long offset, long compressedSize, long rawSize, int version) throws IOException {
+    public void add(Key key, BlockStats blockStats, long offset, long compressedSize, long rawSize, int version) throws IOException {
       totalAdded++;
-      add(0, key, minTimestamp, maxTimestamp, minimumVisibility, data, offset, compressedSize, rawSize, false, version);
+      add(0, key, blockStats, offset, compressedSize, rawSize, false, version);
     }
     
-    public void addLast(Key key, long minTimestamp, long maxTimestamp, ColumnVisibility minimumVisibility, int data, long offset, long compressedSize, long rawSize, int version) throws IOException {
+    public void addLast(Key key, BlockStats blockStats, long offset, long compressedSize, long rawSize, int version) throws IOException {
       if (addedLast)
         throw new IllegalStateException("already added last");
       
       totalAdded++;
-      add(0, key, minTimestamp, maxTimestamp, minimumVisibility, data, offset, compressedSize, rawSize, true, version);
+      add(0, key, blockStats, offset, compressedSize, rawSize, true, version);
       addedLast = true;
       
     }
@@ -548,7 +513,7 @@ public class MultiLevelIndex {
       
       private final boolean checkFilterIndexEntry(IndexEntry ie) {
         if(timestampFilter == null)
-        if (timestampFilter != null && (ie.maxTimestamp < timestampFilter.startTimestamp || ie.minTimestamp > timestampFilter.endTimestamp)) {
+        if (timestampFilter != null && (ie.blockStats.maxTimestamp < timestampFilter.startTimestamp || ie.blockStats.minTimestamp > timestampFilter.endTimestamp)) {
           return false;
         }
         return true;

http://git-wip-us.apache.org/repos/asf/accumulo/blob/8a3ddeb3/core/src/main/java/org/apache/accumulo/core/file/rfile/RFile.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/file/rfile/RFile.java b/core/src/main/java/org/apache/accumulo/core/file/rfile/RFile.java
index 06000f8..c4bdb7f 100644
--- a/core/src/main/java/org/apache/accumulo/core/file/rfile/RFile.java
+++ b/core/src/main/java/org/apache/accumulo/core/file/rfile/RFile.java
@@ -302,15 +302,12 @@ public class RFile {
     private ABlockWriter blockWriter;
     
     // private BlockAppender blockAppender;
-    private long blockSize = 100000;
-    private int indexBlockSize;
-    private int entries = 0;
-    
+    private final long blockSize;
+    private final int indexBlockSize;
+
     // some aggregate stats to keep on a per-block basis
-    private long minTimestamp = Long.MAX_VALUE;
-    private long maxTimestamp = Long.MIN_VALUE;
-    private ColumnVisibility minimumVisibility = null;
-    
+    private BlockStats blockStats = new BlockStats();
+        
     private ArrayList<LocalityGroupMetadata> localityGroups = new ArrayList<LocalityGroupMetadata>();
     private LocalityGroupMetadata currentLocalityGroup = null;
     private int nextBlock = 0;
@@ -378,27 +375,7 @@ public class RFile {
       }
     }
     
-    private void updateBlockStats(Key key, Value value)
-    {
-      if(minTimestamp > key.getTimestamp())
-        minTimestamp = key.getTimestamp();
-      if(maxTimestamp < key.getTimestamp())
-        maxTimestamp = key.getTimestamp();
-      if(minimumVisibility == null)
-        minimumVisibility = new ColumnVisibility(key.getColumnVisibility());
-      else
-        minimumVisibility = minimumVisibility.or(new ColumnVisibility(key.getColumnVisibility()));
-      entries++;
-    }
-    
-    private void clearBlockStats()
-    {
-      minTimestamp = Long.MAX_VALUE;
-      maxTimestamp = Long.MIN_VALUE;
-      minimumVisibility = null;      
-      entries = 0;
-    }
-    
+
     public void append(Key key, Value value) throws IOException {
       if (dataClosed) {
         throw new IllegalStateException("Cannont append, data closed");
@@ -425,7 +402,7 @@ public class RFile {
       
       rk.write(blockWriter);
       value.write(blockWriter);
-      updateBlockStats(key,value);
+      blockStats.updateBlockStats(key,value);
       
       
       prevKey = new Key(key);
@@ -437,11 +414,11 @@ public class RFile {
       blockWriter.close();
       
       if (lastBlock)
-        currentLocalityGroup.indexWriter.addLast(key, minTimestamp, maxTimestamp, minimumVisibility, entries, blockWriter.getStartPos(), blockWriter.getCompressedSize(), blockWriter.getRawSize(), RINDEX_VER_7);
+        currentLocalityGroup.indexWriter.addLast(key, blockStats, blockWriter.getStartPos(), blockWriter.getCompressedSize(), blockWriter.getRawSize(), RINDEX_VER_7);
       else
-        currentLocalityGroup.indexWriter.add(key, minTimestamp, maxTimestamp, minimumVisibility, entries, blockWriter.getStartPos(), blockWriter.getCompressedSize(), blockWriter.getRawSize(), RINDEX_VER_7);
+        currentLocalityGroup.indexWriter.add(key, blockStats, blockWriter.getStartPos(), blockWriter.getCompressedSize(), blockWriter.getRawSize(), RINDEX_VER_7);
       
-      clearBlockStats();
+      blockStats = new BlockStats();
       blockWriter = null;
       lastKeyInBlock = null;
       nextBlock++;

http://git-wip-us.apache.org/repos/asf/accumulo/blob/8a3ddeb3/core/src/test/java/org/apache/accumulo/core/file/rfile/MultiLevelIndexTest.java
----------------------------------------------------------------------
diff --git a/core/src/test/java/org/apache/accumulo/core/file/rfile/MultiLevelIndexTest.java b/core/src/test/java/org/apache/accumulo/core/file/rfile/MultiLevelIndexTest.java
index 3da616d..b84f277 100644
--- a/core/src/test/java/org/apache/accumulo/core/file/rfile/MultiLevelIndexTest.java
+++ b/core/src/test/java/org/apache/accumulo/core/file/rfile/MultiLevelIndexTest.java
@@ -59,9 +59,10 @@ public class MultiLevelIndexTest extends TestCase {
     BufferedWriter mliw = new BufferedWriter(new Writer(_cbw, maxBlockSize));
     
     for (int i = 0; i < num; i++)
-      mliw.add(new Key(String.format("%05d000", i)), 0l, 0l, new ColumnVisibility(), i, 0, 0, 0, RFile.RINDEX_VER_7);
-    
-    mliw.addLast(new Key(String.format("%05d000", num)), 0l, 0l, new ColumnVisibility(), num, 0, 0, 0, RFile.RINDEX_VER_7);
+    {
+      mliw.add(new Key(String.format("%05d000", i)), new BlockStats(0,0,new ColumnVisibility(),i), 0, 0, 0, RFile.RINDEX_VER_7);
+    }
+    mliw.addLast(new Key(String.format("%05d000", num)), new BlockStats(0,0,new ColumnVisibility(),num), 0, 0, 0, RFile.RINDEX_VER_7);
     
     ABlockWriter root = _cbw.prepareMetaBlock("root");
     mliw.close(root);


[10/15] git commit: ACCUMULO-652 merged changes from trunk

Posted by el...@apache.org.
ACCUMULO-652 merged changes from trunk

git-svn-id: https://svn.apache.org/repos/asf/accumulo/branches/ACCUMULO-652@1357909 13f79535-47bb-0310-9956-ffa450edef68


Project: http://git-wip-us.apache.org/repos/asf/accumulo/repo
Commit: http://git-wip-us.apache.org/repos/asf/accumulo/commit/7bfa823e
Tree: http://git-wip-us.apache.org/repos/asf/accumulo/tree/7bfa823e
Diff: http://git-wip-us.apache.org/repos/asf/accumulo/diff/7bfa823e

Branch: refs/heads/ACCUMULO-652
Commit: 7bfa823ef28aa5cb70b296d7f3bf29b4d2e98439
Parents: 665887f 368cabd
Author: Adam Fuchs <af...@apache.org>
Authored: Thu Jul 5 21:03:16 2012 +0000
Committer: Adam Fuchs <af...@apache.org>
Committed: Thu Jul 5 21:03:16 2012 +0000

----------------------------------------------------------------------
 bin/tool.sh                                     |  11 +-
 core/pom.xml                                    |   4 +
 .../org/apache/accumulo/core/Constants.java     |   2 +
 .../accumulo/core/client/ZooKeeperInstance.java |   2 +-
 .../client/admin/InstanceOperationsImpl.java    |   2 +-
 .../accumulo/core/client/impl/ServerClient.java |   2 +-
 .../accumulo/core/client/impl/Tables.java       |   2 +-
 .../core/client/mock/MockBatchScanner.java      |  20 +-
 .../core/conf/DefaultConfiguration.java         |   9 +
 .../org/apache/accumulo/core/conf/Property.java |   4 +-
 .../accumulo/core/data/ArrayByteSequence.java   |   6 +-
 .../accumulo/core/file/rfile/bcfile/BCFile.java |   2 +-
 .../accumulo/core/trace/DistributedTrace.java   |   2 +-
 .../accumulo/core/trace/ZooTraceClient.java     |   2 +-
 .../org/apache/accumulo/core/util/Merge.java    |  10 +-
 .../accumulo/core/util/SimpleThreadPool.java    |   5 +-
 .../apache/accumulo/core/util/shell/Shell.java  |   5 +-
 .../accumulo/core/zookeeper/IZooReader.java     |  41 --
 .../accumulo/core/zookeeper/ZooCache.java       | 310 -------------
 .../accumulo/core/zookeeper/ZooReader.java      |  83 ----
 .../accumulo/core/zookeeper/ZooSession.java     | 147 ------
 .../apache/accumulo/core/zookeeper/ZooUtil.java | 234 +---------
 .../core/iterators/FirstEntryInRowTest.java     |   1 +
 .../wikisearch/iterator/AndIterator.java        |  18 +-
 .../examples/wikisearch/query/Query.java        |   5 +-
 fate/pom.xml                                    |  59 +++
 .../org/apache/accumulo/fate/AdminUtil.java     | 172 +++++++
 .../java/org/apache/accumulo/fate/Fate.java     | 233 ++++++++++
 .../java/org/apache/accumulo/fate/Repo.java     |  36 ++
 .../accumulo/fate/StackOverflowException.java   |  27 ++
 .../java/org/apache/accumulo/fate/TStore.java   | 132 ++++++
 .../java/org/apache/accumulo/fate/ZooStore.java | 425 +++++++++++++++++
 .../org/apache/accumulo/fate/util/Daemon.java   |  60 +++
 .../accumulo/fate/util/LoggingRunnable.java     |  65 +++
 .../accumulo/fate/util/UtilWaitThread.java      |  31 ++
 .../zookeeper/DistributedReadWriteLock.java     | 254 ++++++++++
 .../accumulo/fate/zookeeper/IZooReader.java     |  41 ++
 .../fate/zookeeper/IZooReaderWriter.java        |  63 +++
 .../fate/zookeeper/TransactionWatcher.java      |  73 +++
 .../accumulo/fate/zookeeper/ZooCache.java       | 310 +++++++++++++
 .../apache/accumulo/fate/zookeeper/ZooLock.java | 460 +++++++++++++++++++
 .../accumulo/fate/zookeeper/ZooQueueLock.java   | 119 +++++
 .../accumulo/fate/zookeeper/ZooReader.java      |  78 ++++
 .../fate/zookeeper/ZooReaderWriter.java         | 204 ++++++++
 .../accumulo/fate/zookeeper/ZooReservation.java |  75 +++
 .../accumulo/fate/zookeeper/ZooSession.java     | 147 ++++++
 .../apache/accumulo/fate/zookeeper/ZooUtil.java | 256 +++++++++++
 .../zookeeper/DistributedReadWriteLockTest.java | 132 ++++++
 .../fate/zookeeper/TransactionWatcherTest.java  | 126 +++++
 pom.xml                                         |   6 +
 server/pom.xml                                  |   4 +
 .../accumulo/server/client/BulkImporter.java    |  42 +-
 .../accumulo/server/client/HdfsZooInstance.java |   2 +-
 .../server/conf/TableConfiguration.java         |   2 +-
 .../accumulo/server/conf/ZooConfiguration.java  |   2 +-
 .../org/apache/accumulo/server/fate/Admin.java  | 158 +------
 .../org/apache/accumulo/server/fate/Fate.java   | 234 ----------
 .../org/apache/accumulo/server/fate/Print.java  | 136 ------
 .../org/apache/accumulo/server/fate/Repo.java   |  36 --
 .../server/fate/StackOverflowException.java     |  27 --
 .../org/apache/accumulo/server/fate/TStore.java | 132 ------
 .../apache/accumulo/server/fate/ZooStore.java   | 425 -----------------
 .../server/gc/SimpleGarbageCollector.java       |   4 +-
 .../accumulo/server/master/LiveTServerSet.java  |   2 +-
 .../apache/accumulo/server/master/Master.java   |  32 +-
 .../server/master/TServerLockWatcher.java       |   4 +-
 .../server/master/recovery/RecoverLease.java    |   2 +-
 .../master/recovery/SubmitFileForRecovery.java  |  11 +-
 .../server/master/state/DeadServerList.java     |   6 +-
 .../server/master/state/SetGoalState.java       |   2 +-
 .../accumulo/server/master/state/ZooStore.java  |   6 +-
 .../master/state/tables/TableManager.java       |   8 +-
 .../server/master/tableOps/BulkImport.java      | 187 ++++++--
 .../master/tableOps/ChangeTableState.java       |   2 +-
 .../server/master/tableOps/CloneTable.java      |   4 +-
 .../server/master/tableOps/CompactRange.java    |   6 +-
 .../server/master/tableOps/CreateTable.java     |   4 +-
 .../server/master/tableOps/DeleteTable.java     |   2 +-
 .../server/master/tableOps/MasterRepo.java      |   2 +-
 .../server/master/tableOps/RenameTable.java     |   6 +-
 .../server/master/tableOps/TableRangeOp.java    |   2 +-
 .../server/master/tableOps/TraceRepo.java       |   2 +-
 .../accumulo/server/master/tableOps/Utils.java  |   8 +-
 .../master/tserverOps/ShutdownTServer.java      |   6 +-
 .../server/monitor/servlets/BasicServlet.java   |   6 +-
 .../server/monitor/servlets/DefaultServlet.java |  29 +-
 .../server/monitor/servlets/VisServlet.java     |  62 +--
 .../accumulo/server/problems/ProblemReport.java |   4 +-
 .../server/problems/ProblemReports.java         |   2 +-
 .../server/security/ZKAuthenticator.java        |   6 +-
 .../tabletserver/BulkFailedCopyProcessor.java   |  72 +++
 .../accumulo/server/tabletserver/Tablet.java    |   8 +-
 .../server/tabletserver/TabletServer.java       |  27 +-
 .../server/tabletserver/log/LogSorter.java      | 242 +++-------
 .../apache/accumulo/server/test/TestIngest.java |   4 +-
 .../accumulo/server/test/VerifyIngest.java      |   4 +-
 .../test/continuous/UndefinedAnalyzer.java      | 140 +++---
 .../server/test/functional/CacheTestClean.java  |   4 +-
 .../server/test/functional/CacheTestReader.java |   3 +-
 .../server/test/functional/CacheTestWriter.java |   7 +-
 .../test/functional/SplitRecoveryTest.java      |   8 +-
 .../server/test/functional/ZombieTServer.java   |   6 +-
 .../server/test/randomwalk/Framework.java       |   4 +-
 .../accumulo/server/test/randomwalk/State.java  |   3 +-
 .../randomwalk/security/SecurityHelper.java     |   4 -
 .../accumulo/server/test/scalability/Run.java   |   7 +-
 .../accumulo/server/trace/TraceServer.java      |   2 +-
 .../accumulo/server/util/ChangeSecret.java      |   8 +-
 .../accumulo/server/util/CleanZookeeper.java    |   4 +-
 .../accumulo/server/util/DeleteZooInstance.java |   4 +-
 .../accumulo/server/util/DumpZookeeper.java     |   2 +-
 .../accumulo/server/util/FileSystemMonitor.java |   7 +-
 .../apache/accumulo/server/util/Initialize.java |   6 +-
 .../accumulo/server/util/ListInstances.java     |   2 +-
 .../accumulo/server/util/MetadataTable.java     |   9 +-
 .../accumulo/server/util/RestoreZookeeper.java  |   2 +-
 .../accumulo/server/util/SystemPropUtil.java    |   4 +-
 .../accumulo/server/util/TablePropUtil.java     |   4 +-
 .../accumulo/server/util/TabletServerLocks.java |   2 +-
 .../org/apache/accumulo/server/util/ZooZap.java |   4 +-
 .../zookeeper/DistributedReadWriteLock.java     | 254 ----------
 .../server/zookeeper/DistributedWorkQueue.java  | 247 ++++++++++
 .../server/zookeeper/IZooReaderWriter.java      |  63 ---
 .../server/zookeeper/TransactionWatcher.java    |  66 +--
 .../accumulo/server/zookeeper/ZooCache.java     |   2 +-
 .../accumulo/server/zookeeper/ZooLock.java      | 432 +----------------
 .../accumulo/server/zookeeper/ZooQueueLock.java |  93 +---
 .../server/zookeeper/ZooReaderWriter.java       | 133 +-----
 .../server/zookeeper/ZooReservation.java        |  75 ---
 .../zookeeper/DistributedReadWriteLockTest.java | 133 ------
 .../zookeeper/TransactionWatcherTest.java       | 127 -----
 test/system/auto/simple/examples.py             |   4 +-
 test/system/auto/simple/mapreduce.py            |   3 +-
 test/system/auto/stress/logger.py               |  55 ---
 test/system/test4/bulk_import_test.sh           |  20 +-
 135 files changed, 4533 insertions(+), 3907 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/accumulo/blob/7bfa823e/core/pom.xml
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/accumulo/blob/7bfa823e/core/src/main/java/org/apache/accumulo/core/Constants.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/accumulo/blob/7bfa823e/fate/pom.xml
----------------------------------------------------------------------
diff --cc fate/pom.xml
index 0000000,50c5a94..804bd6a
mode 000000,100644..100644
--- a/fate/pom.xml
+++ b/fate/pom.xml
@@@ -1,0 -1,59 +1,59 @@@
+ <?xml version="1.0" encoding="UTF-8"?>
+ <!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+ 
+       http://www.apache.org/licenses/LICENSE-2.0
+ 
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+ -->
+ <project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
+ 
+   <parent>
+     <groupId>org.apache.accumulo</groupId>
+     <artifactId>accumulo</artifactId>
 -    <version>1.5.0-SNAPSHOT</version>
++    <version>ACCUMULO-652-SNAPSHOT</version>
+   </parent>
+ 
+   <modelVersion>4.0.0</modelVersion>
+   <artifactId>accumulo-fate</artifactId>
+   <name>accumulo-fate</name>
+   <build>
+     <pluginManagement>
+       <plugins>
+         <plugin>
+           <artifactId>maven-jar-plugin</artifactId>
+           <configuration>
+             <outputDirectory>../lib</outputDirectory>
+           </configuration>
+         </plugin>
+       </plugins>
+     </pluginManagement>
+   </build>
+ 
+   <dependencies>
+     <dependency>
+       <groupId>log4j</groupId>
+       <artifactId>log4j</artifactId>
+     </dependency>
+ 
+     <dependency>
+       <groupId>commons-lang</groupId>
+       <artifactId>commons-lang</artifactId>
+     </dependency>
+ 
+     <dependency>
+       <groupId>org.apache.hadoop</groupId>
+       <artifactId>zookeeper</artifactId>
+     </dependency>
+   </dependencies>
+ 
+ </project>

http://git-wip-us.apache.org/repos/asf/accumulo/blob/7bfa823e/pom.xml
----------------------------------------------------------------------
diff --cc pom.xml
index d9e2950,9f03de0..a8fe066
--- a/pom.xml
+++ b/pom.xml
@@@ -528,8 -529,13 +529,13 @@@
        </dependency>
        <dependency>
          <groupId>org.apache.accumulo</groupId>
+         <artifactId>accumulo-fate</artifactId>
 -        <version>1.5.0-SNAPSHOT</version>
++        <version>ACCUMULO-652-SNAPSHOT</version>
+       </dependency>
+       <dependency>
+         <groupId>org.apache.accumulo</groupId>
          <artifactId>accumulo-start</artifactId>
 -        <version>1.5.0-SNAPSHOT</version>
 +        <version>ACCUMULO-652-SNAPSHOT</version>
        </dependency>
        <dependency>
          <groupId>org.apache.accumulo</groupId>

http://git-wip-us.apache.org/repos/asf/accumulo/blob/7bfa823e/server/pom.xml
----------------------------------------------------------------------


[08/15] git commit: ACCUMULO-652 added column visibility block indexing and unit test, added Filterer interface to the system iterators, made VisibilityFilter use the new filtering capability, added notion of optional filtering to the Filterer interface

Posted by el...@apache.org.
ACCUMULO-652 added column visibility block indexing and unit test, added Filterer interface to the system iterators, made VisibilityFilter use the new filtering capability, added notion of optional filtering to the Filterer interface

git-svn-id: https://svn.apache.org/repos/asf/accumulo/branches/ACCUMULO-652@1355353 13f79535-47bb-0310-9956-ffa450edef68


Project: http://git-wip-us.apache.org/repos/asf/accumulo/repo
Commit: http://git-wip-us.apache.org/repos/asf/accumulo/commit/af8cefa7
Tree: http://git-wip-us.apache.org/repos/asf/accumulo/tree/af8cefa7
Diff: http://git-wip-us.apache.org/repos/asf/accumulo/diff/af8cefa7

Branch: refs/heads/ACCUMULO-652
Commit: af8cefa7acd13f5241078bcf70dfbcc20aa5a935
Parents: 727e61e
Author: Adam Fuchs <af...@apache.org>
Authored: Fri Jun 29 13:11:48 2012 +0000
Committer: Adam Fuchs <af...@apache.org>
Committed: Fri Jun 29 13:11:48 2012 +0000

----------------------------------------------------------------------
 .../accumulo/core/file/rfile/BlockStats.java    | 117 +++++++++++++++++++
 .../apache/accumulo/core/file/rfile/RFile.java  |  22 ++--
 .../accumulo/core/iterators/Filterer.java       |  11 +-
 .../core/iterators/WrappingIterator.java        |  11 +-
 .../predicates/ColumnVisibilityPredicate.java   |  26 +++++
 .../core/iterators/system/MultiIterator.java    |  15 ++-
 .../system/SourceSwitchingIterator.java         |  35 +++++-
 .../iterators/system/SynchronizedIterator.java  |  13 ++-
 .../iterators/system/TimeSettingIterator.java   |  13 ++-
 .../core/iterators/system/VisibilityFilter.java |  14 ++-
 .../file/rfile/AuthorizationFilterTest.java     | 109 +++++++++++++++++
 .../core/file/rfile/TimestampFilterTest.java    |   3 +-
 12 files changed, 370 insertions(+), 19 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/accumulo/blob/af8cefa7/core/src/main/java/org/apache/accumulo/core/file/rfile/BlockStats.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/file/rfile/BlockStats.java b/core/src/main/java/org/apache/accumulo/core/file/rfile/BlockStats.java
new file mode 100644
index 0000000..d1b1eac
--- /dev/null
+++ b/core/src/main/java/org/apache/accumulo/core/file/rfile/BlockStats.java
@@ -0,0 +1,117 @@
+package org.apache.accumulo.core.file.rfile;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.security.ColumnVisibility;
+import org.apache.hadoop.io.Writable;
+
+public class BlockStats implements Writable {
+  
+  private static ColumnVisibility emptyVisibility = new ColumnVisibility();
+  private static int maxVisibilityLength = 100;
+  
+  public BlockStats(long minTimestamp, long maxTimestamp, ColumnVisibility minimumVisibility, int entries) {
+    this.minTimestamp = minTimestamp;
+    this.maxTimestamp = maxTimestamp;
+    this.minimumVisibility = minimumVisibility;
+    this.entries = entries;
+    this.version = RFile.RINDEX_VER_7;
+  }
+  
+  long minTimestamp = Long.MAX_VALUE;
+  long maxTimestamp = Long.MIN_VALUE;
+  ColumnVisibility minimumVisibility = null;
+  int entries = 0;
+  final int version;
+  
+  public void updateBlockStats(Key key, Value value) {
+    if (minTimestamp > key.getTimestamp())
+      minTimestamp = key.getTimestamp();
+    if (maxTimestamp < key.getTimestamp())
+      maxTimestamp = key.getTimestamp();
+    entries++;
+    if (key.getColumnVisibilityData().length() > 0)
+      combineVisibilities(new ColumnVisibility(key.getColumnVisibility()));
+    else
+      combineVisibilities(emptyVisibility);
+  }
+  
+  private void combineVisibilities(ColumnVisibility other) {
+    if (minimumVisibility == null)
+      minimumVisibility = other;
+    else
+      minimumVisibility = minimumVisibility.or(other);
+  }
+  
+  public void updateBlockStats(BlockStats other) {
+    this.entries += other.entries;
+    if (this.minTimestamp > other.minTimestamp)
+      this.minTimestamp = other.minTimestamp;
+    if (this.maxTimestamp < other.maxTimestamp)
+      this.maxTimestamp = other.maxTimestamp;
+    combineVisibilities(other.minimumVisibility);
+  }
+  
+  public BlockStats() {
+    minTimestamp = Long.MAX_VALUE;
+    maxTimestamp = Long.MIN_VALUE;
+    minimumVisibility = null;
+    entries = 0;
+    version = RFile.RINDEX_VER_7;
+  }
+  
+  public BlockStats(int version) {
+    this.version = version;
+  }
+  
+  @Override
+  public void readFields(DataInput in) throws IOException {
+    if (version == RFile.RINDEX_VER_7) {
+      minTimestamp = in.readLong();
+      maxTimestamp = in.readLong();
+      int visibilityLength = in.readInt();
+      if (visibilityLength >= 0) {
+        byte[] visibility = new byte[visibilityLength];
+        in.readFully(visibility);
+        minimumVisibility = new ColumnVisibility(visibility);
+      } else {
+        minimumVisibility = null;
+      }
+    } else {
+      minTimestamp = Long.MIN_VALUE;
+      maxTimestamp = Long.MAX_VALUE;
+      minimumVisibility = null;
+    }
+    entries = in.readInt();
+  }
+  
+  @Override
+  public void write(DataOutput out) throws IOException {
+    if (version == RFile.RINDEX_VER_7) {
+      out.writeLong(minTimestamp);
+      out.writeLong(maxTimestamp);
+      if (minimumVisibility == null)
+        out.writeInt(-1);
+      else {
+        byte[] visibility = minimumVisibility.getExpression();
+        if (visibility.length > maxVisibilityLength) {
+          System.out.println("expression too large: "+toString());
+          out.writeInt(0);
+        } else {
+          out.writeInt(visibility.length);
+          out.write(visibility);
+        }
+      }
+    }
+    out.writeInt(entries);
+  }
+  
+  @Override
+  public String toString() {
+    return "{"+entries+";"+minTimestamp+";"+maxTimestamp+";"+minimumVisibility+"}";
+  }
+}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/af8cefa7/core/src/main/java/org/apache/accumulo/core/file/rfile/RFile.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/file/rfile/RFile.java b/core/src/main/java/org/apache/accumulo/core/file/rfile/RFile.java
index 5e1e8a3..d250155 100644
--- a/core/src/main/java/org/apache/accumulo/core/file/rfile/RFile.java
+++ b/core/src/main/java/org/apache/accumulo/core/file/rfile/RFile.java
@@ -784,7 +784,9 @@ public class RFile {
      * @see org.apache.accumulo.core.iterators.Filterer#applyFilter(org.apache.accumulo.core.iterators.Predicate)
      */
     @Override
-    public void applyFilter(Predicate<Key,Value> filter) {
+    public void applyFilter(Predicate<Key,Value> filter, boolean required) {
+      if(required)
+        throw new UnsupportedOperationException("Cannot guarantee filtration");
       // TODO support general filters
       if(filter instanceof TimestampRangePredicate)
       {
@@ -797,16 +799,12 @@ public class RFile {
           timestampRange = p;
         index.setTimestampRange(timestampRange);
       }
-      else if(filter instanceof ColumnVisibilityPredicate)
+      if(filter instanceof ColumnVisibilityPredicate)
       {
     	  filterChanged = true;
     	  columnVisibilityPredicate = (ColumnVisibilityPredicate)filter;
     	  index.setColumnVisibilityPredicate(columnVisibilityPredicate);
       }
-      else
-      {
-        throw new RuntimeException("yikes, not yet implemented");
-      }
     }
   }
   
@@ -1042,7 +1040,9 @@ public class RFile {
         
         if (include) {
           if(timestampFilter != null)
-            lgr.applyFilter(timestampFilter);
+            lgr.applyFilter(timestampFilter,false);
+          if(columnVisibilityPredicate != null)
+            lgr.applyFilter(columnVisibilityPredicate,false);
           lgr.seek(range, EMPTY_CF_SET, false);
           addSource(lgr);
           numLGSeeked++;
@@ -1093,6 +1093,7 @@ public class RFile {
     ArrayList<Predicate<Key,Value>> filters = new ArrayList<Predicate<Key,Value>>();
     
     TimestampRangePredicate timestampFilter = null;
+    ColumnVisibilityPredicate columnVisibilityPredicate = null;
     
     Key topKey;
     Value topValue;
@@ -1171,11 +1172,14 @@ public class RFile {
      * @see org.apache.accumulo.core.iterators.Filterer#applyFilter(org.apache.accumulo.core.iterators.Predicate)
      */
     @Override
-    public void applyFilter(Predicate<Key,Value> filter) {
-      filters.add(filter);
+    public void applyFilter(Predicate<Key,Value> filter, boolean required) {
+      if(required)
+        filters.add(filter);
       // the HeapIterator will pass this filter on to its children, a collection of LocalityGroupReaders
       if(filter instanceof TimestampRangePredicate)
         this.timestampFilter = (TimestampRangePredicate)filter;
+      if(filter instanceof ColumnVisibilityPredicate)
+        this.columnVisibilityPredicate = (ColumnVisibilityPredicate)filter;
     }
   }
   

http://git-wip-us.apache.org/repos/asf/accumulo/blob/af8cefa7/core/src/main/java/org/apache/accumulo/core/iterators/Filterer.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/iterators/Filterer.java b/core/src/main/java/org/apache/accumulo/core/iterators/Filterer.java
index bda3665..6743cbc 100644
--- a/core/src/main/java/org/apache/accumulo/core/iterators/Filterer.java
+++ b/core/src/main/java/org/apache/accumulo/core/iterators/Filterer.java
@@ -17,8 +17,15 @@
 package org.apache.accumulo.core.iterators;
 
 /**
- * 
+ * An interface designed to be added to containers to specify what
+ * can be left out when iterating over the contents of that container.
  */
 public interface Filterer<K,V> {
-  public void applyFilter(Predicate<K,V> filter);
+  /**
+   * Leave out entries for which the given Predicate evaluates to false.
+   * @param filter the predicate specifying whether an entry can be left out
+   * @param required if true, entries failing the filter must be left out; if false, the filter is
+   *          purely a potential optimization that the container may ignore.
+   */
+  public void applyFilter(Predicate<K,V> filter, boolean required);
 }

http://git-wip-us.apache.org/repos/asf/accumulo/blob/af8cefa7/core/src/main/java/org/apache/accumulo/core/iterators/WrappingIterator.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/iterators/WrappingIterator.java b/core/src/main/java/org/apache/accumulo/core/iterators/WrappingIterator.java
index a9c7f2d..84ffb7c 100644
--- a/core/src/main/java/org/apache/accumulo/core/iterators/WrappingIterator.java
+++ b/core/src/main/java/org/apache/accumulo/core/iterators/WrappingIterator.java
@@ -25,7 +25,7 @@ import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.Range;
 import org.apache.accumulo.core.data.Value;
 
-public abstract class WrappingIterator implements SortedKeyValueIterator<Key,Value> {
+public abstract class WrappingIterator implements SortedKeyValueIterator<Key,Value>, Filterer<Key,Value> {
   
   private SortedKeyValueIterator<Key,Value> source = null;
   boolean seenSeek = false;
@@ -93,4 +93,13 @@ public abstract class WrappingIterator implements SortedKeyValueIterator<Key,Val
     seenSeek = true;
   }
   
+  @SuppressWarnings("unchecked")
+  @Override
+  public void applyFilter(Predicate<Key,Value> filter, boolean required) {
+    if(source instanceof Filterer)
+      ((Filterer<Key,Value>)source).applyFilter(filter, required);
+    else if(required)
+      throw new IllegalArgumentException("Cannot require filter of underlying iterator");
+  }
+
 }

http://git-wip-us.apache.org/repos/asf/accumulo/blob/af8cefa7/core/src/main/java/org/apache/accumulo/core/iterators/predicates/ColumnVisibilityPredicate.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/iterators/predicates/ColumnVisibilityPredicate.java b/core/src/main/java/org/apache/accumulo/core/iterators/predicates/ColumnVisibilityPredicate.java
new file mode 100644
index 0000000..cb1b521
--- /dev/null
+++ b/core/src/main/java/org/apache/accumulo/core/iterators/predicates/ColumnVisibilityPredicate.java
@@ -0,0 +1,47 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.core.iterators.predicates;
+
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.iterators.Predicate;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.core.security.ColumnVisibility;
+
+/**
+ * A {@link Predicate} that accepts a key/value pair iff the key's column
+ * visibility expression evaluates to true under the given {@link Authorizations}.
+ */
+public final class ColumnVisibilityPredicate implements Predicate<Key,Value> {
+  public final Authorizations auths;
+  
+  public ColumnVisibilityPredicate(Authorizations auths) {
+    this.auths = auths;
+  }
+  
+  @Override
+  public boolean evaluate(Key k, Value v) {
+    // NOTE(review): builds a new ColumnVisibility per entry; consider an LRU
+    // cache keyed on the expression (as VisibilityFilter does) if this is hot.
+    return new ColumnVisibility(k.getColumnVisibility()).evaluate(auths);
+  }
+  
+  @Override
+  public String toString() {
+    return "{" + auths + "}";
+  }
+}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/af8cefa7/core/src/main/java/org/apache/accumulo/core/iterators/system/MultiIterator.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/iterators/system/MultiIterator.java b/core/src/main/java/org/apache/accumulo/core/iterators/system/MultiIterator.java
index f406fee..b219c2d 100644
--- a/core/src/main/java/org/apache/accumulo/core/iterators/system/MultiIterator.java
+++ b/core/src/main/java/org/apache/accumulo/core/iterators/system/MultiIterator.java
@@ -27,7 +27,9 @@ import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.KeyExtent;
 import org.apache.accumulo.core.data.Range;
 import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.iterators.Filterer;
 import org.apache.accumulo.core.iterators.IteratorEnvironment;
+import org.apache.accumulo.core.iterators.Predicate;
 import org.apache.accumulo.core.iterators.SortedKeyValueIterator;
 
 /**
@@ -37,7 +39,7 @@ import org.apache.accumulo.core.iterators.SortedKeyValueIterator;
  * 
  */
 
-public class MultiIterator extends HeapIterator {
+public class MultiIterator extends HeapIterator implements Filterer<Key,Value> {
   
   private List<SortedKeyValueIterator<Key,Value>> iters;
   private Range fence;
@@ -111,4 +113,15 @@ public class MultiIterator extends HeapIterator {
   public void init(SortedKeyValueIterator<Key,Value> source, Map<String,String> options, IteratorEnvironment env) throws IOException {
     throw new UnsupportedOperationException();
   }
+  
+  @SuppressWarnings("unchecked")
+  @Override
+  public void applyFilter(Predicate<Key,Value> filter, boolean required) {
+    for(SortedKeyValueIterator<Key,Value> skvi: iters) {
+      if(skvi instanceof Filterer)
+        ((Filterer<Key,Value>)skvi).applyFilter(filter, required);
+      else if(required)
+        throw new IllegalArgumentException("Cannot require filter of underlying iterator");
+    }
+  }
 }

http://git-wip-us.apache.org/repos/asf/accumulo/blob/af8cefa7/core/src/main/java/org/apache/accumulo/core/iterators/system/SourceSwitchingIterator.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/iterators/system/SourceSwitchingIterator.java b/core/src/main/java/org/apache/accumulo/core/iterators/system/SourceSwitchingIterator.java
index b7069c9..bd907ed 100644
--- a/core/src/main/java/org/apache/accumulo/core/iterators/system/SourceSwitchingIterator.java
+++ b/core/src/main/java/org/apache/accumulo/core/iterators/system/SourceSwitchingIterator.java
@@ -20,8 +20,10 @@ import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.Collections;
+import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
+import java.util.Map.Entry;
 import java.util.concurrent.atomic.AtomicBoolean;
 
 import org.apache.accumulo.core.data.ByteSequence;
@@ -29,10 +31,12 @@ import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.PartialKey;
 import org.apache.accumulo.core.data.Range;
 import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.iterators.Filterer;
 import org.apache.accumulo.core.iterators.IteratorEnvironment;
+import org.apache.accumulo.core.iterators.Predicate;
 import org.apache.accumulo.core.iterators.SortedKeyValueIterator;
 
-public class SourceSwitchingIterator implements SortedKeyValueIterator<Key,Value>, InterruptibleIterator {
+public class SourceSwitchingIterator implements SortedKeyValueIterator<Key,Value>, InterruptibleIterator, Filterer<Key,Value> {
   
   public interface DataSource {
     boolean isCurrent();
@@ -144,6 +148,7 @@ public class SourceSwitchingIterator implements SortedKeyValueIterator<Key,Value
     while (!source.isCurrent()) {
       source = source.getNewDataSource();
       iter = source.iterator();
+      applyExistingFilters();
       if (iflag != null)
         ((InterruptibleIterator) iter).setInterruptFlag(iflag);
       
@@ -161,6 +166,7 @@ public class SourceSwitchingIterator implements SortedKeyValueIterator<Key,Value
     
     if (iter == null) {
       iter = source.iterator();
+      applyExistingFilters();
       if (iflag != null)
         ((InterruptibleIterator) iter).setInterruptFlag(iflag);
     }
@@ -197,4 +203,31 @@ public class SourceSwitchingIterator implements SortedKeyValueIterator<Key,Value
     
   }
   
+  private Map<Predicate<Key,Value>,Boolean> filters = new HashMap<Predicate<Key,Value>,Boolean>();
+  
+  private void applyExistingFilters()
+  {
+    for(Entry<Predicate<Key,Value>,Boolean> filter:filters.entrySet())
+    {
+      _applyFilter(filter.getKey(), filter.getValue());
+    }
+  }
+  
+  @SuppressWarnings("unchecked")
+  private void _applyFilter(Predicate<Key,Value> filter, boolean required)
+  {
+    if(iter != null && iter instanceof Filterer)
+      ((Filterer<Key,Value>)iter).applyFilter(filter, required);
+    else if(iter != null && required)
+      throw new IllegalArgumentException("Cannot require filter of underlying iterator");
+  }
+  
+  @Override
+  public void applyFilter(Predicate<Key,Value> filter, boolean required) {
+    // apply filter to the current data source
+    _applyFilter(filter,required);
+    // save filter for application to future data sources
+    filters.put(filter, required);
+  }
+
 }

http://git-wip-us.apache.org/repos/asf/accumulo/blob/af8cefa7/core/src/main/java/org/apache/accumulo/core/iterators/system/SynchronizedIterator.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/iterators/system/SynchronizedIterator.java b/core/src/main/java/org/apache/accumulo/core/iterators/system/SynchronizedIterator.java
index 2657bab..b8227be 100644
--- a/core/src/main/java/org/apache/accumulo/core/iterators/system/SynchronizedIterator.java
+++ b/core/src/main/java/org/apache/accumulo/core/iterators/system/SynchronizedIterator.java
@@ -22,7 +22,9 @@ import java.util.Map;
 
 import org.apache.accumulo.core.data.ByteSequence;
 import org.apache.accumulo.core.data.Range;
+import org.apache.accumulo.core.iterators.Filterer;
 import org.apache.accumulo.core.iterators.IteratorEnvironment;
+import org.apache.accumulo.core.iterators.Predicate;
 import org.apache.accumulo.core.iterators.SortedKeyValueIterator;
 import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.io.WritableComparable;
@@ -30,7 +32,7 @@ import org.apache.hadoop.io.WritableComparable;
 /***
  * SynchronizedIterator: wrap a SortedKeyValueIterator so that all of its methods are synchronized
  */
-public class SynchronizedIterator<K extends WritableComparable<?>,V extends Writable> implements SortedKeyValueIterator<K,V> {
+public class SynchronizedIterator<K extends WritableComparable<?>,V extends Writable> implements SortedKeyValueIterator<K,V>, Filterer<K,V> {
   
   private SortedKeyValueIterator<K,V> source = null;
   
@@ -75,4 +77,13 @@ public class SynchronizedIterator<K extends WritableComparable<?>,V extends Writ
   public SynchronizedIterator(SortedKeyValueIterator<K,V> source) {
     this.source = source;
   }
+
+  @SuppressWarnings("unchecked")
+  @Override
+  public void applyFilter(Predicate<K,V> filter, boolean required) {
+    if(source instanceof Filterer)
+      ((Filterer<K,V>)source).applyFilter(filter, required);
+    else if(required)
+      throw new IllegalArgumentException("cannot guarantee filter with non filterer source");
+  }
 }

http://git-wip-us.apache.org/repos/asf/accumulo/blob/af8cefa7/core/src/main/java/org/apache/accumulo/core/iterators/system/TimeSettingIterator.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/iterators/system/TimeSettingIterator.java b/core/src/main/java/org/apache/accumulo/core/iterators/system/TimeSettingIterator.java
index 4eef14d..1dff72f 100644
--- a/core/src/main/java/org/apache/accumulo/core/iterators/system/TimeSettingIterator.java
+++ b/core/src/main/java/org/apache/accumulo/core/iterators/system/TimeSettingIterator.java
@@ -25,11 +25,13 @@ import org.apache.accumulo.core.data.ByteSequence;
 import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.Range;
 import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.iterators.Filterer;
 import org.apache.accumulo.core.iterators.IteratorEnvironment;
 import org.apache.accumulo.core.iterators.IteratorUtil;
+import org.apache.accumulo.core.iterators.Predicate;
 import org.apache.accumulo.core.iterators.SortedKeyValueIterator;
 
-public class TimeSettingIterator implements InterruptibleIterator {
+public class TimeSettingIterator implements InterruptibleIterator, Filterer<Key,Value> {
   
   private SortedKeyValueIterator<Key,Value> source;
   private long time;
@@ -88,5 +90,14 @@ public class TimeSettingIterator implements InterruptibleIterator {
   public Value getTopValue() {
     return source.getTopValue();
   }
+
+  @SuppressWarnings("unchecked")
+  @Override
+  public void applyFilter(Predicate<Key,Value> filter, boolean required) {
+    if(source instanceof Filterer)
+      ((Filterer<Key,Value>)source).applyFilter(filter, required);
+    else if(required)
+      throw new IllegalArgumentException("cannot guarantee filter with non filterer source");
+  }
   
 }

http://git-wip-us.apache.org/repos/asf/accumulo/blob/af8cefa7/core/src/main/java/org/apache/accumulo/core/iterators/system/VisibilityFilter.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/iterators/system/VisibilityFilter.java b/core/src/main/java/org/apache/accumulo/core/iterators/system/VisibilityFilter.java
index a4391c0..2c05a03 100644
--- a/core/src/main/java/org/apache/accumulo/core/iterators/system/VisibilityFilter.java
+++ b/core/src/main/java/org/apache/accumulo/core/iterators/system/VisibilityFilter.java
@@ -16,11 +16,16 @@
  */
 package org.apache.accumulo.core.iterators.system;
 
+import java.io.IOException;
+import java.util.Map;
+
 import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.iterators.Filter;
+import org.apache.accumulo.core.iterators.Filterer;
 import org.apache.accumulo.core.iterators.IteratorEnvironment;
 import org.apache.accumulo.core.iterators.SortedKeyValueIterator;
+import org.apache.accumulo.core.iterators.predicates.ColumnVisibilityPredicate;
 import org.apache.accumulo.core.security.Authorizations;
 import org.apache.accumulo.core.security.ColumnVisibility;
 import org.apache.accumulo.core.util.TextUtil;
@@ -43,7 +48,18 @@ public class VisibilityFilter extends Filter {
     this.cache = new LRUMap(1000);
     this.tmpVis = new Text();
   }
-  
+
+  @SuppressWarnings("unchecked")
+  @Override
+  public void init(SortedKeyValueIterator<Key,Value> source, Map<String,String> options, IteratorEnvironment env) throws IOException {
+    super.init(source, options, env);
+    // Best-effort push-down: let a filtering-capable source drop entries the
+    // authorizations cannot see. "required" is false because this filter still
+    // re-checks every entry itself, so the hint may be ignored safely.
+    if(source instanceof Filterer)
+      ((Filterer<Key,Value>)source).applyFilter(new ColumnVisibilityPredicate(auths), false);
+  }
+
   @Override
   public SortedKeyValueIterator<Key,Value> deepCopy(IteratorEnvironment env) {
     return new VisibilityFilter(getSource().deepCopy(env), auths, TextUtil.getBytes(defaultVisibility));

http://git-wip-us.apache.org/repos/asf/accumulo/blob/af8cefa7/core/src/test/java/org/apache/accumulo/core/file/rfile/AuthorizationFilterTest.java
----------------------------------------------------------------------
diff --git a/core/src/test/java/org/apache/accumulo/core/file/rfile/AuthorizationFilterTest.java b/core/src/test/java/org/apache/accumulo/core/file/rfile/AuthorizationFilterTest.java
new file mode 100644
index 0000000..7dac68b
--- /dev/null
+++ b/core/src/test/java/org/apache/accumulo/core/file/rfile/AuthorizationFilterTest.java
@@ -0,0 +1,115 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.core.file.rfile;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
+import java.util.Collections;
+import java.util.Map.Entry;
+import java.util.Random;
+import java.util.TreeMap;
+
+import org.apache.accumulo.core.data.ByteSequence;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.Range;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.file.blockfile.impl.CachableBlockFile;
+import org.apache.accumulo.core.file.rfile.RFileTest.SeekableByteArrayInputStream;
+import org.apache.accumulo.core.iterators.Predicate;
+import org.apache.accumulo.core.iterators.predicates.ColumnVisibilityPredicate;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.core.security.ColumnVisibility;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.junit.Test;
+
+public class AuthorizationFilterTest {
+  
+  /**
+   * Writes an RFile containing entries under a mix of visible and non-visible column visibilities,
+   * then reads it back with a required ColumnVisibilityPredicate and verifies that exactly the
+   * entries visible under the authorizations survive the filter.
+   */
+  @Test
+  public void testRFileAuthorizationFiltering() throws Exception {
+    Authorizations auths = new Authorizations("a", "b", "c");
+    Predicate<Key,Value> columnVisibilityPredicate = new ColumnVisibilityPredicate(auths);
+    int expected = 0;
+    Random r = new Random(42); // fixed seed so any failure is reproducible
+    Configuration conf = new Configuration();
+    ByteArrayOutputStream baos = new ByteArrayOutputStream();
+    FSDataOutputStream dos = new FSDataOutputStream(baos, new FileSystem.Statistics("a"));
+    CachableBlockFile.Writer _cbw = new CachableBlockFile.Writer(dos, "gz", conf);
+    RFile.Writer writer = new RFile.Writer(_cbw, 1000, 1000);
+    writer.startDefaultLocalityGroup();
+    byte[] row = new byte[10];
+    byte[] colFam = new byte[10];
+    byte[] colQual = new byte[10];
+    Value value = new Value(new byte[0]);
+    TreeMap<Key,Value> inputBuffer = new TreeMap<Key,Value>();
+    ColumnVisibility[] goodColVises = {new ColumnVisibility("a&b"), new ColumnVisibility("b&c"), new ColumnVisibility("a&c")};
+    ColumnVisibility[] badColVises = {new ColumnVisibility("x"), new ColumnVisibility("y"), new ColumnVisibility("a&z")};
+    // a small number of entries the authorizations can see ...
+    for (ColumnVisibility colVis : goodColVises)
+      for (int i = 0; i < 10; i++) {
+        r.nextBytes(row);
+        r.nextBytes(colFam);
+        r.nextBytes(colQual);
+        Key k = new Key(row, colFam, colQual, colVis.getExpression(), (long) i);
+        if (columnVisibilityPredicate.evaluate(k, value))
+          expected++;
+        inputBuffer.put(k, value);
+      }
+    // ... buried among many entries they cannot see
+    for (ColumnVisibility colVis : badColVises)
+      for (int i = 0; i < 10000; i++) {
+        r.nextBytes(row);
+        r.nextBytes(colFam);
+        r.nextBytes(colQual);
+        Key k = new Key(row, colFam, colQual, colVis.getExpression(), (long) i);
+        if (columnVisibilityPredicate.evaluate(k, value))
+          expected++;
+        inputBuffer.put(k, value);
+      }
+    for (Entry<Key,Value> e : inputBuffer.entrySet()) {
+      writer.append(e.getKey(), e.getValue());
+    }
+    writer.close();
+    
+    // read the RFile back with a required visibility filter and count surviving entries
+    byte[] data = baos.toByteArray();
+    
+    ByteArrayInputStream bais = new SeekableByteArrayInputStream(data);
+    FSDataInputStream in = new FSDataInputStream(bais);
+    CachableBlockFile.Reader _cbr = new CachableBlockFile.Reader(in, data.length, conf);
+    RFile.Reader reader = new RFile.Reader(_cbr);
+    int count = 0;
+    reader.applyFilter(columnVisibilityPredicate, true);
+    reader.seek(new Range(), Collections.<ByteSequence>emptySet(), false);
+    while (reader.hasTop()) {
+      count++;
+      assertTrue(columnVisibilityPredicate.evaluate(reader.getTopKey(), reader.getTopValue()));
+      reader.next();
+    }
+    assertEquals(expected, count);
+  }
+}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/af8cefa7/core/src/test/java/org/apache/accumulo/core/file/rfile/TimestampFilterTest.java
----------------------------------------------------------------------
diff --git a/core/src/test/java/org/apache/accumulo/core/file/rfile/TimestampFilterTest.java b/core/src/test/java/org/apache/accumulo/core/file/rfile/TimestampFilterTest.java
index 463c779..160d7bd 100644
--- a/core/src/test/java/org/apache/accumulo/core/file/rfile/TimestampFilterTest.java
+++ b/core/src/test/java/org/apache/accumulo/core/file/rfile/TimestampFilterTest.java
@@ -45,7 +45,6 @@ public class TimestampFilterTest {
   
   @Test
   public void testRFileTimestampFiltering() throws Exception {
-    // TODO create an RFile with increasing timestamp and random key order
     Predicate<Key,Value> timeRange = new TimestampRangePredicate(73, 117);
     int expected = 0;
     Random r = new Random();
@@ -84,7 +83,7 @@ public class TimestampFilterTest {
     CachableBlockFile.Reader _cbr = new CachableBlockFile.Reader(in, data.length, conf);
     RFile.Reader reader = new RFile.Reader(_cbr);
     int count = 0;
-    reader.applyFilter(timeRange);
+    reader.applyFilter(timeRange,true);
     reader.seek(new Range(), Collections.EMPTY_SET, false);
     while(reader.hasTop())
     {