Posted to commits@hbase.apache.org by st...@apache.org on 2010/05/07 21:17:55 UTC

svn commit: r942184 [1/15] - in /hadoop/hbase/branches/0.20: ./ src/java/org/apache/hadoop/hbase/ src/java/org/apache/hadoop/hbase/client/ src/java/org/apache/hadoop/hbase/filter/ src/java/org/apache/hadoop/hbase/io/ src/java/org/apache/hadoop/hbase/io...

Author: stack
Date: Fri May  7 19:17:48 2010
New Revision: 942184

URL: http://svn.apache.org/viewvc?rev=942184&view=rev
Log:
HBASE-2518 Kill all the trailing whitespaces in the code base
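
The change itself is mechanical: every line-trailing run of spaces and tabs is deleted, leaving the code otherwise untouched. For illustration only, here is a stand-alone sketch of the kind of pass that produces such a commit; this is not the tool actually used for this change, and it assumes a modern JDK (java.nio.file and lambdas postdate this commit):

    import java.io.IOException;
    import java.nio.charset.StandardCharsets;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.nio.file.Paths;
    import java.util.ArrayList;
    import java.util.List;

    /** Strips trailing spaces/tabs from every .java file under a directory. */
    public class StripTrailingWhitespace {
      public static void main(String[] args) throws IOException {
        Files.walk(Paths.get(args[0]))
            .filter(p -> p.toString().endsWith(".java"))
            .forEach(StripTrailingWhitespace::strip);
      }

      private static void strip(Path file) {
        try {
          List<String> lines = Files.readAllLines(file, StandardCharsets.UTF_8);
          List<String> cleaned = new ArrayList<>(lines.size());
          boolean changed = false;
          for (String line : lines) {
            // Remove spaces and tabs at the end of the line only.
            String c = line.replaceAll("[ \\t]+$", "");
            changed |= !c.equals(line);
            cleaned.add(c);
          }
          if (changed) {
            Files.write(file, cleaned, StandardCharsets.UTF_8);  // rewrite in place
          }
        } catch (IOException e) {
          throw new RuntimeException(file.toString(), e);
        }
      }
    }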

Modified:
    hadoop/hbase/branches/0.20/CHANGES.txt
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/Chore.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/ClusterStatus.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/HBaseConfiguration.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/HColumnDescriptor.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/HConstants.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/HMerge.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/HMsg.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/HRegionInfo.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/HRegionLocation.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/HServerAddress.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/HServerInfo.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/HServerLoad.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/HStoreKey.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/HTableDescriptor.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/KeyValue.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/LeaseListener.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/Leases.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/LocalHBaseCluster.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/NotServingRegionException.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/RemoteExceptionHandler.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/TableExistsException.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/ValueOverMaxLengthException.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/VersionAnnotation.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/client/Delete.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/client/Get.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/client/HConnection.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/client/HConnectionManager.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/client/HTable.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/client/HTablePool.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/client/MetaScanner.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/client/MultiPut.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/client/MultiPutResponse.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/client/Put.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/client/Result.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/client/ResultScanner.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/client/RetriesExhaustedException.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/client/RowLock.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/client/Scan.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/client/Scanner.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/client/ScannerCallable.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/client/ScannerTimeoutException.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/client/ServerCallable.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/client/ServerConnection.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/client/UnmodifyableHRegionInfo.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/client/UnmodifyableHTableDescriptor.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/filter/BinaryComparator.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/filter/BinaryPrefixComparator.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/filter/ColumnPaginationFilter.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/filter/ColumnValueFilter.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/filter/CompareFilter.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/filter/Filter.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/filter/FilterList.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/filter/InclusiveStopFilter.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/filter/InclusiveStopRowFilter.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/filter/PageFilter.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/filter/PageRowFilter.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/filter/PrefixRowFilter.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/filter/QualifierFilter.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/filter/RegExpRowFilter.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/filter/RegexStringComparator.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/filter/RowFilter.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/filter/RowFilterInterface.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/filter/RowFilterSet.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/filter/SingleColumnValueFilter.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/filter/SkipFilter.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/filter/StopRowFilter.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/filter/SubstringComparator.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/filter/ValueFilter.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/filter/WhileMatchFilter.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/filter/WhileMatchRowFilter.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/filter/package-info.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/io/BatchOperation.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/io/BatchUpdate.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/io/Cell.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/io/CodeToClassAndBack.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/io/HalfHFileReader.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/io/HbaseMapWritable.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/io/HbaseObjectWritable.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/io/HeapSize.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/io/ImmutableBytesWritable.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/io/Reference.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/io/RowResult.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/io/TimeRange.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/io/hfile/CachedBlock.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/io/hfile/CachedBlockQueue.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/io/hfile/Compression.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/io/hfile/HFile.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/io/hfile/HFileScanner.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/io/hfile/SimpleBlockCache.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/ipc/HBaseClient.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/ipc/HBaseRPC.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/ipc/HBaseRPCProtocolVersion.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/ipc/HBaseRPCStatistics.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/ipc/HBaseRpcMetrics.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/ipc/HBaseServer.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/ipc/HMasterInterface.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/ipc/HMasterRegionInterface.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/ipc/HRegionInterface.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapred/BuildTableIndex.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapred/Driver.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapred/GroupingTableMap.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapred/HRegionPartitioner.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapred/IdentityTableMap.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapred/IdentityTableReduce.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapred/IndexOutputFormat.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapred/IndexTableReduce.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapred/RowCounter.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapred/TableInputFormatBase.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapred/TableMapReduceUtil.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapred/TableOutputFormat.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapred/package-info.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapreduce/BuildTableIndex.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapreduce/Driver.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapreduce/Export.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapreduce/GroupingTableMapper.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapreduce/HRegionPartitioner.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapreduce/IdentityTableMapper.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapreduce/IdentityTableReducer.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapreduce/Import.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapreduce/IndexConfiguration.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapreduce/IndexOutputFormat.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapreduce/IndexRecordWriter.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapreduce/IndexTableReducer.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapreduce/LuceneDocumentWrapper.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapreduce/RowCounter.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapreduce/TableInputFormat.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapreduce/TableInputFormatBase.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapreduce/TableMapper.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapreduce/TableOutputFormat.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapreduce/TableReducer.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapreduce/TableSplit.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/mapreduce/package-info.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/master/AddColumn.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/master/BaseScanner.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/master/ChangeTableState.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/master/ColumnOperation.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/master/DeleteColumn.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/master/HMaster.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/master/MetaRegion.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/master/MetaScanner.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/master/ModifyColumn.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/master/ModifyTableMeta.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/master/ProcessRegionClose.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/master/ProcessRegionOpen.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/master/ProcessRegionStatusChange.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/master/ProcessServerShutdown.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/master/RegionManager.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/master/RegionServerOperation.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/master/RegionServerOperationQueue.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/master/RetryableMetaOperation.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/master/RootScanner.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/master/ServerManager.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/master/TableDelete.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/master/TableOperation.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/master/ZKMasterAddressWatcher.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/master/metrics/MasterMetrics.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/master/metrics/MasterStatistics.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/metrics/MetricsMBeanBase.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/metrics/MetricsRate.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/metrics/file/TimeStampingFileContext.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/migration/nineteen/HStoreFileToStoreFile.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/migration/nineteen/HStoreKey.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/migration/nineteen/io/BloomFilterMapFile.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/migration/nineteen/io/HBaseMapFile.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/migration/nineteen/io/HalfMapFileReader.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/migration/nineteen/io/Reference.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/migration/nineteen/onelab/filter/BloomFilter.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/migration/nineteen/onelab/filter/CountingBloomFilter.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/migration/nineteen/onelab/filter/DynamicBloomFilter.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/migration/nineteen/onelab/filter/Filter.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/migration/nineteen/onelab/filter/HashFunction.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/migration/nineteen/onelab/filter/Key.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/migration/nineteen/onelab/filter/RemoveScheme.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/migration/nineteen/onelab/filter/RetouchedBloomFilter.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/migration/nineteen/regionserver/HStoreFile.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/regionserver/ColumnCount.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/regionserver/ColumnTracker.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/regionserver/DeleteCompare.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/regionserver/DeleteTracker.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/regionserver/ExplicitColumnTracker.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/regionserver/FailedLogCloseException.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/regionserver/FlushRequester.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/regionserver/GetClosestRowBeforeTracker.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/regionserver/GetDeleteTracker.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/regionserver/HLog.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/regionserver/HLogKey.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/regionserver/HRegion.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/regionserver/InternalScanner.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/regionserver/KeyValueHeap.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/regionserver/KeyValueScanner.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/regionserver/KeyValueSkipListSet.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/regionserver/LogRoller.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/regionserver/LruHashMap.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/regionserver/MemStore.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/regionserver/QueryMatcher.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/regionserver/ReadWriteConsistencyControl.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/regionserver/RegionServerRunningException.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/regionserver/ScanDeleteTracker.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/regionserver/ScanQueryMatcher.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/regionserver/ScanWildcardColumnTracker.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/regionserver/Store.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/regionserver/StoreFileGetScan.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/regionserver/WALEdit.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/regionserver/WildcardColumnTracker.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/regionserver/metrics/RegionServerMetrics.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/regionserver/metrics/RegionServerStatistics.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/rest/AbstractController.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/rest/Dispatcher.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/rest/RowController.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/rest/RowModel.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/rest/ScannerController.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/rest/ScannerModel.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/rest/Status.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/rest/TableController.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/rest/TableModel.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/rest/TimestampModel.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/rest/descriptors/RestCell.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/rest/descriptors/RowUpdateDescriptor.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/rest/descriptors/ScannerDescriptor.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/rest/descriptors/ScannerIdentifier.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/rest/descriptors/TimestampsDescriptor.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/rest/exception/HBaseRestException.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/rest/filter/ColumnValueFilterFactory.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/rest/filter/FilterFactory.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/rest/filter/InclusiveStopRowFilterFactory.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/rest/filter/RowFilterSetFactory.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/rest/filter/StopRowFilterFactory.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/rest/filter/WhileMatchRowFilterFactory.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/rest/parser/HBaseRestParserFactory.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/rest/parser/IHBaseRestParser.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/rest/parser/XMLRestParser.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/rest/serializer/AbstractRestSerializer.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/rest/serializer/IRestSerializer.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/rest/serializer/ISerializable.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/rest/serializer/RestSerializerFactory.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/rest/serializer/SimpleXMLSerializer.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/thrift/ThriftServer.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/thrift/ThriftUtilities.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/thrift/generated/AlreadyExists.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/thrift/generated/BatchMutation.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/thrift/generated/ColumnDescriptor.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/thrift/generated/Hbase.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/thrift/generated/IOError.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/thrift/generated/IllegalArgument.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/thrift/generated/Mutation.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/thrift/generated/TCell.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/thrift/generated/TRegionInfo.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/thrift/generated/TRowResult.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/util/Base64.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/util/Bytes.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/util/ClassSize.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/util/FSUtils.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/util/Hash.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/util/InfoServer.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/util/JVMClusterUtil.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/util/JenkinsHash.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/util/Keying.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/util/Merge.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/util/MetaUtils.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/util/Migrate.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/util/MurmurHash.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/util/Sleeper.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/util/SoftValueMap.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/util/SoftValueSortedMap.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/util/Strings.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/util/Threads.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/util/VersionInfo.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/util/Writables.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWrapper.java

Modified: hadoop/hbase/branches/0.20/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20/CHANGES.txt?rev=942184&r1=942183&r2=942184&view=diff
==============================================================================
--- hadoop/hbase/branches/0.20/CHANGES.txt (original)
+++ hadoop/hbase/branches/0.20/CHANGES.txt Fri May  7 19:17:48 2010
@@ -186,6 +186,8 @@ Release X.X.X - Unreleased
    HBASE-2496  Less ArrayList churn on the scan path
    HBASE-2414  Enhance test suite to be able to specify distributed scenarios
    HBASE-2520  Cleanup arrays vs Lists of scanners (Todd Lipcon via Stack)
+   HBASE-2518  Kill all the trailing whitespaces in the code base
+               (Benoit Sigoure via Stack)
 
   NEW FEATURES
    HBASE-2257  [stargate] multiuser mode

Modified: hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/Chore.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/Chore.java?rev=942184&r1=942183&r2=942184&view=diff
==============================================================================
--- hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/Chore.java (original)
+++ hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/Chore.java Fri May  7 19:17:48 2010
@@ -31,7 +31,7 @@ import org.apache.hadoop.hbase.util.Slee
  * If an unhandled exception, the threads exit is logged.
  * Implementers just need to add checking if there is work to be done and if
  * so, do it.  Its the base of most of the chore threads in hbase.
- * 
+ *
  * Don't subclass Chore if the task relies on being woken up for something to
  * do, such as an entry being added to a queue, etc.
  */
@@ -39,7 +39,7 @@ public abstract class Chore extends Thre
   private final Log LOG = LogFactory.getLog(this.getClass());
   private final Sleeper sleeper;
   protected volatile AtomicBoolean stop;
-  
+
   /**
    * @param p Period at which we should run.  Will be adjusted appropriately
    * should we find work and it takes time to complete.
@@ -82,7 +82,7 @@ public abstract class Chore extends Thre
       LOG.info(getName() + " exiting");
     }
   }
-  
+
   /**
    * If the thread is currently sleeping, trigger the core to happen immediately.
    * If it's in the middle of its operation, will begin another operation
@@ -91,7 +91,7 @@ public abstract class Chore extends Thre
   public void triggerNow() {
     this.sleeper.skipSleepCycle();
   }
-  
+
   /**
    * Override to run a task before we start looping.
    * @return true if initial chore was successful
@@ -100,7 +100,7 @@ public abstract class Chore extends Thre
     // Default does nothing.
     return true;
   }
-  
+
   /**
    * Look for chores.  If any found, do them else just return.
    */
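
As the javadoc above describes, Chore is the base for most of HBase's periodic background threads: a subclass wakes on a fixed period, checks whether there is work, and does it. A minimal sketch of a subclass, assuming the 0.20-era constructor Chore(int period, AtomicBoolean stop) and the chore() hook implied by the hunks above (signatures inferred from the diff, not verified):

    import java.util.concurrent.atomic.AtomicBoolean;

    import org.apache.hadoop.hbase.Chore;

    /** Hypothetical periodic task: wakes every ten seconds and checks for work. */
    public class ExampleChore extends Chore {
      public ExampleChore(AtomicBoolean stop) {
        super(10 * 1000, stop);  // period in milliseconds
      }

      @Override
      protected void chore() {
        // Check whether there is work to do and, if so, do it.  A task that
        // must be woken externally (e.g. on a queue insert) should not use
        // Chore, per the class comment above.
      }
    }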

Modified: hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/ClusterStatus.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/ClusterStatus.java?rev=942184&r1=942183&r2=942184&view=diff
==============================================================================
--- hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/ClusterStatus.java (original)
+++ hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/ClusterStatus.java Fri May  7 19:17:48 2010
@@ -177,7 +177,7 @@ public class ClusterStatus extends Versi
   /**
    * Returns detailed region server information: A list of
    * {@link HServerInfo}, containing server load and resource usage
-   * statistics as {@link HServerLoad}, containing per-region 
+   * statistics as {@link HServerLoad}, containing per-region
    * statistics as {@link HServerLoad.RegionLoad}.
    * @return region server information
    */
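
A sketch of consuming that per-server detail from a 0.20 client; method names follow the javadoc above, and the usage is hypothetical:

    import org.apache.hadoop.hbase.ClusterStatus;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HServerInfo;
    import org.apache.hadoop.hbase.client.HBaseAdmin;

    public class PrintClusterLoad {
      public static void main(String[] args) throws Exception {
        HBaseAdmin admin = new HBaseAdmin(new HBaseConfiguration());
        ClusterStatus status = admin.getClusterStatus();
        // Each HServerInfo carries an HServerLoad with per-region statistics.
        for (HServerInfo server : status.getServerInfo()) {
          System.out.println(server.getServerAddress() + ": " + server.getLoad());
        }
      }
    }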

Modified: hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/HBaseConfiguration.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/HBaseConfiguration.java?rev=942184&r1=942183&r2=942184&view=diff
==============================================================================
--- hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/HBaseConfiguration.java (original)
+++ hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/HBaseConfiguration.java Fri May  7 19:17:48 2010
@@ -33,7 +33,7 @@ public class HBaseConfiguration extends 
     super();
     addHbaseResources();
   }
-  
+
   /**
    * Create a clone of passed configuration.
    * @param c Configuration to clone.
@@ -44,16 +44,16 @@ public class HBaseConfiguration extends 
       set(e.getKey(), e.getValue());
     }
   }
-  
+
   private void addHbaseResources() {
     addResource("hbase-default.xml");
     addResource("hbase-site.xml");
   }
-  
+
   /**
    * Returns the hash code value for this HBaseConfiguration. The hash code of a
    * HBaseConfiguration is defined by the xor of the hash codes of its entries.
-   * 
+   *
    * @see Configuration#iterator() How the entries are obtained.
    */
   @Override
@@ -75,7 +75,7 @@ public class HBaseConfiguration extends 
       return false;
     if (!(obj instanceof HBaseConfiguration))
       return false;
-    
+
     HBaseConfiguration otherConf = (HBaseConfiguration) obj;
     if (size() != otherConf.size()) {
       return false;
@@ -89,9 +89,9 @@ public class HBaseConfiguration extends 
         return false;
       }
     }
-    
+
     return true;
   }
-  
-  
+
+
 }
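
The clone constructor and the entry-wise equals()/hashCode() documented above behave roughly as in this sketch (0.20-era API):

    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class ConfigCloneExample {
      public static void main(String[] args) {
        // The no-arg constructor layers hbase-default.xml and hbase-site.xml
        // on top of the Hadoop defaults.
        HBaseConfiguration conf = new HBaseConfiguration();
        conf.set("hbase.client.retries.number", "5");

        // The copy constructor clones entry by entry; equals()/hashCode()
        // are defined over the entry set, so the clone compares equal
        // until either side is modified.
        HBaseConfiguration clone = new HBaseConfiguration(conf);
        System.out.println(conf.equals(clone));                    // true
        System.out.println(conf.hashCode() == clone.hashCode());   // true
      }
    }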

Modified: hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/HColumnDescriptor.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/HColumnDescriptor.java?rev=942184&r1=942183&r2=942184&view=diff
==============================================================================
--- hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/HColumnDescriptor.java (original)
+++ hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/HColumnDescriptor.java Fri May  7 19:17:48 2010
@@ -39,7 +39,7 @@ import org.apache.hadoop.io.WritableComp
 /**
  * An HColumnDescriptor contains information about a column family such as the
  * number of versions, compression settings, etc.
- * 
+ *
  * It is used as input when creating a table or adding a column. Once set, the
  * parameters that specify a column cannot be changed without deleting the
  * column and recreating it. If there is data stored in the column, it will be
@@ -55,7 +55,7 @@ public class HColumnDescriptor implement
   // Version 7 -- add new compression and hfile blocksize to HColumnDescriptor (HBASE-1217)
   private static final byte COLUMN_DESCRIPTOR_VERSION = (byte)7;
 
-  /** 
+  /**
    * The type of compression.
    * @see org.apache.hadoop.io.SequenceFile.Writer
    * @deprecated Compression now means which compression library
@@ -64,7 +64,7 @@ public class HColumnDescriptor implement
   @Deprecated
   public static enum CompressionType {
     /** Do not compress records. */
-    NONE, 
+    NONE,
     /** Compress values only, each separately. */
     RECORD,
     /** Compress sequences of records together in blocks. */
@@ -118,7 +118,7 @@ public class HColumnDescriptor implement
    * Default setting for whether or not to use bloomfilters.
    */
   public static final boolean DEFAULT_BLOOMFILTER = false;
-  
+
   /**
    * Default time to live of cell contents.
    */
@@ -144,20 +144,20 @@ public class HColumnDescriptor implement
   }
 
   /**
-   * Construct a column descriptor specifying only the family name 
+   * Construct a column descriptor specifying only the family name
    * The other attributes are defaulted.
-   * 
+   *
    * @param familyName Column family name. Must be 'printable' -- digit or
    * letter -- and end in a <code>:<code>
    */
   public HColumnDescriptor(final String familyName) {
     this(Bytes.toBytes(familyName));
   }
-  
+
   /**
-   * Construct a column descriptor specifying only the family name 
+   * Construct a column descriptor specifying only the family name
    * The other attributes are defaulted.
-   * 
+   *
    * @param familyName Column family name. Must be 'printable' -- digit or
    * letter -- and end in a <code>:<code>
    */
@@ -170,7 +170,7 @@ public class HColumnDescriptor implement
 
   /**
    * Constructor.
-   * Makes a deep copy of the supplied descriptor. 
+   * Makes a deep copy of the supplied descriptor.
    * Can make a modifiable descriptor from an UnmodifyableHColumnDescriptor.
    * @param desc The descriptor.
    */
@@ -195,8 +195,8 @@ public class HColumnDescriptor implement
    * @param timeToLive Time-to-live of cell contents, in seconds
    * (use HConstants.FOREVER for unlimited TTL)
    * @param bloomFilter Enable the specified bloom filter for this column
-   * 
-   * @throws IllegalArgumentException if passed a family name that is made of 
+   *
+   * @throws IllegalArgumentException if passed a family name that is made of
    * other than 'word' characters: i.e. <code>[a-zA-Z_0-9]</code> and does not
    * end in a <code>:</code>
    * @throws IllegalArgumentException if the number of versions is &lt;= 0
@@ -212,7 +212,7 @@ public class HColumnDescriptor implement
   /**
    * Backwards compatible Constructor.  Maximum value length is no longer
    * configurable.
-   * 
+   *
    * @param familyName Column family name. Must be 'printable' -- digit or
    * letter -- and end in a <code>:<code>
    * @param maxVersions Maximum number of versions to keep
@@ -225,8 +225,8 @@ public class HColumnDescriptor implement
    * @param timeToLive Time-to-live of cell contents, in seconds
    * (use HConstants.FOREVER for unlimited TTL)
    * @param bloomFilter Enable the specified bloom filter for this column
-   * 
-   * @throws IllegalArgumentException if passed a family name that is made of 
+   *
+   * @throws IllegalArgumentException if passed a family name that is made of
    * other than 'word' characters: i.e. <code>[a-zA-Z_0-9]</code> and does not
    * end in a <code>:</code>
    * @throws IllegalArgumentException if the number of versions is &lt;= 0
@@ -235,12 +235,12 @@ public class HColumnDescriptor implement
 //  public HColumnDescriptor(final byte [] familyName, final int maxVersions,
 //      final String compression, final boolean inMemory,
 //      final boolean blockCacheEnabled, final int blocksize,
-//      final int maxValueLength, 
+//      final int maxValueLength,
 //      final int timeToLive, final boolean bloomFilter) {
 //    this(familyName, maxVersions, compression, inMemory, blockCacheEnabled,
 //        blocksize, timeToLive, bloomFilter);
 //  }
-  
+
   /**
    * Constructor
    * @param familyName Column family name. Must be 'printable' -- digit or
@@ -254,8 +254,8 @@ public class HColumnDescriptor implement
    * @param timeToLive Time-to-live of cell contents, in seconds
    * (use HConstants.FOREVER for unlimited TTL)
    * @param bloomFilter Enable the specified bloom filter for this column
-   * 
-   * @throws IllegalArgumentException if passed a family name that is made of 
+   *
+   * @throws IllegalArgumentException if passed a family name that is made of
    * other than 'word' characters: i.e. <code>[a-zA-Z_0-9]</code> and does not
    * end in a <code>:</code>
    * @throws IllegalArgumentException if the number of versions is &lt;= 0
@@ -398,7 +398,7 @@ public class HColumnDescriptor implement
     String n = getValue(COMPRESSION);
     return Compression.Algorithm.valueOf(n.toUpperCase());
   }
-  
+
   /** @return maximum number of versions */
   public synchronized int getMaxVersions() {
     if (this.cachedMaxVersions == -1) {
@@ -469,7 +469,7 @@ public class HColumnDescriptor implement
       return Boolean.valueOf(value).booleanValue();
     return DEFAULT_IN_MEMORY;
   }
-  
+
   /**
    * @param inMemory True if we are to keep all values in the HRegionServer
    * cache
@@ -591,7 +591,7 @@ public class HColumnDescriptor implement
     result ^= values.hashCode();
     return result;
   }
-  
+
   // Writable
 
   public void readFields(DataInput in) throws IOException {
@@ -678,6 +678,6 @@ public class HColumnDescriptor implement
    * @see org.apache.hadoop.hbase.rest.xml.IOutputXML#toXML()
    */
   public void restSerialize(IRestSerializer serializer) throws HBaseRestException {
-    serializer.serializeColumnDescriptor(this);    
+    serializer.serializeColumnDescriptor(this);
   }
 }
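
A hypothetical construction using the full-argument constructor documented above. The parameter order is inferred from the javadoc hunks and not verified against the full source; note also that the javadoc's "end in a :" wording is a holdover from older releases, so a bare family name is used here:

    import org.apache.hadoop.hbase.HColumnDescriptor;
    import org.apache.hadoop.hbase.HConstants;
    import org.apache.hadoop.hbase.util.Bytes;

    public class FamilyDescriptorExample {
      public static void main(String[] args) {
        HColumnDescriptor family = new HColumnDescriptor(
            Bytes.toBytes("info"),   // column family name
            3,                       // keep at most three versions per cell
            "NONE",                  // compression algorithm name
            false,                   // not pinned in memory
            true,                    // serve reads through the block cache
            64 * 1024,               // 64KB HFile block size
            HConstants.FOREVER,      // unlimited time-to-live
            false);                  // no bloom filter
        System.out.println(family);
      }
    }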

Modified: hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/HConstants.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/HConstants.java?rev=942184&r1=942183&r2=942184&view=diff
==============================================================================
--- hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/HConstants.java (original)
+++ hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/HConstants.java Fri May  7 19:17:48 2010
@@ -36,12 +36,12 @@ public interface HConstants {
   //TODO: ZEROS is only used in HConnectionManager and MetaScanner. Move to
   //      client package and change visibility to default
   static final String ZEROES = "00000000000000";
-  
+
   // For migration
 
   /** name of version file */
   static final String VERSION_FILE_NAME = "hbase.version";
-  
+
   /**
    * Current version of file system.
    * Version 4 supports only one kind of bloom filter.
@@ -51,17 +51,17 @@ public interface HConstants {
    */
   // public static final String FILE_SYSTEM_VERSION = "6";
   public static final String FILE_SYSTEM_VERSION = "7";
-  
+
   // Configuration parameters
-  
+
   //TODO: Is having HBase homed on port 60k OK?
-  
+
   /** Cluster is in distributed mode or not */
   static final String CLUSTER_DISTRIBUTED = "hbase.cluster.distributed";
-  
+
   /** Cluster is standalone or pseudo-distributed */
   static final String CLUSTER_IS_LOCAL = "false";
-  
+
   /** Cluster is fully-distributed */
   static final String CLUSTER_IS_DISTRIBUTED = "true";
 
@@ -107,10 +107,10 @@ public interface HConstants {
 
   /** Parameter name for what region server interface to use. */
   static final String REGION_SERVER_CLASS = "hbase.regionserver.class";
-  
+
   /** Parameter name for what region server implementation to use. */
   static final String REGION_SERVER_IMPL= "hbase.regionserver.impl";
-  
+
   /** Default region server interface class name. */
   static final String DEFAULT_REGION_SERVER_CLASS = HRegionInterface.class.getName();
 
@@ -119,35 +119,35 @@ public interface HConstants {
 
   /** Parameter name for how often threads should wake up */
   static final String THREAD_WAKE_FREQUENCY = "hbase.server.thread.wakefrequency";
-  
+
   /** Parameter name for how often a region should should perform a major compaction */
   static final String MAJOR_COMPACTION_PERIOD = "hbase.hregion.majorcompaction";
 
   /** Parameter name for HBase instance root directory */
   static final String HBASE_DIR = "hbase.rootdir";
-  
-  /** Used to construct the name of the log directory for a region server 
+
+  /** Used to construct the name of the log directory for a region server
    * Use '.' as a special character to seperate the log files from table data */
   static final String HREGION_LOGDIR_NAME = ".logs";
 
   /** Name of old log file for reconstruction */
   static final String HREGION_OLDLOGFILE_NAME = "oldlogfile.log";
-  
+
   /** Used to construct the name of the compaction directory during compaction */
   static final String HREGION_COMPACTIONDIR_NAME = "compaction.dir";
-  
+
   /** Default maximum file size */
   static final long DEFAULT_MAX_FILE_SIZE = 256 * 1024 * 1024;
-  
+
   /** Default size of a reservation block   */
   static final int DEFAULT_SIZE_RESERVATION_BLOCK = 1024 * 1024 * 5;
 
   /** Maximum value length, enforced on KeyValue construction */
   static final int MAXIMUM_VALUE_LENGTH = Integer.MAX_VALUE;
-  
+
   // Always store the location of the root table's HRegion.
   // This HRegion is never split.
-  
+
   // region name = table + startkey + regionid. This is the row key.
   // each row in the root and meta tables describes exactly 1 region
   // Do we ever need to know all the information that we are storing?
@@ -158,7 +158,7 @@ public interface HConstants {
   // "." (and since no other table name can start with either of these
   // characters, the root region will always be the first entry in such a Map,
   // followed by all the meta regions (which will be ordered by their starting
-  // row key as well), followed by all user tables. So when the Master is 
+  // row key as well), followed by all user tables. So when the Master is
   // choosing regions to assign, it will always choose the root region first,
   // followed by the meta regions, followed by user regions. Since the root
   // and meta regions always need to be on-line, this ensures that they will
@@ -169,68 +169,68 @@ public interface HConstants {
   //
   // New stuff.  Making a slow transition.
   //
-  
+
   /** The root table's name.*/
   static final byte [] ROOT_TABLE_NAME = Bytes.toBytes("-ROOT-");
 
   /** The META table's name. */
-  static final byte [] META_TABLE_NAME = Bytes.toBytes(".META.");  
+  static final byte [] META_TABLE_NAME = Bytes.toBytes(".META.");
 
   /** delimiter used between portions of a region name */
   public static final int META_ROW_DELIMITER = ',';
 
   /** The catalog family as a string*/
   static final String CATALOG_FAMILY_STR = "info";
-  
+
   /** The catalog family */
   static final byte [] CATALOG_FAMILY = Bytes.toBytes(CATALOG_FAMILY_STR);
-  
+
   /** The catalog historian family */
   static final byte [] CATALOG_HISTORIAN_FAMILY = Bytes.toBytes("historian");
-  
+
   /** The regioninfo column qualifier */
   static final byte [] REGIONINFO_QUALIFIER = Bytes.toBytes("regioninfo");
-    
+
   /** The server column qualifier */
   static final byte [] SERVER_QUALIFIER = Bytes.toBytes("server");
-  
+
   /** The startcode column qualifier */
   static final byte [] STARTCODE_QUALIFIER = Bytes.toBytes("serverstartcode");
-  
+
   /** The lower-half split region column qualifier */
   static final byte [] SPLITA_QUALIFIER = Bytes.toBytes("splitA");
-  
+
   /** The upper-half split region column qualifier */
   static final byte [] SPLITB_QUALIFIER = Bytes.toBytes("splitB");
-  
+
   // Other constants
 
   /**
    * An empty instance.
    */
   static final byte [] EMPTY_BYTE_ARRAY = new byte [0];
-  
+
   /**
    * Used by scanners, etc when they want to start at the beginning of a region
    */
   static final byte [] EMPTY_START_ROW = EMPTY_BYTE_ARRAY;
-  
+
   /**
    * Last row in a table.
    */
   static final byte [] EMPTY_END_ROW = EMPTY_START_ROW;
 
-  /** 
-    * Used by scanners and others when they're trying to detect the end of a 
-    * table 
+  /**
+    * Used by scanners and others when they're trying to detect the end of a
+    * table
     */
   static final byte [] LAST_ROW = EMPTY_BYTE_ARRAY;
-  
+
   /**
    * Max length a row can have because of the limitation in TFile.
    */
   static final int MAX_ROW_LENGTH = Short.MAX_VALUE;
-  
+
   /** When we encode strings, we always specify UTF8 encoding */
   static final String UTF8_ENCODING = "UTF-8";
 
@@ -245,18 +245,18 @@ public interface HConstants {
    * LATEST_TIMESTAMP in bytes form
    */
   static final byte [] LATEST_TIMESTAMP_BYTES = Bytes.toBytes(LATEST_TIMESTAMP);
-  
+
   /**
    * Define for 'return-all-versions'.
    */
   static final int ALL_VERSIONS = Integer.MAX_VALUE;
-  
+
   /**
    * Unlimited time-to-live.
    */
 //  static final int FOREVER = -1;
   static final int FOREVER = Integer.MAX_VALUE;
-  
+
   /**
    * Seconds in a week
    */
@@ -276,34 +276,34 @@ public interface HConstants {
   static final String NAME = "NAME";
   static final String VERSIONS = "VERSIONS";
   static final String IN_MEMORY = "IN_MEMORY";
-  
+
   /**
    * This is a retry backoff multiplier table similar to the BSD TCP syn
    * backoff table, a bit more aggressive than simple exponential backoff.
-   */ 
+   */
   public static int RETRY_BACKOFF[] = { 1, 1, 1, 2, 2, 4, 4, 8, 16, 32 };
 
   /** modifyTable op for replacing the table descriptor */
   public static enum Modify {
     CLOSE_REGION,
-    TABLE_COMPACT, 
+    TABLE_COMPACT,
     TABLE_FLUSH,
     TABLE_MAJOR_COMPACT,
-    TABLE_SET_HTD, 
+    TABLE_SET_HTD,
     TABLE_SPLIT
   }
-  
+
     /**
      * Parameter name for maximum number of bytes returned when calling a
      * scanner's next method.
      */
   public static String HBASE_CLIENT_SCANNER_MAX_RESULT_SIZE_KEY = "hbase.client.scanner.max.result.size";
-  
+
   /**
    * Maximum number of bytes returned when calling a scanner's next method.
    * Note that when a single row is larger than this limit the row is still
    * returned completely.
-   * 
+   *
    * The default value is unlimited.
    */
   public static long DEFAULT_HBASE_CLIENT_SCANNER_MAX_RESULT_SIZE = Long.MAX_VALUE;
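
The RETRY_BACKOFF table above is used by clients as a multiplier on the configured base pause. A sketch of the resulting schedule; the clamp to the last entry mirrors how attempts beyond the table length behave, though the real pause computation lives in the client code, not here:

    import org.apache.hadoop.hbase.HConstants;

    public class BackoffExample {
      public static void main(String[] args) {
        long basePause = 1000;  // e.g. hbase.client.pause, in milliseconds
        for (int tries = 0; tries < 12; tries++) {
          // Attempts past the end of the table reuse the last multiplier.
          int index = Math.min(tries, HConstants.RETRY_BACKOFF.length - 1);
          long sleep = basePause * HConstants.RETRY_BACKOFF[index];
          System.out.println("attempt " + tries + " -> sleep " + sleep + "ms");
        }
      }
    }

With the table above and a 1s base pause this yields 1s, 1s, 1s, 2s, 2s, 4s, 4s, 8s, 16s, 32s, then 32s for every further attempt.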

Modified: hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/HMerge.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/HMerge.java?rev=942184&r1=942183&r2=942184&view=diff
==============================================================================
--- hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/HMerge.java (original)
+++ hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/HMerge.java Fri May  7 19:17:48 2010
@@ -43,29 +43,29 @@ import org.apache.hadoop.hbase.regionser
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Writables;
 
-/** 
+/**
  * A non-instantiable class that has a static method capable of compacting
  * a table by merging adjacent regions.
  */
 class HMerge implements HConstants {
   static final Log LOG = LogFactory.getLog(HMerge.class);
   static final Random rand = new Random();
-  
+
   /*
    * Not instantiable
    */
   private HMerge() {
     super();
   }
-  
+
   /**
    * Scans the table and merges two adjacent regions if they are small. This
    * only happens when a lot of rows are deleted.
-   * 
+   *
    * When merging the META region, the HBase instance must be offline.
    * When merging a normal table, the HBase instance must be online, but the
-   * table must be disabled. 
-   * 
+   * table must be disabled.
+   *
    * @param conf        - configuration object for HBase
    * @param fs          - FileSystem where regions reside
    * @param tableName   - Table to be compacted
@@ -99,7 +99,7 @@ class HMerge implements HConstants {
     protected final HLog hlog;
     private final long maxFilesize;
 
-    
+
     protected Merger(HBaseConfiguration conf, FileSystem fs,
       final byte [] tableName)
     throws IOException {
@@ -117,7 +117,7 @@ class HMerge implements HConstants {
       this.hlog =
         new HLog(fs, logdir, conf, null);
     }
-    
+
     void process() throws IOException {
       try {
         for(HRegionInfo[] regionsToMerge = next();
@@ -130,19 +130,19 @@ class HMerge implements HConstants {
       } finally {
         try {
           hlog.closeAndDelete();
-          
+
         } catch(IOException e) {
           LOG.error(e);
         }
       }
     }
-    
+
     protected boolean merge(final HRegionInfo[] info) throws IOException {
       if(info.length < 2) {
         LOG.info("only one region - nothing to merge");
         return false;
       }
-      
+
       HRegion currentRegion = null;
       long currentSize = 0;
       HRegion nextRegion = null;
@@ -181,13 +181,13 @@ class HMerge implements HConstants {
       }
       return true;
     }
-    
+
     protected abstract HRegionInfo[] next() throws IOException;
-    
+
     protected abstract void updateMeta(final byte [] oldRegion1,
       final byte [] oldRegion2, HRegion newRegion)
     throws IOException;
-    
+
   }
 
   /** Instantiated to compact a normal user table */
@@ -196,7 +196,7 @@ class HMerge implements HConstants {
     private final HTable table;
     private final ResultScanner metaScanner;
     private HRegionInfo latestRegion;
-    
+
     OnlineMerger(HBaseConfiguration conf, FileSystem fs,
       final byte [] tableName)
     throws IOException {
@@ -206,7 +206,7 @@ class HMerge implements HConstants {
       this.metaScanner = table.getScanner(CATALOG_FAMILY, REGIONINFO_QUALIFIER);
       this.latestRegion = null;
     }
-    
+
     private HRegionInfo nextRegion() throws IOException {
       try {
         Result results = getMetaRow();
@@ -232,7 +232,7 @@ class HMerge implements HConstants {
         throw e;
       }
     }
-    
+
     protected void checkOfflined(final HRegionInfo hri)
     throws TableNotDisabledException {
       if (!hri.isOffline()) {
@@ -240,7 +240,7 @@ class HMerge implements HConstants {
           hri.getRegionNameAsString() + " is not disabled");
       }
     }
-    
+
     /*
      * Check current row has a HRegionInfo.  Skip to next row if HRI is empty.
      * @return A Map of the row content else null if we are off the end.
@@ -280,7 +280,7 @@ class HMerge implements HConstants {
 
     @Override
     protected void updateMeta(final byte [] oldRegion1,
-        final byte [] oldRegion2, 
+        final byte [] oldRegion2,
       HRegion newRegion)
     throws IOException {
       byte[][] regionsToDelete = {oldRegion1, oldRegion2};
@@ -312,10 +312,10 @@ class HMerge implements HConstants {
   private static class OfflineMerger extends Merger {
     private final List<HRegionInfo> metaRegions = new ArrayList<HRegionInfo>();
     private final HRegion root;
-    
+
     OfflineMerger(HBaseConfiguration conf, FileSystem fs)
         throws IOException {
-      
+
       super(conf, fs, META_TABLE_NAME);
 
       Path rootTableDir = HTableDescriptor.getTableDir(
@@ -323,16 +323,16 @@ class HMerge implements HConstants {
           ROOT_TABLE_NAME);
 
       // Scan root region to find all the meta regions
-      
+
       root = HRegion.newHRegion(rootTableDir, hlog, fs, conf,
           HRegionInfo.ROOT_REGIONINFO, null);
       root.initialize(null, null);
 
       Scan scan = new Scan();
       scan.addColumn(CATALOG_FAMILY, REGIONINFO_QUALIFIER);
-      InternalScanner rootScanner = 
+      InternalScanner rootScanner =
         root.getScanner(scan);
-      
+
       try {
         List<KeyValue> results = new ArrayList<KeyValue>();
         while(rootScanner.next(results)) {
@@ -347,7 +347,7 @@ class HMerge implements HConstants {
         rootScanner.close();
         try {
           root.close();
-          
+
         } catch(IOException e) {
           LOG.error(e);
         }
@@ -382,7 +382,7 @@ class HMerge implements HConstants {
         delete.deleteColumns(HConstants.CATALOG_FAMILY,
             HConstants.SPLITB_QUALIFIER);
         root.delete(delete, null, true);
-        
+
         if(LOG.isDebugEnabled()) {
           LOG.debug("updated columns in row: " + Bytes.toString(regionsToDelete[r]));
         }
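
As the class comment above says, the entry point is a static merge method. A hypothetical driver follows; HMerge is package-private, so a real caller would have to live in org.apache.hadoop.hbase, and the table must be disabled first (or the cluster offline when merging .META.):

    package org.apache.hadoop.hbase;

    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.hbase.util.Bytes;

    /** Hypothetical caller of the package-private merge entry point above. */
    public class MergeSmallRegions {
      public static void main(String[] args) throws Exception {
        HBaseConfiguration conf = new HBaseConfiguration();
        FileSystem fs = FileSystem.get(conf);
        // Table must already be disabled; HMerge scans it and merges
        // adjacent regions that are small.
        HMerge.merge(conf, fs, Bytes.toBytes("mytable"));
      }
    }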

Modified: hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/HMsg.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/HMsg.java?rev=942184&r1=942183&r2=942184&view=diff
==============================================================================
--- hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/HMsg.java (original)
+++ hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/HMsg.java Fri May  7 19:17:48 2010
@@ -27,9 +27,9 @@ import org.apache.hadoop.hbase.util.Byte
 import org.apache.hadoop.io.Writable;
 
 /**
- * HMsg is for communicating instructions between the HMaster and the 
+ * HMsg is for communicating instructions between the HMaster and the
  * HRegionServers.
- * 
+ *
  * Most of the time the messages are simple but some messages are accompanied
  * by the region affected.  HMsg may also carry optional message.
  */
@@ -40,11 +40,11 @@ public class HMsg implements Writable {
   public static enum Type {
     /** null message */
     MSG_NONE,
-    
+
     // Message types sent from master to region server
     /** Start serving the specified region */
     MSG_REGION_OPEN,
-    
+
     /** Stop serving the specified region */
     MSG_REGION_CLOSE,
 
@@ -56,22 +56,22 @@ public class HMsg implements Writable {
 
     /** Region server is unknown to master. Restart */
     MSG_CALL_SERVER_STARTUP,
-    
+
     /** Master tells region server to stop */
     MSG_REGIONSERVER_STOP,
-    
+
     /** Stop serving the specified region and don't report back that it's
      * closed
      */
     MSG_REGION_CLOSE_WITHOUT_REPORT,
-  
+
     /** Stop serving user regions */
     MSG_REGIONSERVER_QUIESCE,
 
     // Message types sent from the region server to the master
     /** region server is now serving the specified region */
     MSG_REPORT_OPEN,
-    
+
     /** region server is no longer serving the specified region */
     MSG_REPORT_CLOSE,
 
@@ -80,7 +80,7 @@ public class HMsg implements Writable {
 
     /**
      * Region server split the region associated with this message.
-     * 
+     *
      * Note that this message is immediately followed by two MSG_REPORT_OPEN
      * messages, one for each of the new regions resulting from the split
      * @deprecated See MSG_REPORT_SPLIT_INCLUDES_DAUGHTERS
@@ -89,7 +89,7 @@ public class HMsg implements Writable {
 
     /**
      * Region server is shutting down
-     * 
+     *
      * Note that this message is followed by MSG_REPORT_CLOSE messages for each
      * region the region server was serving, unless it was told to quiesce.
      */
@@ -99,12 +99,12 @@ public class HMsg implements Writable {
      * regions
      */
     MSG_REPORT_QUIESCED,
-    
+
     /**
      * Flush
      */
     MSG_REGION_FLUSH,
-    
+
     /**
      * Run Major Compaction
      */
@@ -112,7 +112,7 @@ public class HMsg implements Writable {
 
     /**
      * Region server split the region associated with this message.
-     * 
+     *
      * Its like MSG_REPORT_SPLIT only it carries the daughters in the message
      * rather than send them individually in MSG_REPORT_OPEN messages.
      */
@@ -144,7 +144,7 @@ public class HMsg implements Writable {
   public HMsg(final HMsg.Type type) {
     this(type, new HRegionInfo(), null);
   }
-  
+
   /**
    * Construct a message with the specified message and HRegionInfo
    * @param type Message type
@@ -156,7 +156,7 @@ public class HMsg implements Writable {
 
   /**
    * Construct a message with the specified message and HRegionInfo
-   * 
+   *
    * @param type Message type
    * @param hri Region to which message <code>type</code> applies.  Cannot be
    * null.  If no info is associated, use the other constructor.
@@ -168,7 +168,7 @@ public class HMsg implements Writable {
 
   /**
    * Construct a message with the specified message and HRegionInfo
-   * 
+   *
    * @param type Message type
    * @param hri Region to which message <code>type</code> applies.  Cannot be
    * null.  If no info is associated, use the other constructor.
@@ -202,7 +202,7 @@ public class HMsg implements Writable {
   public Type getType() {
     return this.type;
   }
-  
+
   /**
    * @param other Message type to compare to
    * @return True if we are of same message type as <code>other</code>
@@ -281,7 +281,7 @@ public class HMsg implements Writable {
     }
     return result;
   }
-  
+
   // ////////////////////////////////////////////////////////////////////////////
   // Writable
   //////////////////////////////////////////////////////////////////////////////

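Most of the HMsg.Type catalog is touched above, so a small sketch of how these
messages are built and inspected may help. It uses only the no-region
constructor and getType() shown in this diff; the choice of
MSG_REGIONSERVER_QUIESCE is arbitrary:

    import org.apache.hadoop.hbase.HMsg;

    public class HMsgSketch {
      public static boolean isQuiesce(final HMsg msg) {
        // Type is a plain enum, so identity comparison is safe.
        return msg.getType() == HMsg.Type.MSG_REGIONSERVER_QUIESCE;
      }

      public static void main(String [] args) {
        HMsg msg = new HMsg(HMsg.Type.MSG_REGIONSERVER_QUIESCE);
        System.out.println(isQuiesce(msg));  // prints: true
      }
    }
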
Modified: hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/HRegionInfo.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/HRegionInfo.java?rev=942184&r1=942183&r2=942184&view=diff
==============================================================================
--- hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/HRegionInfo.java (original)
+++ hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/HRegionInfo.java Fri May  7 19:17:48 2010
@@ -79,7 +79,7 @@ public class HRegionInfo extends Version
     result ^= this.tableDesc.hashCode();
     this.hashCode = result;
   }
-  
+
   /**
    * Private constructor used constructing HRegionInfo for the catalog root and
    * first meta regions
@@ -98,10 +98,10 @@ public class HRegionInfo extends Version
     super();
     this.tableDesc = new HTableDescriptor();
   }
-  
+
   /**
    * Construct HRegionInfo with explicit parameters
-   * 
+   *
    * @param tableDesc the table descriptor
    * @param startKey first key in region
    * @param endKey end of key range
@@ -115,7 +115,7 @@ public class HRegionInfo extends Version
 
   /**
    * Construct HRegionInfo with explicit parameters
-   * 
+   *
    * @param tableDesc the table descriptor
    * @param startKey first key in region
    * @param endKey end of key range
@@ -131,7 +131,7 @@ public class HRegionInfo extends Version
 
   /**
    * Construct HRegionInfo with explicit parameters
-   * 
+   *
    * @param tableDesc the table descriptor
    * @param startKey first key in region
    * @param endKey end of key range
@@ -158,10 +158,10 @@ public class HRegionInfo extends Version
     this.tableDesc = tableDesc;
     setHashCode();
   }
-  
+
   /**
    * Construct a copy of another HRegionInfo
-   * 
+   *
    * @param other
    */
   public HRegionInfo(HRegionInfo other) {
@@ -177,7 +177,7 @@ public class HRegionInfo extends Version
     this.hashCode = other.hashCode();
     this.encodedName = other.getEncodedName();
   }
-  
+
   private static byte [] createRegionName(final byte [] tableName,
       final byte [] startKey, final long regionid) {
     return createRegionName(tableName, startKey, Long.toString(regionid));
@@ -216,7 +216,7 @@ public class HRegionInfo extends Version
     System.arraycopy(id, 0, b, offset, id.length);
     return b;
   }
-  
+
   /**
    * Separate elements of a regionName.
    * @param regionName
@@ -246,11 +246,11 @@ public class HRegionInfo extends Version
     byte [] startKey = HConstants.EMPTY_BYTE_ARRAY;
     if(offset != tableName.length + 1) {
       startKey = new byte[offset - tableName.length - 1];
-      System.arraycopy(regionName, tableName.length + 1, startKey, 0, 
+      System.arraycopy(regionName, tableName.length + 1, startKey, 0,
           offset - tableName.length - 1);
     }
     byte [] id = new byte[regionName.length - offset - 1];
-    System.arraycopy(regionName, offset + 1, id, 0, 
+    System.arraycopy(regionName, offset + 1, id, 0,
         regionName.length - offset - 1);
     byte [][] elements = new byte[3][];
     elements[0] = tableName;
@@ -258,7 +258,7 @@ public class HRegionInfo extends Version
     elements[2] = id;
     return elements;
   }
-  
+
   /** @return the endKey */
   public byte [] getEndKey(){
     return endKey;
@@ -283,7 +283,7 @@ public class HRegionInfo extends Version
   public String getRegionNameAsString() {
     return this.regionNameStr;
   }
-  
+
   /** @return the encoded region name */
   public synchronized int getEncodedName() {
     if (this.encodedName == NO_HASH) {
@@ -313,7 +313,7 @@ public class HRegionInfo extends Version
   public boolean isRootRegion() {
     return this.tableDesc.isRootRegion();
   }
-  
+
   /** @return true if this is the meta table */
   public boolean isMetaTable() {
     return this.tableDesc.isMetaTable();
@@ -323,14 +323,14 @@ public class HRegionInfo extends Version
   public boolean isMetaRegion() {
     return this.tableDesc.isMetaRegion();
   }
-  
+
   /**
    * @return True if has been split and has daughters.
    */
   public boolean isSplit() {
     return this.split;
   }
-  
+
   /**
    * @param split set split status
    */
@@ -363,7 +363,7 @@ public class HRegionInfo extends Version
       Bytes.toStringBinary(this.startKey) + "', ENDKEY => '" +
       Bytes.toStringBinary(this.endKey) +
       "', ENCODED => " + getEncodedName() + "," +
-      (isOffline()? " OFFLINE => true,": "") + 
+      (isOffline()? " OFFLINE => true,": "") +
       (isSplit()? " SPLIT => true,": "") +
       " TABLE => {" + this.tableDesc.toString() + "}";
   }
@@ -415,7 +415,7 @@ public class HRegionInfo extends Version
     tableDesc.write(out);
     out.writeInt(hashCode);
   }
-  
+
   @Override
   public void readFields(DataInput in) throws IOException {
     super.readFields(in);
@@ -429,16 +429,16 @@ public class HRegionInfo extends Version
     this.tableDesc.readFields(in);
     this.hashCode = in.readInt();
   }
-  
+
   //
   // Comparable
   //
-  
+
   public int compareTo(HRegionInfo o) {
     if (o == null) {
       return 1;
     }
-    
+
     // Are regions of same table?
     int result = this.tableDesc.compareTo(o.tableDesc);
     if (result != 0) {
@@ -450,7 +450,7 @@ public class HRegionInfo extends Version
     if (result != 0) {
       return result;
     }
-    
+
     // Compare end keys.
     return Bytes.compareTo(this.endKey, o.endKey);
   }

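The compareTo cleanup above also documents the sort order: table descriptor
first, then start key, then end key. A sketch under the assumption that
HTableDescriptor has a public String-name constructor (not shown in this
diff); the table name and keys are invented:

    import org.apache.hadoop.hbase.HRegionInfo;
    import org.apache.hadoop.hbase.HTableDescriptor;
    import org.apache.hadoop.hbase.util.Bytes;

    public class RegionOrderSketch {
      public static void main(String [] args) {
        HTableDescriptor desc = new HTableDescriptor("exampletable");  // assumed ctor
        HRegionInfo first = new HRegionInfo(desc, Bytes.toBytes(""), Bytes.toBytes("m"));
        HRegionInfo second = new HRegionInfo(desc, Bytes.toBytes("m"), Bytes.toBytes(""));
        // Same table, so ordering falls through to the start-key comparison.
        System.out.println(first.compareTo(second) < 0);  // prints: true
      }
    }
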
Modified: hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/HRegionLocation.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/HRegionLocation.java?rev=942184&r1=942183&r2=942184&view=diff
==============================================================================
--- hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/HRegionLocation.java (original)
+++ hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/HRegionLocation.java Fri May  7 19:17:48 2010
@@ -29,7 +29,7 @@ public class HRegionLocation implements 
 
   /**
    * Constructor
-   * 
+   *
    * @param regionInfo the HRegionInfo for the region
    * @param serverAddress the HServerAddress for the region server
    */
@@ -73,7 +73,7 @@ public class HRegionLocation implements 
     result ^= this.serverAddress.hashCode();
     return result;
   }
-  
+
   /** @return HRegionInfo */
   public HRegionInfo getRegionInfo(){
     return regionInfo;
@@ -87,7 +87,7 @@ public class HRegionLocation implements 
   //
   // Comparable
   //
-  
+
   public int compareTo(HRegionLocation o) {
     int result = this.regionInfo.compareTo(o.regionInfo);
     if(result == 0) {

Modified: hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/HServerAddress.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/HServerAddress.java?rev=942184&r1=942183&r2=942184&view=diff
==============================================================================
--- hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/HServerAddress.java (original)
+++ hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/HServerAddress.java Fri May  7 19:17:48 2010
@@ -49,10 +49,10 @@ public class HServerAddress implements W
     this.stringValue = address.getAddress().getHostAddress() + ":" +
       address.getPort();
   }
-  
+
   /**
    * Construct an HServerAddress from a string of the form hostname:port
-   * 
+   *
    * @param hostAndPort format 'hostname:port'
    */
   public HServerAddress(String hostAndPort) {
@@ -66,7 +66,7 @@ public class HServerAddress implements W
     this.address = new InetSocketAddress(host, port);
     this.stringValue = hostAndPort;
   }
-  
+
   /**
    * Construct an HServerAddress from a hostname and port number
    * @param bindAddress host name
@@ -76,10 +76,10 @@ public class HServerAddress implements W
     this.address = new InetSocketAddress(bindAddress, port);
     this.stringValue = bindAddress + ":" + port;
   }
-  
+
   /**
    * Construct an HServerAddress from another HServerAddress
-   * 
+   *
    * @param other the HServerAddress to copy from
    */
   public HServerAddress(HServerAddress other) {
@@ -98,7 +98,7 @@ public class HServerAddress implements W
   public int getPort() {
     return address.getPort();
   }
-  
+
   /** @return host name */
   public String getHostname() {
     return address.getHostName();
@@ -143,7 +143,7 @@ public class HServerAddress implements W
     result ^= this.stringValue.hashCode();
     return result;
   }
-  
+
   //
   // Writable
   //
@@ -151,11 +151,11 @@ public class HServerAddress implements W
   public void readFields(DataInput in) throws IOException {
     String bindAddress = in.readUTF();
     int port = in.readInt();
-    
+
     if(bindAddress == null || bindAddress.length() == 0) {
       address = null;
       stringValue = null;
-      
+
     } else {
       address = new InetSocketAddress(bindAddress, port);
       stringValue = bindAddress + ":" + port;
@@ -166,17 +166,17 @@ public class HServerAddress implements W
     if (address == null) {
       out.writeUTF("");
       out.writeInt(0);
-      
+
     } else {
       out.writeUTF(address.getAddress().getHostAddress());
       out.writeInt(address.getPort());
     }
   }
-  
+
   //
   // Comparable
   //
-  
+
   public int compareTo(HServerAddress o) {
     // Addresses as Strings may not compare though address is for the one
     // server with only difference being that one address has hostname

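A short usage sketch for the hostname:port constructor documented above; the
address is invented:

    import org.apache.hadoop.hbase.HServerAddress;

    public class AddressSketch {
      public static void main(String [] args) {
        HServerAddress addr = new HServerAddress("regionserver1.example.com:60020");
        System.out.println(addr.getHostname() + " / " + addr.getPort());
      }
    }
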
Modified: hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/HServerInfo.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/HServerInfo.java?rev=942184&r1=942183&r2=942184&view=diff
==============================================================================
--- hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/HServerInfo.java (original)
+++ hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/HServerInfo.java Fri May  7 19:17:48 2010
@@ -31,7 +31,7 @@ import org.apache.hadoop.io.WritableComp
 /**
  * HServerInfo contains metainfo about an HRegionServer.  Currently it only
  * contains the server start code.
- * 
+ *
  * In the future it will contain information about the source machine and
  * load statistics.
  */
@@ -46,10 +46,10 @@ public class HServerInfo implements Writ
 
   /** default constructor - used by Writable */
   public HServerInfo() {
-    this(new HServerAddress(), 0, 
+    this(new HServerAddress(), 0,
         HConstants.DEFAULT_REGIONSERVER_INFOPORT, "default name");
   }
-  
+
   /**
    * Constructor
    * @param serverAddress
@@ -64,7 +64,7 @@ public class HServerInfo implements Writ
     this.infoPort = infoPort;
     this.name = name;
   }
-  
+
   /**
    * Construct a new object using another as input (like a copy constructor)
    * @param other
@@ -95,7 +95,7 @@ public class HServerInfo implements Writ
   public synchronized HServerAddress getServerAddress() {
     return new HServerAddress(serverAddress);
   }
-  
+
   /**
    * Change the server address.
    * @param serverAddress New server address
@@ -104,26 +104,26 @@ public class HServerInfo implements Writ
     this.serverAddress = serverAddress;
     this.serverName = null;
   }
- 
+
   /** @return the server start code */
   public synchronized long getStartCode() {
     return startCode;
   }
-  
+
   /**
    * @return Port the info server is listening on.
    */
   public int getInfoPort() {
     return this.infoPort;
   }
-  
+
   /**
    * @param infoPort - new port of info server
    */
   public void setInfoPort(int infoPort) {
     this.infoPort = infoPort;
   }
-  
+
   /**
    * @param startCode the startCode to set
    */
@@ -131,7 +131,7 @@ public class HServerInfo implements Writ
     this.startCode = startCode;
     this.serverName = null;
   }
-  
+
   /**
    * @return the server name in the form hostname_startcode_port
    */
@@ -148,7 +148,7 @@ public class HServerInfo implements Writ
     }
     return this.serverName;
   }
-  
+
   /**
    * Get the hostname of the server
    * @return hostname
@@ -156,7 +156,7 @@ public class HServerInfo implements Writ
   public String getName() {
     return name;
   }
- 
+
   /**
    * Set the hostname of the server
    * @param name hostname
@@ -201,7 +201,7 @@ public class HServerInfo implements Writ
 
 
   // Writable
-  
+
   public void readFields(DataInput in) throws IOException {
     this.serverAddress.readFields(in);
     this.startCode = in.readLong();
@@ -229,7 +229,7 @@ public class HServerInfo implements Writ
   private static String getServerName(HServerInfo info) {
     return getServerName(info.getServerAddress(), info.getStartCode());
   }
-  
+
   /**
    * @param serverAddress in the form hostname:port
    * @param startCode

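For orientation, a sketch of the server-name plumbing touched above. The
four-argument constructor mirrors the delegation in the default constructor
shown in this diff; the start code, ports, and hostname are invented:

    import org.apache.hadoop.hbase.HServerAddress;
    import org.apache.hadoop.hbase.HServerInfo;

    public class ServerInfoSketch {
      public static void main(String [] args) {
        HServerInfo info = new HServerInfo(
            new HServerAddress("regionserver1.example.com:60020"),
            1273259868000L,               // start code (invented)
            60030,                        // info server port (invented)
            "regionserver1.example.com"); // hostname
        // Per the javadoc above, this folds hostname, start code and port
        // into a single server name.
        System.out.println(info.getServerName());
      }
    }
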
Modified: hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/HServerLoad.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/HServerLoad.java?rev=942184&r1=942183&r2=942184&view=diff
==============================================================================
--- hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/HServerLoad.java (original)
+++ hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/HServerLoad.java Fri May  7 19:17:48 2010
@@ -49,7 +49,7 @@ public class HServerLoad implements Writ
   /** per-region load metrics */
   private ArrayList<RegionLoad> regionLoad = new ArrayList<RegionLoad>();
 
-  /** 
+  /**
    * Encapsulates per-region loading metrics.
    */
   public static class RegionLoad implements Writable {
@@ -82,7 +82,7 @@ public class HServerLoad implements Writ
      * @param storefileIndexSizeMB
      */
     public RegionLoad(final byte[] name, final int stores,
-        final int storefiles, final int storefileSizeMB, 
+        final int storefiles, final int storefileSizeMB,
         final int memstoreSizeMB, final int storefileIndexSizeMB) {
       this.name = name;
       this.stores = stores;
@@ -239,7 +239,7 @@ public class HServerLoad implements Writ
   public HServerLoad() {
     super();
   }
-  
+
   /**
    * Constructor
    * @param numberOfRequests
@@ -265,7 +265,7 @@ public class HServerLoad implements Writ
   /**
    * Originally, this method factored in the effect of requests going to the
    * server as well. However, this does not interact very well with the current
-   * region rebalancing code, which only factors number of regions. For the 
+   * region rebalancing code, which only factors number of regions. For the
    * interim, until we can figure out how to make rebalancing use all the info
    * available, we're just going to make load purely the number of regions.
    *
@@ -285,7 +285,7 @@ public class HServerLoad implements Writ
   public String toString() {
     return toString(1);
   }
-  
+
   /**
    * Returns toString() with the number of requests divided by the message
    * interval in seconds
@@ -330,9 +330,9 @@ public class HServerLoad implements Writ
     result ^= Integer.valueOf(numberOfRegions).hashCode();
     return result;
   }
-  
+
   // Getters
-  
+
   /**
    * @return the numberOfRegions
    */

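The RegionLoad constructor reflowed above takes its metrics in a fixed order,
which is easy to misread; a sketch with invented numbers makes the parameter
order explicit:

    import org.apache.hadoop.hbase.HServerLoad;
    import org.apache.hadoop.hbase.util.Bytes;

    public class RegionLoadSketch {
      public static void main(String [] args) {
        HServerLoad.RegionLoad load = new HServerLoad.RegionLoad(
            Bytes.toBytes("exampletable,,1273259868"),  // region name (invented)
            1,     // stores
            2,     // storefiles
            128,   // storefileSizeMB
            64,    // memstoreSizeMB
            1);    // storefileIndexSizeMB
        System.out.println(load);
      }
    }
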
Modified: hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/HStoreKey.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/HStoreKey.java?rev=942184&r1=942183&r2=942184&view=diff
==============================================================================
--- hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/HStoreKey.java (original)
+++ hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/HStoreKey.java Fri May  7 19:17:48 2010
@@ -67,12 +67,12 @@ public class HStoreKey implements Writab
   public HStoreKey() {
     super();
   }
-  
+
   /**
    * Create an HStoreKey specifying only the row
    * The column defaults to the empty string, the time stamp defaults to
    * Long.MAX_VALUE and the table defaults to the empty string
-   * 
+   *
    * @param row - row key
    */
   public HStoreKey(final byte [] row) {
@@ -83,17 +83,17 @@ public class HStoreKey implements Writab
    * Create an HStoreKey specifying only the row
    * The column defaults to the empty string, the time stamp defaults to
    * Long.MAX_VALUE and the table defaults to the empty string
-   * 
+   *
    * @param row - row key
    */
   public HStoreKey(final String row) {
     this(Bytes.toBytes(row), Long.MAX_VALUE);
   }
- 
+
   /**
    * Create an HStoreKey specifying the row and timestamp
    * The column and table names default to the empty string
-   * 
+   *
    * @param row row key
    * @param timestamp timestamp value
    */
@@ -105,7 +105,7 @@ public class HStoreKey implements Writab
    * Create an HStoreKey specifying the row and column names
    * The timestamp defaults to LATEST_TIMESTAMP
    * and table name defaults to the empty string
-   * 
+   *
    * @param row row key
    * @param column column key
    */
@@ -117,7 +117,7 @@ public class HStoreKey implements Writab
    * Create an HStoreKey specifying the row and column names
    * The timestamp defaults to LATEST_TIMESTAMP
    * and table name defaults to the empty string
-   * 
+   *
    * @param row row key
    * @param column column key
    */
@@ -127,7 +127,7 @@ public class HStoreKey implements Writab
 
   /**
    * Create an HStoreKey specifying all the fields
-   * Does not make copies of the passed byte arrays. Presumes the passed 
+   * Does not make copies of the passed byte arrays. Presumes the passed
    * arrays immutable.
    * @param row row key
    * @param column column key
@@ -139,7 +139,7 @@ public class HStoreKey implements Writab
 
   /**
    * Create an HStoreKey specifying all the fields with specified table
-   * Does not make copies of the passed byte arrays. Presumes the passed 
+   * Does not make copies of the passed byte arrays. Presumes the passed
    * arrays immutable.
    * @param row row key
    * @param column column key
@@ -154,7 +154,7 @@ public class HStoreKey implements Writab
 
   /**
    * Constructs a new HStoreKey from another
-   * 
+   *
    * @param other the source key
    */
   public HStoreKey(final HStoreKey other) {
@@ -167,16 +167,16 @@ public class HStoreKey implements Writab
 
   /**
    * Change the value of the row key
-   * 
+   *
    * @param newrow new row key value
    */
   public void setRow(final byte [] newrow) {
     this.row = newrow;
   }
-  
+
   /**
    * Change the value of the column in this key
-   * 
+   *
    * @param c new column family value
    */
   public void setColumn(final byte [] c) {
@@ -185,16 +185,16 @@ public class HStoreKey implements Writab
 
   /**
    * Change the value of the timestamp field
-   * 
+   *
    * @param timestamp new timestamp value
    */
   public void setVersion(final long timestamp) {
     this.timestamp = timestamp;
   }
-  
+
   /**
    * Set the value of this HStoreKey from the supplied key
-   * 
+   *
    * @param k key value to copy
    */
   public void set(final HStoreKey k) {
@@ -202,12 +202,12 @@ public class HStoreKey implements Writab
     this.column = k.getColumn();
     this.timestamp = k.getTimestamp();
   }
-  
+
   /** @return value of row key */
   public byte [] getRow() {
     return row;
   }
-  
+
   /** @return value of column */
   public byte [] getColumn() {
     return this.column;
@@ -224,17 +224,17 @@ public class HStoreKey implements Writab
    * @return True if same row and column.
    * @see #matchesWithoutColumn(HStoreKey)
    * @see #matchesRowFamily(HStoreKey)
-   */ 
+   */
   public boolean matchesRowCol(final HStoreKey other) {
     return HStoreKey.equalsTwoRowKeys(getRow(), other.getRow()) &&
       Bytes.equals(getColumn(), other.getColumn());
   }
-  
+
   /**
    * Compares the row and timestamp of two keys
-   * 
+   *
    * @param other Key to compare against. Compares row and timestamp.
-   * 
+   *
    * @return True if same row and timestamp is greater than <code>other</code>
    * @see #matchesRowCol(HStoreKey)
    * @see #matchesRowFamily(HStoreKey)
@@ -246,9 +246,9 @@ public class HStoreKey implements Writab
 
   /**
    * Compares the row and column family of two keys
-   * 
+   *
    * @param that Key to compare against. Compares row and column family
-   * 
+   *
    * @return true if same row and column family
    * @see #matchesRowCol(HStoreKey)
    * @see #matchesWithoutColumn(HStoreKey)
@@ -307,7 +307,7 @@ public class HStoreKey implements Writab
   // Comparable
 
   /**
-   * @param o 
+   * @param o
    * @return int
    * @deprecated Use Comparators instead.  This can give wrong results.
    */
@@ -329,7 +329,7 @@ public class HStoreKey implements Writab
     if (left == null && right == null) return 0;
     if (left == null) return -1;
     if (right == null) return 1;
-    
+
     int result = Bytes.compareTo(left.getRow(), right.getRow());
     if (result != 0) {
       return result;
@@ -357,7 +357,7 @@ public class HStoreKey implements Writab
    * @param column
    * @return New byte array that holds <code>column</code> family prefix only
    * (Does not include the colon DELIMITER).
-   * @throws ColumnNameParseException 
+   * @throws ColumnNameParseException
    * @see #parseColumn(byte[])
    */
   public static byte [] getFamily(final byte [] column)
@@ -372,7 +372,7 @@ public class HStoreKey implements Writab
     System.arraycopy(column, 0, result, 0, index);
     return result;
   }
-  
+
   /**
    * @param column
    * @return Return hash of family portion of passed column.
@@ -383,7 +383,7 @@ public class HStoreKey implements Writab
     // delimiter
     return Bytes.mapKey(column, index > 0? index: column.length);
   }
-  
+
   /**
    * @param family
    * @param column
@@ -429,7 +429,7 @@ public class HStoreKey implements Writab
    * @return Return array of size two whose first element has the family
    * prefix of passed column <code>c</code> and whose second element is the
    * column qualifier.
-   * @throws ColumnNameParseException 
+   * @throws ColumnNameParseException
    */
   public static byte [][] parseColumn(final byte [] c)
   throws ColumnNameParseException {
@@ -559,7 +559,7 @@ public class HStoreKey implements Writab
   }
 
   /**
-   * @return The bytes of <code>hsk</code> gotten by running its 
+   * @return The bytes of <code>hsk</code> gotten by running its
    * {@link Writable#write(java.io.DataOutput)} method.
    * @throws IOException
    */
@@ -573,7 +573,7 @@ public class HStoreKey implements Writab
    * row and column.  This is a customized version of
    * {@link Writables#getBytes(Writable)}
    * @param hsk Instance
-   * @return The bytes of <code>hsk</code> gotten by running its 
+   * @return The bytes of <code>hsk</code> gotten by running its
    * {@link Writable#write(java.io.DataOutput)} method.
    * @throws IOException
    */
@@ -796,7 +796,7 @@ public class HStoreKey implements Writab
       if (left == null && right == null) return 0;
       if (left == null) return -1;
       if (right == null) return 1;
-      
+
       byte [] lrow = left.getRow();
       byte [] rrow = right.getRow();
       int result = compareRows(lrow, 0, lrow.length, rrow, 0, rrow.length);
@@ -962,7 +962,7 @@ public class HStoreKey implements Writab
   }
 
   /**
-   * RawComparator for plain -- i.e. non-catalog table -- HStoreKeys 
+   * RawComparator for plain -- i.e. non-catalog table -- HStoreKeys
    * (tables other than -ROOT- and .META.).  Compares at byte level.  Knows how to
    * handle the vints that introduce row and columns in the HSK byte array
    * representation. Adds

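Finally, a sketch of the row/column matching semantics documented in the
HStoreKey hunks above. The keys are invented; the point is that
matchesRowCol() compares row and column only, ignoring the timestamp:

    import org.apache.hadoop.hbase.HStoreKey;
    import org.apache.hadoop.hbase.util.Bytes;

    public class StoreKeySketch {
      public static void main(String [] args) {
        HStoreKey a = new HStoreKey(Bytes.toBytes("row1"), Bytes.toBytes("info:name"));
        HStoreKey b = new HStoreKey(a);  // copy constructor from the hunk above
        b.setVersion(42L);               // change only the timestamp
        System.out.println(a.matchesRowCol(b));  // prints: true
      }
    }
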
Modified: hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/HTableDescriptor.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/HTableDescriptor.java?rev=942184&r1=942183&r2=942184&view=diff
==============================================================================
--- hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/HTableDescriptor.java (original)
+++ hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/HTableDescriptor.java Fri May  7 19:17:48 2010
@@ -91,18 +91,18 @@ ISerializable {
   public static final boolean DEFAULT_READONLY = false;
 
   public static final long DEFAULT_MEMSTORE_FLUSH_SIZE = 1024*1024*64L;
-  
+
   public static final long DEFAULT_MAX_FILESIZE = 1024*1024*256L;
-    
+
   private volatile Boolean meta = null;
   private volatile Boolean root = null;
 
   // Key is hash of the family name.
   public final Map<byte [], HColumnDescriptor> families =
     new TreeMap<byte [], HColumnDescriptor>(Bytes.BYTES_RAWCOMPARATOR);
-   
+
   /**
-   * Private constructor used internally for creating table descriptors for 
+   * Private constructor used internally for creating table descriptors for
    * catalog tables: e.g. .META. and -ROOT-.
    */
   protected HTableDescriptor(final byte [] name, HColumnDescriptor[] families) {
@@ -115,7 +115,7 @@ ISerializable {
   }
 
   /**
-   * Private constructor used internally for creating table descriptors for 
+   * Private constructor used internally for creating table descriptors for
    * catalog tables: e.g. .META. and -ROOT-.
    */
   protected HTableDescriptor(final byte [] name, HColumnDescriptor[] families,
@@ -131,8 +131,8 @@ ISerializable {
       this.values.put(entry.getKey(), entry.getValue());
     }
   }
-  
-  
+
+
   /**
    * Constructs an empty object.
    * For deserializing an HTableDescriptor instance only.
@@ -172,7 +172,7 @@ ISerializable {
   /**
    * Constructor.
    * <p>
-   * Makes a deep copy of the supplied descriptor. 
+   * Makes a deep copy of the supplied descriptor.
    * Can make a modifiable descriptor from an UnmodifyableHTableDescriptor.
    * @param desc The descriptor.
    */
@@ -287,7 +287,7 @@ ISerializable {
   public byte[] getValue(byte[] key) {
     return getValue(new ImmutableBytesWritable(key));
   }
-  
+
   private byte[] getValue(final ImmutableBytesWritable key) {
     ImmutableBytesWritable ibw = values.get(key);
     if (ibw == null)
@@ -320,7 +320,7 @@ ISerializable {
   public void setValue(byte[] key, byte[] value) {
     setValue(new ImmutableBytesWritable(key), value);
   }
-  
+
   /*
    * @param key The key.
    * @param value The value.
@@ -409,7 +409,7 @@ ISerializable {
       return Long.valueOf(Bytes.toString(value)).longValue();
     return DEFAULT_MEMSTORE_FLUSH_SIZE;
   }
-  
+
   /**
    * @param memstoreFlushSize memory cache flush size for each hregion
    */
@@ -600,7 +600,7 @@ ISerializable {
   public Collection<HColumnDescriptor> getFamilies() {
     return Collections.unmodifiableCollection(this.families.values());
   }
-  
+
   /**
    * @return Immutable sorted set of the keys of the families.
    */
@@ -646,7 +646,7 @@ ISerializable {
           10,  // Ten is an arbitrary number.  Keep versions to help debugging.
           Compression.Algorithm.NONE.getName(), true, true, 8 * 1024,
           HConstants.FOREVER, false) });
-  
+
   /** Table descriptor for <code>.META.</code> catalog table */
   public static final HTableDescriptor META_TABLEDESC = new HTableDescriptor(
       HConstants.META_TABLE_NAME, new HColumnDescriptor[] {