You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@hbase.apache.org by li...@apache.org on 2014/03/12 22:17:20 UTC
svn commit: r1576909 [1/18] - in /hbase/branches/0.89-fb/src: ./
examples/thrift/ main/java/org/apache/hadoop/hbase/
main/java/org/apache/hadoop/hbase/avro/
main/java/org/apache/hadoop/hbase/avro/generated/
main/java/org/apache/hadoop/hbase/client/ mai...
Author: liyin
Date: Wed Mar 12 21:17:13 2014
New Revision: 1576909
URL: http://svn.apache.org/r1576909
Log:
[HBASE-10699] Reduce unnecessary object allocation related to ArrayList
[HBASE-10699] Fix the building problem.
[HBASE-10698] Updating the HBase.thrift file to work with Thrift2 C++ client
[HBASE-10709] ClientScanner with prefetching
[HBASE-9930] Fix TestPerRequestProfiling
[HBASE-8500] Provide a drain only option to rolling_restart
[master] Add a param for hbck to decide whether to clear meta entry for unassigned region
Summary: Multiple Commits
Added:
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/client/ClientZKConnection.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/client/HTableAsync.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/client/HTableAsyncInterface.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/client/HTableClientScanner.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/client/IntegerOrResultOrException.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/client/ResultScannerIterator.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/client/TMultiResponse.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/client/TRowMutations.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/filter/TFilter.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/io/LimitInputStream.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/io/hfile/histogram/HistogramUtils.java
- copied, changed from r1576907, hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/YouAreDeadException.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/ipc/HConnectionParams.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/ipc/ThriftClientInterface.java
- copied, changed from r1576907, hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/YouAreDeadException.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/ipc/ThriftHRegionInterface.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/ThriftHRegionServer.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/thrift/HBaseNiftyThriftServer.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/thrift/SelfRetryingListenableFuture.java
hbase/branches/0.89-fb/src/main/resources/org/apache/hadoop/hbase/thrift/HBase.thrift
hbase/branches/0.89-fb/src/main/resources/org/apache/hadoop/hbase/thrift/LegacyHBase.thrift
hbase/branches/0.89-fb/src/main/resources/org/apache/hadoop/hbase/thrift/gen_thrift_from_swift.sh (with props)
hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/TestHServerAddress.java
hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/UnstableTestSuite.java
- copied, changed from r1576907, hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/YouAreDeadException.java
hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/client/TestDelete.java
hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/client/TestGet.java
hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/client/TestHTableClientScanner.java
hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/client/TestIntegerOrResultOrException.java
hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/client/TestMultiAction.java
hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/client/TestMultiPut.java
hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/client/TestMultiPutResponse.java
hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/client/TestPut.java
hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/client/TestResult.java
hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/client/TestTMultiResponse.java
hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/thrift/swift/
hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/thrift/swift/TestHRegionInterfaceSimpleFunctions.java
hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/thrift/swift/TestMasterToRSUseThrift.java
hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/thrift/swift/TestServeRPCAndThriftConcurrently.java
hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/thrift/swift/TestServerSideException.java
hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/thrift/swift/TestSimpleOperations.java
hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/thrift/swift/TestSimpleRowMutations.java
hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/thrift/swift/TestSimpleScan.java
hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/thrift/swift/TestSwiftSerDe.java
hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/thrift/swift/TestThriftExceptions.java
hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/thrift/swift/TestThriftMultiRSScenario.java
hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/util/TagRunner.java
hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/util/TestTag.java
- copied, changed from r1576907, hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/io/hfile/Cacheable.java
hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/util/TitanUserInfo.java
hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/util/rpcbench/
hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/util/rpcbench/BenchmarkClient.java
- copied, changed from r1576907, hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/YouAreDeadException.java
hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/util/rpcbench/BenchmarkFactory.java
- copied, changed from r1576907, hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/YouAreDeadException.java
hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/util/rpcbench/HBaseRPCBenchmarkTool.java
hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/util/rpcbench/HBaseRPCProtocolComparison.java
hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/util/rpcbench/HadoopRPCBenchmarkClient.java
hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/util/rpcbench/HadoopRPCBenchmarkFactory.java
hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/util/rpcbench/ThriftBenchmarkClient.java
hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/util/rpcbench/ThriftBenchmarkFactory.java
Modified:
hbase/branches/0.89-fb/src/examples/thrift/README.txt
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/Abortable.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/HConstants.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/HMsg.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/HServerAddress.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/HServerInfo.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/HServerLoad.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/KeyValue.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/LocalHBaseCluster.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/YouAreDeadException.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/avro/AvroServer.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/avro/AvroUtil.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/avro/generated/ACompressionAlgorithm.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/avro/package.html
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/client/ClientLocalScanner.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/client/Delete.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/client/Get.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/client/HBaseFsck.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/client/HBaseFsckRepair.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/client/HBaseLocalityCheck.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/client/HConnection.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/client/HTable.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/client/HTableInterface.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/client/MetaScanner.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/client/MultiAction.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/client/MultiPut.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/client/MultiPutResponse.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/client/MultiResponse.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/client/Mutation.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/client/Operation.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/client/ParallelScanner.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/client/Put.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/client/Result.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/client/ResultScanner.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/client/ResultScannerImpl.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/client/RowLock.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/client/RowMutation.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/client/RowMutations.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/client/Scan.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/client/ScannerCallable.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/client/ServerCallable.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/executor/HBaseExecutorService.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/executor/RegionTransitionEventData.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/filter/FirstKeyOnlyFilter.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/filter/KeyOnlyFilter.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/filter/MultipleColumnPrefixFilter.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/filter/ParseConstants.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/filter/ParseFilter.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/io/HbaseObjectWritable.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/io/ImmutableBytesWritable.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/io/TimeRange.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/io/encoding/EncodedDataBlock.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/io/hfile/AbstractHFileReader.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/io/hfile/AbstractHFileWriter.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheColumnFamilySummary.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/io/hfile/Cacheable.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/io/hfile/FixedFileTrailer.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/io/hfile/InlineBlockWriter.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/io/hfile/histogram/HFileHistogram.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/io/hfile/histogram/HiveBasedNumericHistogram.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/io/hfile/histogram/UniformSplitHFileHistogram.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/ipc/HBaseClient.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/ipc/HBaseRPC.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/ipc/HBaseRPCOptions.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/ipc/HBaseServer.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/ipc/HMasterInterface.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/ipc/HMasterRegionInterface.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/ipc/HRegionInterface.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/ipc/ProfilingData.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/ipc/thrift/HBaseToThriftAdapter.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/mapred/TableMapReduceUtil.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/mapreduce/Export.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/mapreduce/HLogSplitter.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/mapreduce/ImportTsv.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/mapreduce/PutSortReducer.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/mapreduce/RowCounter.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/mapreduce/RowMutationSortReducer.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/mapreduce/SimpleTotalOrderPartitioner.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormatBase.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/mapreduce/TableRecordReaderImpl.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/mapreduce/hadoopbackport/InputSampler.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/mapreduce/hadoopbackport/TotalOrderPartitioner.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/mapreduce/loadtest/CompositeOperationGenerator.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/master/AssignmentDomain.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/master/AssignmentPlan.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/master/AssignmentVerificationReport.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/master/BaseScanner.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/master/ChangeTableState.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/master/ColumnOperation.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/master/DeleteColumn.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/master/ModifyTableMeta.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/master/MultiColumnOperation.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/master/ProcessRegionOpen.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/master/ProcessServerShutdown.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/master/RackManager.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/master/RegionAssignmentSnapshot.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/master/RegionManager.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/master/RegionPlacement.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/master/RegionPlacementPolicy.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/master/RegionServerOperationQueue.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/master/TableDelete.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/master/TableOperation.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/master/ZKUnassignedWatcher.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/master/metrics/MasterMetrics.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/metrics/HBaseInfo.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/metrics/MetricsString.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/metrics/PersistentMetricsTimeVaryingRate.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/metrics/RequestMetrics.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/monitoring/MonitoredRPCHandler.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/monitoring/MonitoredRPCHandlerImpl.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/monitoring/MonitoredTask.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/monitoring/MonitoredTaskImpl.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/monitoring/TaskMonitor.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/CompactUtility.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionManager.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/ExplicitColumnTracker.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionThriftServer.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/InternalScanner.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueHeap.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/NonLazyKeyValueScanner.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/RegionOverloadedException.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/RegionScanner.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/ScanQueryMatcher.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/SplitLogWorker.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/SplitRequest.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/metrics/RegionServerDynamicMetrics.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/metrics/RegionServerDynamicStatistics.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/metrics/SchemaConfigured.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/metrics/SchemaMetrics.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLog.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLogKey.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLogPrettyPrinter.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SequenceFileLogReader.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/replication/ReplicationZookeeperWrapper.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/rest/StorageClusterStatusResource.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/rest/client/RemoteHTable.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/thrift/HbaseHandlerMetricsProxy.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/thrift/TBoundedThreadPoolServer.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/thrift/ThriftServerRunner.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/thrift/ThriftUtilities.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/thrift/generated/AlreadyExists.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/thrift/generated/BatchMutation.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/thrift/generated/ColumnDescriptor.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/thrift/generated/Hbase.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/thrift/generated/HbaseConstants.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/thrift/generated/IOError.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/thrift/generated/IllegalArgument.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/thrift/generated/Mutation.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/thrift/generated/TCell.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/thrift/generated/TRegionInfo.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/thrift/generated/TRowResult.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/thrift/generated/TScan.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/util/ByteBloomFilter.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/util/Bytes.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/util/CompoundBloomFilter.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/util/CompoundBloomFilterBase.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/util/Hash.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/util/Histogram.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/util/IdLock.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/util/InfoServer.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/util/JVMClusterUtil.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/util/MD5Hash.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/util/Pair.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/util/ParamFormat.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/util/RetryCounter.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/util/RollingRestart.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/zookeeper/HQuorumPeer.java
hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/zookeeper/RecoverableZooKeeper.java
hbase/branches/0.89-fb/src/main/javadoc/org/apache/hadoop/hbase/thrift/package.html
hbase/branches/0.89-fb/src/main/resources/hbase-webapps/master/assignmentPlan.jsp
hbase/branches/0.89-fb/src/main/resources/hbase-webapps/master/master.jsp
hbase/branches/0.89-fb/src/main/resources/hbase-webapps/master/table.jsp
hbase/branches/0.89-fb/src/main/resources/hbase-webapps/master/tablesDetailed.jsp
hbase/branches/0.89-fb/src/main/resources/hbase-webapps/master/zk.jsp
hbase/branches/0.89-fb/src/main/resources/hbase-webapps/regionserver/regionserver.jsp
hbase/branches/0.89-fb/src/main/resources/hbase-webapps/taskmonitor/taskmonitor.jsp
hbase/branches/0.89-fb/src/main/resources/org/apache/hadoop/hbase/thrift/thrift.sh
hbase/branches/0.89-fb/src/main/ruby/hbase/admin.rb
hbase/branches/0.89-fb/src/main/ruby/shell.rb
hbase/branches/0.89-fb/src/main/ruby/shell/commands/alter_async.rb
hbase/branches/0.89-fb/src/main/ruby/shell/commands/alter_status.rb
hbase/branches/0.89-fb/src/main/ruby/shell/commands/scan.rb
hbase/branches/0.89-fb/src/main/ruby/shell/commands/show_filters.rb
hbase/branches/0.89-fb/src/saveVersion.sh
hbase/branches/0.89-fb/src/site/resources/css/site.css
hbase/branches/0.89-fb/src/site/site.xml
hbase/branches/0.89-fb/src/site/xdoc/acid-semantics.xml
hbase/branches/0.89-fb/src/site/xdoc/bulk-loads.xml
hbase/branches/0.89-fb/src/site/xdoc/index.xml
hbase/branches/0.89-fb/src/site/xdoc/metrics.xml
hbase/branches/0.89-fb/src/site/xdoc/old_news.xml
hbase/branches/0.89-fb/src/site/xdoc/pseudo-distributed.xml
hbase/branches/0.89-fb/src/site/xdoc/replication.xml
hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/MultithreadedTestUtil.java
hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/TestGlobalMemStoreSize.java
hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/TestKeyValue.java
hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/TestMultiParallelPut.java
hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/TestPerColumnFamilyFlush.java
hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/TestRegionRebalancing.java
hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/TestZooKeeper.java
hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/avro/TestAvroServer.java
hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/client/TestAdmin.java
hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/client/TestBatchedUpload.java
hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/client/TestClientLocalScanner.java
hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/client/TestFastFail.java
hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide2.java
hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/client/TestHCM.java
hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/client/TestHTable.java
hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/client/TestMaxResponseSize.java
hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/client/TestMetaScanner.java
hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/client/TestMultipleTimestamps.java
hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/client/TestOperation.java
hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/client/TestParallelScanner.java
hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/client/TestTimestampsFilter.java
hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/filter/TestFilter.java
hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/io/hfile/TestBlockCacheColumnFamilySummary.java
hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java
hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCachedBlockQueue.java
hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/io/hfile/TestFixedFileTrailer.java
hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/io/hfile/TestForceCacheImportantBlocks.java
hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java
hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/io/hfile/TestScannerSelectionUsingTTL.java
hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/ipc/TestPerRequestProfiling.java
hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/ipc/TestRPCCompression.java
hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/loadtest/ColumnFamilyProperties.java
hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/loadtest/DataGenerator.java
hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/loadtest/LoadTester.java
hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/loadtest/MultiThreadedAction.java
hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/loadtest/MultiThreadedWriter.java
hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/loadtest/RegionSplitter.java
hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/manual/utils/DataGenerator.java
hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/manual/utils/HBaseUtils.java
hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/manual/utils/HdfsAppender.java
hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/mapreduce/NMapInputFormat.java
hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat.java
hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTsv.java
hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatNMappersPerRegion.java
hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/master/TestDistributedLogSplitting.java
hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/master/TestMaster.java
hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/master/TestROOTAssignment.java
hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/master/TestRegionPlacement.java
hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/master/TestRegionServerOperationQueue.java
hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/master/TestSplitLogManager.java
hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/master/TestZKBasedCloseRegion.java
hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/master/TestZKBasedReopenRegion.java
hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/monitoring/TestTaskMonitor.java
hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/regionserver/TestBlocksRead.java
hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/regionserver/TestFSErrorsExposed.java
hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionCloseRetry.java
hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/regionserver/TestMultiColumnScanner.java
hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/regionserver/TestQOS.java
hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/regionserver/TestQueryMatcher.java
hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/regionserver/TestScannerResets.java
hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitLogWorker.java
hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFileBlockCacheSummary.java
hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/regionserver/TestWideScanner.java
hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/regionserver/metrics/TestRpcMetricWrapper.java
hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/rest/HBaseRESTClusterTestBase.java
hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/rest/TestRowResource.java
hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/rest/TestScannersWithFilters.java
hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/rest/TestTableResource.java
hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/thrift/TestCallQueue.java
hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/thrift/TestHBCpp.java
hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/thrift/TestHeaderSendReceive.java
hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/thrift/TestNativeThriftClient.java
hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/thrift/TestThriftServerLegacy.java
hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/util/HBaseHomePath.java
hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/util/ProcessBasedLocalHBaseCluster.java
hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/util/RestartMetaTest.java
hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/util/TestByteBloomFilter.java
hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/util/TestBytes.java
hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/util/TestMiniClusterLoadSequential.java
hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/util/TestThreads.java
hbase/branches/0.89-fb/src/test/resources/hbase-site.xml
Modified: hbase/branches/0.89-fb/src/examples/thrift/README.txt
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/examples/thrift/README.txt?rev=1576909&r1=1576908&r2=1576909&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/examples/thrift/README.txt (original)
+++ hbase/branches/0.89-fb/src/examples/thrift/README.txt Wed Mar 12 21:17:13 2014
@@ -10,7 +10,7 @@ To run/compile this clients, you will fi
the language files:
thrift --gen cpp --gen java --gen rb --gen py -php \
- ../../../src/java/org/apache/hadoop/hbase/thrift/Hbase.thrift
+ ../../../src/java/org/apache/hadoop/hbase/thrift/LegacyHBase.thrift
See the individual DemoClient test files for more specific instructions on
running each test.
Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/Abortable.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/Abortable.java?rev=1576909&r1=1576908&r2=1576909&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/Abortable.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/Abortable.java Wed Mar 12 21:17:13 2014
@@ -36,7 +36,7 @@ public interface Abortable {
public void abort(String why, Throwable e);
/**
- * Check if the server or client was aborted.
+ * Check if the server or client was aborted.
* @return true if the server or client was aborted, false otherwise
*/
public boolean isAborted();
Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java?rev=1576909&r1=1576908&r2=1576909&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java Wed Mar 12 21:17:13 2014
@@ -19,6 +19,9 @@
*/
package org.apache.hadoop.hbase;
+import com.facebook.swift.codec.ThriftConstructor;
+import com.facebook.swift.codec.ThriftField;
+import com.facebook.swift.codec.ThriftStruct;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
@@ -35,7 +38,12 @@ import org.apache.hadoop.io.WritableComp
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
-import java.util.*;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+import java.util.TreeMap;
/**
* An HColumnDescriptor contains information about a column family such as the
@@ -46,6 +54,7 @@ import java.util.*;
* column and recreating it. If there is data stored in the column, it will be
* deleted when the column is deleted.
*/
+@ThriftStruct
public class HColumnDescriptor implements WritableComparable<HColumnDescriptor> {
static final Log LOG = LogFactory.getLog(HColumnDescriptor.class);
// For future backward compatibility
@@ -250,6 +259,16 @@ public class HColumnDescriptor implement
DEFAULT_TTL, DEFAULT_BLOOMFILTER);
}
+ @ThriftConstructor
+ public HColumnDescriptor(@ThriftField(1) final byte[] name,
+ @ThriftField(2) final Map<byte[], byte[]> valuesMap) {
+ this(name);
+ for (Map.Entry<byte[], byte[]> e : valuesMap.entrySet()) {
+ this.values.put(new ImmutableBytesWritable(e.getKey()),
+ new ImmutableBytesWritable(e.getValue()));
+ }
+ }
+
/**
* Constructor.
* Makes a deep copy of the supplied descriptor.
@@ -412,7 +431,8 @@ public class HColumnDescriptor implement
/**
* @return Name of this column family
*/
- public byte [] getName() {
+ @ThriftField(1)
+ public byte[] getName() {
return name;
}
@@ -453,6 +473,17 @@ public class HColumnDescriptor implement
return Collections.unmodifiableMap(values);
}
+ @ThriftField(2)
+ public Map<byte[], byte[]> getByteValues() {
+ Map<byte[], byte[]> byteMap =
+ new TreeMap<byte[], byte[]>(Bytes.BYTES_COMPARATOR);
+ for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> e :
+ this.values.entrySet()) {
+ byteMap.put(e.getKey().copyBytes(), e.getValue().copyBytes());
+ }
+ return byteMap;
+ }
+
/**
* @param key The key.
* @param value The value.
Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/HConstants.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/HConstants.java?rev=1576909&r1=1576908&r2=1576909&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/HConstants.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/HConstants.java Wed Mar 12 21:17:13 2014
@@ -19,14 +19,14 @@
*/
package org.apache.hadoop.hbase;
+import java.nio.ByteBuffer;
+
import org.apache.hadoop.hbase.io.hfile.Compression;
import org.apache.hadoop.hbase.ipc.HRegionInterface;
import org.apache.hadoop.hbase.regionserver.CompactionManager;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.nativeio.NativeIO;
-import java.nio.ByteBuffer;
-
/**
* HConstants holds a bunch of HBase-related constants
*/
@@ -191,9 +191,71 @@ public final class HConstants {
/** Parameter name for port region server listens on. */
public static final String REGIONSERVER_PORT = "hbase.regionserver.port";
+ /** Parameter name for the thrift clients to talk to the thrift server **/
+ public static final String REGIONSERVER_SWIFT_PORT = "hbase.regionserver.swift.port";
+
/** Default port region server listens on. */
public static final int DEFAULT_REGIONSERVER_PORT = 60020;
+ /** Parameter name for thrift port master listens to. */
+ public static final String MASTER_THRIFT_PORT = "hbase.master.thrift.port";
+
+ public static final int MASTER_THRIFT_PORT_DEFAULT = 60022;
+
+ /** The port on which the thrift server listens **/
+ public static final int DEFAULT_REGIONSERVER_SWIFT_PORT = 60021;
+
+ /** Boolean flag to represent whether to use Hadoop RPC or not **/
+ public static final String REGIONSERVER_USE_HADOOP_RPC = "hbase.regionserver.rpc.hadoop.enabled";
+
+ /** Default value of REGIONSERVER_USE_HADOOP_RPC boolean flag **/
+ public static final boolean DEFAULT_REGIONSERVER_USE_HADOOP_RPC = false;
+
+ /** Boolean flag to represent whether to use THRIFT or not **/
+ public static final String REGIONSERVER_USE_THRIFT = "hbase.regionserver.rpc.thrift.enabled";
+
+ /** Value of the REGIONSERVER_USE_THRIFT boolean flag **/
+ public static final boolean DEFAULT_REGIONSERVER_USE_THRIFT = true;
+
+ /** Flag to enable/disable client to regionserver communication via Thrift **/
+ public static final String CLIENT_TO_RS_USE_THRIFT = "hbase.client.rpc.tors.thrift.enabled";
+
+ /**
+ * The default value represents whether to use thrift in
+ * client to regionserver communication
+ */
+ public static final boolean CLIENT_TO_RS_USE_THRIFT_DEFAULT = true;
+
+ /** Flag to enable/disable master to regionserver communication via Thrift **/
+ public static final String MASTER_TO_RS_USE_THRIFT = "hbase.master.regionserver.thrift.enabled";
+
+ /** Value of {@link HConstants#MASTER_TO_RS_USE_THRIFT} **/
+ public static final boolean MASTER_TO_RS_USE_THRIFT_DEFAULT = true;
+
+ /**
+ * The configuration parameter by which we set the client --> master
+ * communication to happen via thrift.
+ */
+ public static final String CLIENT_TO_MASTER_USE_THRIFT =
+ "hbase.client.rpc.tomaster.thrift.enabled";
+
+ /**
+ * The default value of {@link HConstants#CLIENT_TO_MASTER_USE_THRIFT}
+ */
+ public static final boolean CLIENT_TO_MASTER_USE_THRIFT_DEFAULT = false;
+
+ /**
+ * Knob to override the hadoop port info with thrift port info.
+ */
+ public static final String REGION_SERVER_WRITE_THRIFT_INFO_TO_META =
+ "hbase.regionserver.write.thrift.info.to.meta";
+
+ /**
+ * Default value of {@link HConstants#REGION_SERVER_WRITE_THRIFT_INFO_TO_META}
+ */
+ public static final boolean REGION_SERVER_WRITE_THRIFT_INFO_TO_META_DEFAULT =
+ true;
+
/** default port for region server web api */
public static final int DEFAULT_REGIONSERVER_INFOPORT = 60030;
@@ -219,6 +281,9 @@ public final class HConstants {
/** Default value for enabling regionChecker */
public static final Boolean DEFAULT_REGION_CHECKER_ENABLED = false;
+ /** Parameter name for what thrift region server implementation class to use */
+ public static final String THRIFT_REGION_SERVER_IMPL = "hbase.thriftregionserver.impl";
+
/** Parameter name for what compaction manager to use. */
public static final String COMPACTION_MANAGER_CLASS = "hbase.compactionmanager.class";
@@ -337,6 +402,14 @@ public final class HConstants {
*/
public static final int DEFAULT_HDFS_QUORUM_READ_THREADS_MAX = 50;
+ /**
+ * If using quorum reads from HDFS, the timeout of using another region server.
+ */
+ public static final String HDFS_QUORUM_READ_TIMEOUT_MILLIS =
+ "hbase.dfsclient.quorum.reads.timeout";
+ public static final long DEFAULT_HDFS_QUORUM_READ_TIMEOUT_MILLIS = 0;
+
+
/** Default maximum file size */
public static final long DEFAULT_MAX_FILE_SIZE = 256 * 1024 * 1024;
@@ -398,7 +471,7 @@ public final class HConstants {
"hbase.regionserver.preload.blocks.kept.in.cache";
/** Default maximum number of preload blocks to keep in block cache per hfilescanner */
public static final int DEFAULT_MAX_PRELOAD_BLOCKS_KEPT_IN_CACHE = 128;
-
+
// Always store the location of the root table's HRegion.
// This HRegion is never split.
@@ -445,6 +518,9 @@ public final class HConstants {
/** The regioninfo column qualifier */
public static final byte [] REGIONINFO_QUALIFIER = Bytes.toBytes("regioninfo");
+ /** The thriftregioninfo column qualifier */
+ public static final byte [] THRIFT_REGIONINFO_QUALIFIER = Bytes.toBytes("tregioninfo");
+
/** The server column qualifier */
public static final byte [] SERVER_QUALIFIER = Bytes.toBytes("server");
@@ -577,13 +653,21 @@ public final class HConstants {
*/
public static final byte DEFAULT_CLUSTER_ID = 0;
- /**
- * Parameter name for maximum number of bytes returned when calling a
- * scanner's next method.
- */
+ /**
+ * Parameter name for maximum number of bytes returned when calling a
+ * scanner's next method.
+ */
public static final String HBASE_CLIENT_SCANNER_MAX_RESULT_SIZE_KEY = "hbase.client.scanner.max.result.size";
/**
+ * Parameter name for queue length of HTableResultScanner. The total number of
+ * prefetched and cached Results is this number times scanner.getCaching().
+ *
+ */
+ public static final String HBASE_CLIENT_SCANNER_QUEUE_LENGTH = "hbase.client.scanner.queue.length";
+ public static final int DEFAULT_HBASE_CLIENT_SCANNER_QUEUE_LENGTH = 1;
+
+ /**
* Parameter name for the number of threads for the ParallelScanner
*/
public static final String HBASE_CLIENT_PARALLEL_SCANNER_THREAD =
@@ -721,6 +805,30 @@ public final class HConstants {
"hbase.regionserver.hlog.format.backward.compatibility";
/**
+ * Number of threads for swift server
+ */
+ public static final String SWIFT_WORKER_THREADS = "hbase.swift.worker.threads";
+ public static final int SWIFT_WORKER_THREADS_DEFAULT = 300;
+
+ /**
+ * Number of io threads for swift server
+ */
+ public static final String SWIFT_IO_THREADS = "hbase.swift.io.threads";
+ public static final int SWIFT_IO_THREADS_DEFAULT = 60;
+
+ /**
+ * Frame size used for both client and server
+ */
+ public static final String SWIFT_MAX_FRAME_SIZE_BYTES = "hbase.swift.max.response.size";
+ public static final int SWIFT_MAX_FRAME_SIZE_BYTES_DEFAULT = Integer.MAX_VALUE;
+
+ /**
+ * Number of connections for swift server
+ */
+ public static final String SWIFT_CONNECTION_LIMIT = "hbase.swift.connection.limit";
+ public static final int SWIFT_CONNECTION_LIMIT_DEFAULT = 50000;
+
+ /**
* The byte array represents for NO_NEXT_INDEXED_KEY;
* The actual value is irrelevant because this is always compared by reference.
*/
@@ -819,6 +927,22 @@ public final class HConstants {
public static final String MAX_LARGER_CALL_QUEUE_MEMORY_SIZE_STRING = "max.larger.callqueue.memory.size";
public static final int SMALL_QUEUE_REQUEST_LIMIT = 25*1024*1024;
public static final String SMALL_QUEUE_REQUEST_LIMIT_STRING = "small.queue.request.limit";
+ public static final String USE_LOCATEREGION_V2 = "hbase.Client.hconnectionmanager.use.locateregionv2";
+
+
+ public static final String CLIENT_RETRY_NUM_STRING = "hbase.client.retries.number";
+ public static final int DEFAULT_CLIENT_RETRY_NUM = 10;
+
+ public static final String SERVER_REQUESTED_RETRIES_STRING = "hbase.client.server.requested.retries.max";
+ public static final int DEFAULT_SERVER_REQUESTED_RETRIES = 0;
+
+ public static final String CLIENT_RPC_RETRY_TIMEOUT_STRING = "hbase.client.rpc.retry.timeout";
+ public static final long DEFAULT_CLIENT_RPC_RETRY_TIMEOUT = Long.MAX_VALUE;
+
+ public static final String HTABLE_ASYNC_CALLS = "hbase.htable.async.calls";
+ public static final boolean HTABLE_ASYNC_CALLS_DEFAULT = false;
+
+ public static final int DEFAULT_HTABLE_ASYNC_CORE_THREADS = 100;
// These are the IO priority values for various regionserver operations. Note
// that these are priorities relative to each other. See the man page for
@@ -920,6 +1044,19 @@ public final class HConstants {
"hbase.hregion.memstore.bloom.filter.enabled";
public static final boolean DEFAULT_IN_MEMORY_BLOOM_ENABLED = false;
+
+ /**
+ * Use header protocol on client and server. This is used for call context
+ * and disabling/enabling this would do it for both the client and server.
+ */
+ public static final String USE_HEADER_PROTOCOL = "hbase.client.headerprotocol.enabled";
+ public static final boolean DEFAULT_USE_HEADER_PROTOCOL = true;
+
+ public static final String THRIFT_HEADER_FROM_SERVER = "serverHeader";
+ public static final String THRIFT_HEADER_FROM_CLIENT = "clientHeader";
+
+ public static final boolean DISABLE_THRIFT_REGION_INFO_QUALIFIER = false;
+
private HConstants() {
// Can't be instantiated with this constructor.
}
Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/HMsg.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/HMsg.java?rev=1576909&r1=1576908&r2=1576909&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/HMsg.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/HMsg.java Wed Mar 12 21:17:13 2014
@@ -136,7 +136,7 @@ public class HMsg implements Writable {
* Run compaction on a specific column family within a region.
*/
MSG_REGION_CF_COMPACT,
-
+
/**
* Run major compaction on a specific column family within a region.
*/
@@ -144,7 +144,7 @@ public class HMsg implements Writable {
/**
* Region server is going down for restart
- *
+ *
* Note that this message is followed by MSG_REPORT_CLOSE messages for each
* region the region server was serving, unless it was told to quiesce.
*/
Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java?rev=1576909&r1=1576908&r2=1576909&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java Wed Mar 12 21:17:13 2014
@@ -33,11 +33,16 @@ import org.apache.hadoop.hbase.util.MD5H
import org.apache.hadoop.io.VersionedWritable;
import org.apache.hadoop.io.WritableComparable;
+import com.facebook.swift.codec.ThriftConstructor;
+import com.facebook.swift.codec.ThriftField;
+import com.facebook.swift.codec.ThriftStruct;
+
/**
* HRegion information.
* Contains HRegion id, start and end keys, a reference to this
* HRegions' table descriptor, etc.
*/
+@ThriftStruct
public class HRegionInfo extends VersionedWritable implements WritableComparable<HRegionInfo>{
private static final byte VERSION = 0;
private static final Log LOG = LogFactory.getLog(HRegionInfo.class);
@@ -126,7 +131,7 @@ public class HRegionInfo extends Version
new HRegionInfo(1L, HTableDescriptor.META_TABLEDESC);
private byte [] endKey = HConstants.EMPTY_BYTE_ARRAY;
- private boolean offLine = false;
+ private boolean offline = false;
private long regionId = -1;
private transient byte [] regionName = HConstants.EMPTY_BYTE_ARRAY;
private String regionNameStr = "";
@@ -144,7 +149,7 @@ public class HRegionInfo extends Version
result ^= this.regionId;
result ^= Arrays.hashCode(this.startKey);
result ^= Arrays.hashCode(this.endKey);
- result ^= Boolean.valueOf(this.offLine).hashCode();
+ result ^= Boolean.valueOf(this.offline).hashCode();
result ^= this.tableDesc.hashCode();
this.hashCode = result;
}
@@ -154,7 +159,6 @@ public class HRegionInfo extends Version
* first meta regions
*/
private HRegionInfo(long regionId, HTableDescriptor tableDesc) {
- super();
this.regionId = regionId;
this.tableDesc = tableDesc;
@@ -167,7 +171,6 @@ public class HRegionInfo extends Version
/** Default constructor - creates empty object */
public HRegionInfo() {
- super();
this.tableDesc = new HTableDescriptor();
}
@@ -212,23 +215,69 @@ public class HRegionInfo extends Version
* @param regionid Region id to use.
* @throws IllegalArgumentException
*/
- public HRegionInfo(HTableDescriptor tableDesc, final byte [] startKey,
- final byte [] endKey, final boolean split, final long regionid)
+ public HRegionInfo(HTableDescriptor tableDesc,
+ final byte[] startKey,
+ final byte[] endKey,
+ final boolean split,
+ final long regionid)
+ throws IllegalArgumentException {
+ if (tableDesc == null) {
+ throw new IllegalArgumentException("tableDesc cannot be null");
+ }
+ this.offline = false;
+ this.regionId = regionid;
+ this.regionName = createRegionName(tableDesc.getName(), startKey, regionId,
+ true);
+ this.regionNameStr = Bytes.toStringBinary(this.regionName);
+ this.split = split;
+ this.endKey = endKey == null ? HConstants.EMPTY_END_ROW : endKey.clone();
+ this.startKey = startKey == null ? HConstants.EMPTY_START_ROW : startKey
+ .clone();
+ this.tableDesc = tableDesc;
+ setHashCode();
+ }
+
+ /**
+ * Thrift constructor
+ * Construct HRegionInfo with explicit parameters
+ *
+ * @param tableDesc the table descriptor
+ * @param startKey first key in region
+ * @param endKey end of key range
+ * @param split true if this region has split and we have daughter regions
+ * regions that may or may not hold references to this region.
+ * @param regionid Region id to use.
+ * @throws IllegalArgumentException
+ */
+ @ThriftConstructor
+ public HRegionInfo(@ThriftField(1) HTableDescriptor tableDesc,
+ @ThriftField(2) final byte[] startKey,
+ @ThriftField(3) final byte[] endKey,
+ @ThriftField(4) final boolean split,
+ @ThriftField(5) final long regionid,
+ @ThriftField(6) final byte[] splitPoint,
+ @ThriftField(7) final boolean offline)
throws IllegalArgumentException {
- super();
if (tableDesc == null) {
throw new IllegalArgumentException("tableDesc cannot be null");
}
- this.offLine = false;
+ this.offline = false;
this.regionId = regionid;
- this.regionName = createRegionName(tableDesc.getName(), startKey, regionId, true);
+ boolean newFormat = true;
+ if (tableDesc.isMetaRegion()) {
+ newFormat = false;
+ }
+ this.regionName = createRegionName(tableDesc.getName(), startKey, regionId,
+ newFormat);
this.regionNameStr = Bytes.toStringBinary(this.regionName);
this.split = split;
- this.endKey = endKey == null? HConstants.EMPTY_END_ROW: endKey.clone();
- this.startKey = startKey == null?
- HConstants.EMPTY_START_ROW: startKey.clone();
+ this.endKey = endKey == null ? HConstants.EMPTY_END_ROW : endKey.clone();
+ this.startKey = startKey == null ? HConstants.EMPTY_START_ROW : startKey
+ .clone();
this.tableDesc = tableDesc;
setHashCode();
+ this.splitPoint = splitPoint;
+ this.offline = offline;
}
/**
@@ -237,9 +286,8 @@ public class HRegionInfo extends Version
* @param other
*/
public HRegionInfo(HRegionInfo other) {
- super();
this.endKey = other.getEndKey();
- this.offLine = other.isOffline();
+ this.offline = other.isOffline();
this.regionId = other.getRegionId();
this.regionName = other.getRegionName();
this.regionNameStr = Bytes.toStringBinary(this.regionName);
@@ -264,7 +312,7 @@ public class HRegionInfo extends Version
* (such that it contains its encoded name?).
* @return Region name made of passed tableName, startKey and id
*/
- public static byte [] createRegionName(final byte [] tableName,
+ public static byte[] createRegionName(final byte [] tableName,
final byte [] startKey, final String id, boolean newFormat) {
return createRegionName(tableName, startKey, Bytes.toBytes(id), newFormat);
}
@@ -369,6 +417,7 @@ public class HRegionInfo extends Version
}
/** @return the regionId */
+ @ThriftField(5)
public long getRegionId(){
return regionId;
}
@@ -405,12 +454,14 @@ public class HRegionInfo extends Version
}
/** @return the startKey */
- public byte [] getStartKey(){
+ @ThriftField(2)
+ public byte[] getStartKey() {
return startKey;
}
/** @return the endKey */
- public byte [] getEndKey(){
+ @ThriftField(3)
+ public byte[] getEndKey() {
return endKey;
}
@@ -445,6 +496,7 @@ public class HRegionInfo extends Version
}
/** @return the tableDesc */
+ @ThriftField(1)
public HTableDescriptor getTableDesc(){
return tableDesc;
}
@@ -488,11 +540,17 @@ public class HRegionInfo extends Version
/**
* @return point to explicitly split the region on
*/
+ @ThriftField(6)
public byte[] getSplitPoint() {
return (this.splitPoint != null && this.splitPoint.length > 0)
? this.splitPoint : null;
}
+ @ThriftField(4)
+ public boolean getSplit() {
+ return this.split;
+ }
+
/**
* @param splitPoint set split status & position to split on
*/
@@ -504,15 +562,16 @@ public class HRegionInfo extends Version
/**
* @return True if this region is offline.
*/
+ @ThriftField(7)
public boolean isOffline() {
- return this.offLine;
+ return this.offline;
}
/**
* @param offLine set online - offline status
*/
- public void setOffline(boolean offLine) {
- this.offLine = offLine;
+ public void setOffline(boolean offline) {
+ this.offline = offline;
}
/**
@@ -570,7 +629,7 @@ public class HRegionInfo extends Version
public void write(DataOutput out) throws IOException {
super.write(out);
Bytes.writeByteArray(out, endKey);
- out.writeBoolean(offLine);
+ out.writeBoolean(offline);
out.writeLong(regionId);
Bytes.writeByteArray(out, regionName);
out.writeBoolean(split);
@@ -586,7 +645,7 @@ public class HRegionInfo extends Version
public void readFields(DataInput in) throws IOException {
super.readFields(in);
this.endKey = Bytes.readByteArray(in);
- this.offLine = in.readBoolean();
+ this.offline = in.readBoolean();
this.regionId = in.readLong();
this.regionName = Bytes.readByteArray(in);
this.regionNameStr = Bytes.toStringBinary(this.regionName);
Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/HServerAddress.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/HServerAddress.java?rev=1576909&r1=1576908&r2=1576909&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/HServerAddress.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/HServerAddress.java Wed Mar 12 21:17:13 2014
@@ -31,9 +31,14 @@ import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.io.WritableComparable;
+import com.facebook.swift.codec.ThriftConstructor;
+import com.facebook.swift.codec.ThriftField;
+import com.facebook.swift.codec.ThriftStruct;
+
/**
* HServerAddress is a "label" for a HBase server made of host and port number.
*/
+@ThriftStruct
public class HServerAddress implements WritableComparable<HServerAddress> {
private static final Log LOG = LogFactory.getLog(HServerAddress.class);
@@ -92,11 +97,27 @@ public class HServerAddress implements W
}
/**
+ * Thrift constructor
* @param bindAddress Hostname
* @param port Port number
*/
- public HServerAddress(String bindAddress, int port) {
- this.address = new InetSocketAddress(bindAddress, port);
+ @ThriftConstructor
+ public HServerAddress(
+ @ThriftField(1) String bindAddress,
+ @ThriftField(2) int port) {
+ String hostAndPort = createHostAndPort(bindAddress, port);
+ address = addressCache.get(hostAndPort);
+ if (address == null) {
+ this.address = new InetSocketAddress(bindAddress, port);
+ if (getBindAddress() != null) {
+ // Resolved the hostname successfully, cache it.
+ InetSocketAddress existingAddress = addressCache.putIfAbsent(hostAndPort, address);
+ if (existingAddress != null) {
+ // Another thread cached the address ahead of us, reuse it.
+ this.address = existingAddress;
+ }
+ }
+ }
this.stringValue = getHostAddressWithPort();
checkBindAddressCanBeResolved();
}
@@ -156,11 +177,13 @@ public class HServerAddress implements W
}
/** @return Port number */
+ @ThriftField(2)
public int getPort() {
return address.getPort();
}
/** @return Hostname */
+ @ThriftField(1)
public String getHostname() {
return address.getHostName();
}
@@ -241,4 +264,12 @@ public class HServerAddress implements W
if (address.equals(o.address)) return 0;
return toString().compareTo(o.toString());
}
+
+ public static String createHostAndPort(String host, int port){
+ StringBuilder sb = new StringBuilder();
+ sb.append(host);
+ sb.append(":");
+ sb.append(port);
+ return sb.toString();
+ }
}
Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/HServerInfo.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/HServerInfo.java?rev=1576909&r1=1576908&r2=1576909&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/HServerInfo.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/HServerInfo.java Wed Mar 12 21:17:13 2014
@@ -23,6 +23,7 @@ import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.net.InetSocketAddress;
+import java.util.Map;
import java.util.Set;
import java.util.regex.Pattern;
import java.util.SortedMap;
@@ -36,6 +37,10 @@ import org.apache.hadoop.hbase.util.Byte
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableComparable;
+import com.facebook.swift.codec.ThriftConstructor;
+import com.facebook.swift.codec.ThriftField;
+import com.facebook.swift.codec.ThriftStruct;
+
/**
* HServerInfo is meta info about an {@link HRegionServer}. It is the token
* by which a master distingushes a particular regionserver from the rest.
@@ -48,6 +53,7 @@ import org.apache.hadoop.io.WritableComp
* by. In subsequent communications, the regionserver will pass a HServerInfo
* with the master-supplied address.
*/
+@ThriftStruct
public class HServerInfo implements WritableComparable<HServerInfo> {
private static final Log LOG = LogFactory.getLog(HServerInfo.class);
@@ -65,6 +71,7 @@ public class HServerInfo implements Writ
"-?[0-9]{1," + String.valueOf(Long.MAX_VALUE).length() + "}");
private HServerAddress serverAddress;
+ // start time of the regionserver
private long startCode;
private HServerLoad load;
// Servername is made of hostname, port and startcode.
@@ -76,7 +83,7 @@ public class HServerInfo implements Writ
// For each region, store the last sequence id that was flushed
// from MemStore to an HFile
- private final SortedMap<byte[], Long> flushedSequenceIdByRegion =
+ private SortedMap<byte[], Long> flushedSequenceIdByRegion =
new ConcurrentSkipListMap<byte[], Long>(Bytes.BYTES_COMPARATOR);
public HServerInfo() {
@@ -109,6 +116,25 @@ public class HServerInfo implements Writ
this.startCode = startCode;
this.load = new HServerLoad();
this.hostname = hostname;
+ }
+
+ @ThriftConstructor
+ public HServerInfo(
+ @ThriftField(1) HServerAddress serverAddress,
+ @ThriftField(2) long startCode,
+ @ThriftField(3) String hostname,
+ @ThriftField(4) Map<byte[], Long> flushedSequenceIdByRegion,
+ @ThriftField(5) boolean sendSequenceIds,
+ @ThriftField(6) String cachedHostnamePort,
+ @ThriftField(7) HServerLoad load) {
+ this.serverAddress = serverAddress;
+ this.startCode = startCode;
+ this.hostname = hostname;
+ this.flushedSequenceIdByRegion = new ConcurrentSkipListMap<>(Bytes.BYTES_COMPARATOR);
+ this.flushedSequenceIdByRegion.putAll(flushedSequenceIdByRegion);
+ this.sendSequenceIds = sendSequenceIds;
+ this.cachedHostnamePort = cachedHostnamePort;
+ this.load = load;
}
/**
@@ -123,6 +149,7 @@ public class HServerInfo implements Writ
this.flushedSequenceIdByRegion.putAll(other.flushedSequenceIdByRegion);
}
+ @ThriftField(7)
public HServerLoad getLoad() {
return load;
}
@@ -131,6 +158,7 @@ public class HServerInfo implements Writ
this.load = load;
}
+ @ThriftField(1)
public synchronized HServerAddress getServerAddress() {
return new HServerAddress(serverAddress);
}
@@ -140,14 +168,23 @@ public class HServerInfo implements Writ
this.serverName = null;
}
+ @ThriftField(2)
public synchronized long getStartCode() {
return startCode;
}
+ @ThriftField(3)
public String getHostname() {
return this.hostname;
}
+ @ThriftField(4)
+ public Map<byte[], Long> getFlushedSequenceIdByRegion() {
+ return this.flushedSequenceIdByRegion;
+ }
+
+
+
public void setFlushedSequenceIdForRegion(byte[] region, long sequenceId) {
flushedSequenceIdByRegion.put(region, sequenceId);
}
@@ -156,10 +193,6 @@ public class HServerInfo implements Writ
return flushedSequenceIdByRegion.get(region);
}
- public SortedMap<byte[], Long> getFlushedSequenceIdByRegion() {
- return flushedSequenceIdByRegion;
- }
-
/**
* @return The hostname and port concatenated with a ':' as separator.
*/
@@ -170,6 +203,11 @@ public class HServerInfo implements Writ
return this.cachedHostnamePort;
}
+ @ThriftField(6)
+ public String getCachedHostnamePort() {
+ return this.cachedHostnamePort;
+ }
+
/**
* @param hostname
* @param port
@@ -199,17 +237,8 @@ public class HServerInfo implements Writ
Integer.parseInt(hostAndPort.substring(index + 1)), startcode);
}
- /**
- * @param address Server address
- * @param startCode Server startcode
- * @return Server name made of the concatenation of hostname, port and
- * startcode formatted as <code><hostname> ',' <port> ',' <startcode></code>
- */
- public static String getServerName(HServerAddress address, long startCode) {
- return getServerName(address.getHostname(), address.getPort(), startCode);
- }
- /*
+ /**
* @param hostName
* @param port
* @param startCode
@@ -229,6 +258,11 @@ public class HServerInfo implements Writ
this.sendSequenceIds = sendSequenceIds;
}
+ @ThriftField(5)
+ public boolean getSendSequenceIds() {
+ return this.sendSequenceIds;
+ }
+
/**
* @return ServerName and load concatenated.
* @see #getServerName()
@@ -298,7 +332,7 @@ public class HServerInfo implements Writ
* @param hostAndPortOnly If <code>serverName</code> is a
* <code>hostname ':' port</code>
* or <code>hostname , port , startcode</code>.
- * @return True if <code>serverName</code> found in <code>servers</code>
+ * @return true if <code>serverName</code> found in <code>servers</code>
*/
public static boolean isServer(final Set<String> servers,
final String serverName, final boolean hostAndPortOnly) {
@@ -323,7 +357,7 @@ public class HServerInfo implements Writ
String[] components = serverName.split(SERVERNAME_SEPARATOR);
if (components.length != 3) {
String msg = "Invalid number of components in server name: " + serverName;
- LOG.info(msg);
+ LOG.error(msg);
throw new IllegalArgumentException(msg);
}
String hostName = components[0];
Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/HServerLoad.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/HServerLoad.java?rev=1576909&r1=1576908&r2=1576909&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/HServerLoad.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/HServerLoad.java Wed Mar 12 21:17:13 2014
@@ -25,16 +25,21 @@ import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
+import java.util.List;
-import org.apache.hadoop.hbase.metrics.RequestMetrics;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Strings;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableComparable;
+import com.facebook.swift.codec.ThriftConstructor;
+import com.facebook.swift.codec.ThriftField;
+import com.facebook.swift.codec.ThriftStruct;
+
/**
* This class encapsulates metrics for determining the load on a HRegionServer
*/
+@ThriftStruct
public class HServerLoad implements WritableComparable<HServerLoad> {
/** number of regions */
// could just use regionLoad.size() but master.RegionManager likes to play
@@ -48,15 +53,17 @@ public class HServerLoad implements Writ
/** the maximum allowable size of the heap, in MB */
private int maxHeapMB;
/** per-region load metrics */
- private ArrayList<RegionLoad> regionLoad = new ArrayList<RegionLoad>();
+ private List<RegionLoad> regionLoad = new ArrayList<RegionLoad>();
// lastLoadRefreshTime and expireAfter are only maintained by the
// master. They are not serialized and reported by the region servers
public volatile /* transient */ long lastLoadRefreshTime = 0;
public volatile /* transient */ long expireAfter = Long.MAX_VALUE;
+
/**
* Encapsulates per-region loading metrics.
*/
+ @ThriftStruct
public static class RegionLoad implements Writable {
/** the region name */
private byte[] name;
@@ -113,11 +120,17 @@ public class HServerLoad implements Writ
* @param readRequestPerSec
* @param writeRequestPerSec
*/
- public RegionLoad(final byte[] name, final int stores,
- final int storefiles, final int storefileSizeMB,
- final int memstoreSizeMB, final int storefileIndexSizeMB,
- final int rootIndexSizeKB, final int totalStaticIndexSizeKB,
- final int totalStaticBloomSizeKB) {
+ @ThriftConstructor
+ public RegionLoad(
+ @ThriftField(1) final byte[] name,
+ @ThriftField(2) final int stores,
+ @ThriftField(3) final int storefiles,
+ @ThriftField(4) final int storefileSizeMB,
+ @ThriftField(5) final int memstoreSizeMB,
+ @ThriftField(6) final int storefileIndexSizeMB,
+ @ThriftField(7) final int rootIndexSizeKB,
+ @ThriftField(8) final int totalStaticIndexSizeKB,
+ @ThriftField(9) final int totalStaticBloomSizeKB) {
this.name = name;
this.stores = stores;
this.storefiles = storefiles;
@@ -134,6 +147,7 @@ public class HServerLoad implements Writ
/**
* @return the region name
*/
+ @ThriftField(1)
public byte[] getName() {
return name;
}
@@ -148,6 +162,7 @@ public class HServerLoad implements Writ
/**
* @return the number of stores
*/
+ @ThriftField(2)
public int getStores() {
return stores;
}
@@ -155,6 +170,7 @@ public class HServerLoad implements Writ
/**
* @return the number of storefiles
*/
+ @ThriftField(3)
public int getStorefiles() {
return storefiles;
}
@@ -162,6 +178,7 @@ public class HServerLoad implements Writ
/**
* @return the total size of the storefiles, in MB
*/
+ @ThriftField(4)
public int getStorefileSizeMB() {
return storefileSizeMB;
}
@@ -169,17 +186,34 @@ public class HServerLoad implements Writ
/**
* @return the memstore size, in MB
*/
- public int getMemStoreSizeMB() {
+ @ThriftField(5)
+ public int getMemstoreSizeMB() {
return memstoreSizeMB;
}
/**
* @return the approximate size of storefile indexes on the heap, in MB
*/
+ @ThriftField(6)
public int getStorefileIndexSizeMB() {
return storefileIndexSizeMB;
}
+ @ThriftField(7)
+ public int getRootIndexSizeKB() {
+ return rootIndexSizeKB;
+ }
+
+ @ThriftField(8)
+ public int getTotalStaticIndexSizeKB() {
+ return totalStaticIndexSizeKB;
+ }
+
+ @ThriftField(9)
+ public int getTotalStaticBloomSizeKB() {
+ return totalStaticBloomSizeKB;
+ }
+
// Setters
/**
@@ -314,6 +348,24 @@ public class HServerLoad implements Writ
this.regionLoad.addAll(hsl.regionLoad);
}
+ @ThriftConstructor
+ public HServerLoad(
+ @ThriftField(1) int numberOfRegions,
+ @ThriftField(2) int numberOfRequests,
+ @ThriftField(3) int usedHeapMB,
+ @ThriftField(4) int maxHeapMB,
+ @ThriftField(5) List<RegionLoad> regionLoad,
+ @ThriftField(6) long lastLoadRefreshTime,
+ @ThriftField(7) long expireAfter) {
+ this.numberOfRegions = numberOfRegions;
+ this.numberOfRequests = numberOfRequests;
+ this.usedHeapMB = usedHeapMB;
+ this.maxHeapMB = maxHeapMB;
+ this.regionLoad = regionLoad;
+ this.lastLoadRefreshTime = lastLoadRefreshTime;
+ this.expireAfter = expireAfter;
+ }
+
/**
* Originally, this method factored in the effect of requests going to the
* server as well. However, this does not interact very well with the current
@@ -388,6 +440,7 @@ public class HServerLoad implements Writ
/**
* @return the numberOfRegions
*/
+ @ThriftField(1)
public int getNumberOfRegions() {
return numberOfRegions;
}
@@ -395,6 +448,7 @@ public class HServerLoad implements Writ
/**
* @return the numberOfRequests
*/
+ @ThriftField(2)
public int getNumberOfRequests() {
return numberOfRequests;
}
@@ -402,6 +456,7 @@ public class HServerLoad implements Writ
/**
* @return the amount of heap in use, in MB
*/
+ @ThriftField(3)
public int getUsedHeapMB() {
return usedHeapMB;
}
@@ -409,6 +464,7 @@ public class HServerLoad implements Writ
/**
* @return the maximum allowable heap size, in MB
*/
+ @ThriftField(4)
public int getMaxHeapMB() {
return maxHeapMB;
}
@@ -426,7 +482,7 @@ public class HServerLoad implements Writ
public int getStorefiles() {
int count = 0;
for (RegionLoad info: regionLoad)
- count += info.getStorefiles();
+ count += info.getStorefiles();
return count;
}
@@ -446,7 +502,7 @@ public class HServerLoad implements Writ
public int getMemStoreSizeInMB() {
int count = 0;
for (RegionLoad info: regionLoad)
- count += info.getMemStoreSizeMB();
+ count += info.getMemstoreSizeMB();
return count;
}
@@ -456,12 +512,30 @@ public class HServerLoad implements Writ
public int getStorefileIndexSizeInMB() {
int count = 0;
for (RegionLoad info: regionLoad)
- count += info.getStorefileIndexSizeMB();
+ count += info.getStorefileIndexSizeMB();
return count;
}
+ @ThriftField(5)
+ public List<RegionLoad> getRegionLoad() {
+ return regionLoad;
+ }
+
+ @ThriftField(6)
+ public long getLastLoadRefreshTime() {
+ return lastLoadRefreshTime;
+ }
+
+ @ThriftField(7)
+ public long getExpireAfter() {
+ return expireAfter;
+ }
+
+
+
// Setters
+
/**
* @param numberOfRegions the number of regions
*/
@@ -545,4 +619,5 @@ public class HServerLoad implements Writ
public int compareTo(HServerLoad o) {
return this.getLoad() - o.getLoad();
}
+
}
Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java?rev=1576909&r1=1576908&r2=1576909&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java Wed Mar 12 21:17:13 2014
@@ -22,27 +22,33 @@ package org.apache.hadoop.hbase;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
+import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
+import java.util.List;
import java.util.Map;
+import java.util.Map.Entry;
import java.util.Set;
import java.util.TreeMap;
import java.util.TreeSet;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
-import org.apache.hadoop.hbase.io.hfile.Compression;
-import org.apache.hadoop.hbase.regionserver.StoreFile;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.WritableComparable;
+import com.facebook.swift.codec.ThriftConstructor;
+import com.facebook.swift.codec.ThriftField;
+import com.facebook.swift.codec.ThriftStruct;
+
/**
* HTableDescriptor contains the name of an HTable, and its
* column families.
*/
+@ThriftStruct
public class HTableDescriptor implements WritableComparable<HTableDescriptor> {
// Changes prior to version 3 were not recorded here.
@@ -50,12 +56,12 @@ public class HTableDescriptor implements
// Version 4 adds indexes
// Version 5 removed transactional pollution -- e.g. indexes
public static final byte TABLE_DESCRIPTOR_VERSION = 5;
-
+
private byte [] name = HConstants.EMPTY_BYTE_ARRAY;
private String nameAsString = "";
// Table metadata
- protected final Map<ImmutableBytesWritable, ImmutableBytesWritable> values =
+ protected Map<ImmutableBytesWritable, ImmutableBytesWritable> values =
new HashMap<ImmutableBytesWritable, ImmutableBytesWritable>();
public static final String FAMILIES = "FAMILIES";
@@ -157,12 +163,40 @@ public class HTableDescriptor implements
for(HColumnDescriptor descriptor : families) {
this.families.put(descriptor.getName(), descriptor);
}
- for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> entry:
- values.entrySet()) {
- this.values.put(entry.getKey(), entry.getValue());
+ this.values.putAll(values);
+ }
+
+ @ThriftConstructor
+ public HTableDescriptor(
+ @ThriftField(1) final byte[] name,
+ @ThriftField(2) List<HColumnDescriptor> families,
+ @ThriftField(3) Map<byte[], byte[]> values) {
+ this.name = name;
+ this.nameAsString = Bytes.toString(this.name);
+ setMetaFlags(this.name);
+ for (HColumnDescriptor descriptor : families) {
+ this.families.put(descriptor.getName(), descriptor);
+ }
+ for (Entry<byte[], byte[]> entry : values.entrySet()) {
+ this.values.put(new ImmutableBytesWritable(entry.getKey()), new ImmutableBytesWritable(entry.getValue()));
}
}
+ @ThriftField(2)
+ public List<HColumnDescriptor> getFamiliesForThrift() {
+ List<HColumnDescriptor> listToReturn = new ArrayList<HColumnDescriptor>();
+ listToReturn.addAll(this.families.values());
+ return listToReturn;
+ }
+
+ @ThriftField(3)
+ public Map<byte[], byte[]> getValuesForThrift() {
+ Map<byte[], byte[]> mapToReturn = new HashMap<byte[], byte[]>();
+ for (Entry<ImmutableBytesWritable, ImmutableBytesWritable> entry : this.values.entrySet()) {
+ mapToReturn.put(entry.getKey().get(), entry.getValue().get());
+ }
+ return mapToReturn;
+ }
/**
* Constructs an empty object.
@@ -193,7 +227,7 @@ public class HTableDescriptor implements
* <code>[a-zA-Z_0-9-.].
* @see <a href="HADOOP-1581">HADOOP-1581 HBASE: Un-openable tablename bug</a>
*/
- public HTableDescriptor(final byte [] name) {
+ public HTableDescriptor(final byte[] name) {
super();
setMetaFlags(this.name);
this.name = this.isMetaRegion()? name: isLegalTableName(name);
@@ -215,10 +249,7 @@ public class HTableDescriptor implements
for (HColumnDescriptor c: desc.families.values()) {
this.families.put(c.getName(), new HColumnDescriptor(c));
}
- for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> e:
- desc.values.entrySet()) {
- this.values.put(e.getKey(), e.getValue());
- }
+ this.values.putAll(desc.values);
}
/*
@@ -425,7 +456,8 @@ public class HTableDescriptor implements
}
/** @return name of table */
- public byte [] getName() {
+ @ThriftField(1)
+ public byte[] getName() {
return name;
}
Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/KeyValue.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/KeyValue.java?rev=1576909&r1=1576908&r2=1576909&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/KeyValue.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/KeyValue.java Wed Mar 12 21:17:13 2014
@@ -19,14 +19,10 @@
*/
package org.apache.hadoop.hbase;
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-import java.nio.ByteBuffer;
-import java.util.Comparator;
-import java.util.HashMap;
-import java.util.Map;
-
+import com.facebook.swift.codec.ThriftConstructor;
+import com.facebook.swift.codec.ThriftField;
+import com.facebook.swift.codec.ThriftStruct;
+import com.google.common.primitives.Longs;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.filter.KeyOnlyFilter;
@@ -37,7 +33,13 @@ import org.apache.hadoop.hbase.util.Clas
import org.apache.hadoop.io.RawComparator;
import org.apache.hadoop.io.Writable;
-import com.google.common.primitives.Longs;
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.util.Comparator;
+import java.util.HashMap;
+import java.util.Map;
/**
* An HBase Key/Value.
@@ -67,6 +69,7 @@ import com.google.common.primitives.Long
* <p>TODO: Group Key-only comparators and operations into a Key class, just
* for neatness sake, if can figure what to call it.
*/
+@ThriftStruct
public class KeyValue implements Writable, HeapSize, Cloneable {
static final Log LOG = LogFactory.getLog(KeyValue.class);
@@ -255,7 +258,8 @@ public class KeyValue implements Writabl
* Presumes <code>bytes</code> content is formatted as a KeyValue blob.
* @param bytes byte array
*/
- public KeyValue(final byte [] bytes) {
+ @ThriftConstructor
+ public KeyValue(@ThriftField(1) final byte [] bytes) {
this(bytes, 0);
}
@@ -277,7 +281,9 @@ public class KeyValue implements Writabl
* @param offset offset to start of the KeyValue
* @param length length of the KeyValue
*/
- public KeyValue(final byte [] bytes, final int offset, final int length) {
+ public KeyValue(final byte [] bytes,
+ final int offset,
+ final int length) {
this.bytes = bytes;
this.offset = offset;
this.length = length;
@@ -700,6 +706,16 @@ public class KeyValue implements Writabl
return length;
}
+ @ThriftField(1)
+ public byte[] getKeyValueBytes() {
+ if (this.bytes.length == getLength()) {
+ return this.bytes;
+ }
+ byte [] kv = new byte[getLength()];
+ System.arraycopy(getBuffer(), getOffset(), kv, 0, getLength());
+ return kv;
+ }
+
//---------------------------------------------------------------------------
//
// Length and Offset Calculators
Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/LocalHBaseCluster.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/LocalHBaseCluster.java?rev=1576909&r1=1576908&r2=1576909&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/LocalHBaseCluster.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/LocalHBaseCluster.java Wed Mar 12 21:17:13 2014
@@ -119,7 +119,26 @@ public class LocalHBaseCluster {
// Always have masters and regionservers come up on port '0' so we don't
// clash over default ports.
conf.set(HConstants.MASTER_PORT, "0");
- conf.set(HConstants.REGIONSERVER_PORT, "0");
+
+ boolean writeThriftPortToMeta =
+ conf.getBoolean(HConstants.REGION_SERVER_WRITE_THRIFT_INFO_TO_META,
+ HConstants.REGION_SERVER_WRITE_THRIFT_INFO_TO_META_DEFAULT);
+
+
+ // If we are writing the thrift port to meta, then we can start the
+ // thrift server at any ephemeral port. Otherwise, we start it at the
+ // default port. But then, we can only start one server. Similarly for
+ // RPC server.
+ if (writeThriftPortToMeta) {
+ conf.setInt(HConstants.REGIONSERVER_SWIFT_PORT, 0);
+ conf.setInt(HConstants.REGIONSERVER_PORT,
+ HConstants.DEFAULT_REGIONSERVER_PORT);
+ } else {
+ conf.setInt(HConstants.REGIONSERVER_SWIFT_PORT,
+ HConstants.DEFAULT_REGIONSERVER_SWIFT_PORT);
+ conf.setInt(HConstants.REGIONSERVER_PORT, 0);
+ }
+
// Start the HMasters.
this.masterClass =
(Class<? extends HMaster>)conf.getClass(HConstants.MASTER_IMPL,
@@ -155,8 +174,8 @@ public class LocalHBaseCluster {
* @return the new master
*/
public HMaster addMaster() throws IOException {
- Configuration masterConf = new Configuration(conf);
- // Create each master with its own Configuration instance so each has
+ Configuration masterConf = HBaseConfiguration.create(conf);
+ // Creating each master with its own Configuration instance so each has
// its HConnection instance rather than share (see HBASE_INSTANCES down in
// the guts of HConnectionManager).
HMaster m = JVMClusterUtil.createMaster(masterConf, this.masterClass);
@@ -209,7 +228,7 @@ public class LocalHBaseCluster {
/**
* Wait for the specified master to stop, and removes this thread from list
* of running threads.
- *
+ *
* @param serverNumber the 0-based index of the master to stop
* @return Name of master that just went down.
*/
Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/YouAreDeadException.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/YouAreDeadException.java?rev=1576909&r1=1576908&r2=1576909&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/YouAreDeadException.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/YouAreDeadException.java Wed Mar 12 21:17:13 2014
@@ -32,3 +32,4 @@ public class YouAreDeadException extends
super(message);
}
}
+//