Posted to commits@hbase.apache.org by li...@apache.org on 2014/03/12 22:17:20 UTC
svn commit: r1576909 [11/18] - in /hbase/branches/0.89-fb/src: ./
examples/thrift/ main/java/org/apache/hadoop/hbase/
main/java/org/apache/hadoop/hbase/avro/
main/java/org/apache/hadoop/hbase/avro/generated/
main/java/org/apache/hadoop/hbase/client/ ma...
Added: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/ThriftHRegionServer.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/ThriftHRegionServer.java?rev=1576909&view=auto
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/ThriftHRegionServer.java (added)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/ThriftHRegionServer.java Wed Mar 12 21:17:13 2014
@@ -0,0 +1,633 @@
+/*
+ * Copyright The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HServerInfo;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.NotServingRegionException;
+import org.apache.hadoop.hbase.client.Delete;
+import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.MultiAction;
+import org.apache.hadoop.hbase.client.MultiPut;
+import org.apache.hadoop.hbase.client.MultiPutResponse;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.RowLock;
+import org.apache.hadoop.hbase.client.RowMutations;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.TMultiResponse;
+import org.apache.hadoop.hbase.client.TRowMutations;
+import org.apache.hadoop.hbase.io.hfile.histogram.HFileHistogram.Bucket;
+import org.apache.hadoop.hbase.ipc.ThriftHRegionInterface;
+import org.apache.hadoop.hbase.ipc.thrift.exceptions.ThriftHBaseException;
+import org.apache.hadoop.hbase.master.AssignmentPlan;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.Writables;
+import org.apache.hadoop.io.BytesWritable;
+import org.apache.hadoop.io.LongWritable;
+import org.apache.hadoop.io.MapWritable;
+import org.apache.hadoop.io.Writable;
+
+import com.google.common.util.concurrent.Futures;
+import com.google.common.util.concurrent.ListenableFuture;
+
+/**
+ * A Thrift-facing wrapper around {@link HRegionServer}: each method delegates
+ * to the wrapped server and rethrows failures as ThriftHBaseException.
+ */
+public class ThriftHRegionServer implements ThriftHRegionInterface {
+ public static Log LOG = LogFactory.getLog(ThriftHRegionServer.class);
+
+ private HRegionServer server;
+
+ public ThriftHRegionServer(HRegionServer server) {
+ this.server = server;
+ }
+
+ @Override
+ public HRegionInfo getRegionInfo(byte[] regionName)
+ throws ThriftHBaseException {
+ try {
+ HRegionInfo r = server.getRegionInfo(regionName);
+ LOG.debug("Printing the result of getClosestRowOrBefore : " + r);
+ return r;
+ } catch (NotServingRegionException e) {
+ LOG.error("NotServingRegionException in getRegionInfo", e);
+ throw new ThriftHBaseException(e);
+ }
+ }
+
+ @Override
+ public Result getClosestRowBefore(byte[] regionName, byte[] row, byte[] family)
+ throws ThriftHBaseException {
+ try {
+ Result r = server.getClosestRowBefore(regionName, row, family);
+ if (r == null) {
+ return Result.SENTINEL_RESULT;
+ } else {
+ return addThriftRegionInfoQualifierIfNeeded(r);
+ }
+ } catch (IOException e) {
+ throw new ThriftHBaseException(e);
+ }
+ }
+
+ @Override
+ public ListenableFuture<Result> getClosestRowBeforeAsync(final byte[] regionName,
+ final byte[] row, final byte[] family) {
+ try {
+ Result result = getClosestRowBefore(regionName, row, family);
+ return Futures.immediateFuture(result);
+ } catch (ThriftHBaseException e) {
+ return Futures.immediateFailedFuture(e);
+ }
+ }
+
+ /**
+ * If there is a 'regioninfo' value in the result, it is in Writable-serialized
+ * form. Add a Thrift-serialized HRegionInfo object alongside it so that
+ * non-Java clients can read it.
+ */
+ private Result addThriftRegionInfoQualifierIfNeeded(Result tentativeResult) {
+ //TODO: Thrift has a problem serializing HRegionInfo. Since this method is
+ // only for the C++ client, it is temporarily disabled. Remove the flag
+ // once the problem is fixed.
+ if (HConstants.DISABLE_THRIFT_REGION_INFO_QUALIFIER) {
+ return tentativeResult;
+ }
+
+ // Get the serialized HRegionInfo object
+ byte[] value = tentativeResult.searchValue(HConstants.CATALOG_FAMILY,
+ HConstants.REGIONINFO_QUALIFIER);
+ // If the value exists, append a Thrift-serialized copy; otherwise return
+ // the result untouched.
+ if (value != null && value.length > 0) {
+ try {
+ // Get the Writable-serialized HRegionInfo object.
+ HRegionInfo hri = (HRegionInfo) Writables.getWritable(value,
+ new HRegionInfo());
+ byte[] thriftSerializedHri =
+ Bytes.writeThriftBytes(hri, HRegionInfo.class);
+ // Create the KV with the thrift-serialized HRegionInfo
+ List<KeyValue> kvList = tentativeResult.getKvs();
+ KeyValue kv = new KeyValue(kvList.get(0).getRow(),
+ HConstants.CATALOG_FAMILY, HConstants.THRIFT_REGIONINFO_QUALIFIER,
+ thriftSerializedHri);
+ kvList.add(kv);
+ return new Result(kvList);
+ } catch (Exception e) {
+ // If serialization fails, log the error and return the original result.
+ LOG.error("Thrift serialization of the HRegionInfo object failed!", e);
+ }
+ }
+ return tentativeResult;
+ }
+
+ @Override
+ public void flushRegion(byte[] regionName)
+ throws ThriftHBaseException {
+ try {
+ server.flushRegion(regionName);
+ } catch (Exception e) {
+ throw new ThriftHBaseException(e);
+ }
+ }
+
+ @Override
+ public void flushRegion(byte[] regionName, long ifOlderThanTS)
+ throws ThriftHBaseException {
+ try {
+ server.flushRegion(regionName, ifOlderThanTS);
+ } catch (IllegalArgumentException e) {
+ throw new ThriftHBaseException(e);
+ } catch (IOException e) {
+ throw new ThriftHBaseException(e);
+ }
+ }
+
+ @Override
+ public long getLastFlushTime(byte[] regionName) {
+ return server.getLastFlushTime(regionName);
+ }
+
+ @Override
+ public Map<byte[], Long> getLastFlushTimes() {
+ MapWritable mapWritable = server.getLastFlushTimes();
+ Map<byte[], Long> map = new HashMap<byte[], Long>();
+
+ for (Entry<Writable, Writable> e : mapWritable.entrySet()) {
+ map.put(((BytesWritable) e.getKey()).getBytes(),
+ ((LongWritable) e.getValue()).get());
+ }
+ return map;
+ }
+
+ @Override
+ public long getCurrentTimeMillis() {
+ return server.getCurrentTimeMillis();
+ }
+
+ @Override
+ public long getStartCode() {
+ return server.getStartCode();
+ }
+
+ @Override
+ public List<String> getStoreFileList(byte[] regionName, byte[] columnFamily)
+ throws ThriftHBaseException {
+ try {
+ return server.getStoreFileList(regionName, columnFamily);
+ } catch (IllegalArgumentException e) {
+ throw new ThriftHBaseException(e);
+ }
+ }
+
+ @Override
+ public List<String> getStoreFileListForColumnFamilies(byte[] regionName,
+ List<byte[]> columnFamilies) throws ThriftHBaseException {
+ try {
+ byte[][] columnFamiliesArray = new byte[columnFamilies.size()][];
+ return server.getStoreFileList(regionName, columnFamilies.toArray(columnFamiliesArray));
+ } catch (IllegalArgumentException e) {
+ throw new ThriftHBaseException(e);
+ }
+ }
+
+ @Override
+ public List<String> getStoreFileListForAllColumnFamilies(byte[] regionName)
+ throws ThriftHBaseException {
+ try {
+ return server.getStoreFileList(regionName);
+ } catch (IllegalArgumentException e) {
+ throw new ThriftHBaseException(e);
+ }
+ }
+
+ @Override
+ public List<String> getHLogsList(boolean rollCurrentHLog)
+ throws ThriftHBaseException {
+ try {
+ return server.getHLogsList(rollCurrentHLog);
+ } catch (IOException e) {
+ throw new ThriftHBaseException(e);
+ }
+ }
+
+ @Override
+ public Result get(byte[] regionName, Get get) throws ThriftHBaseException {
+ try {
+ return server.get(regionName, get);
+ } catch (IOException e) {
+ throw new ThriftHBaseException(e);
+ }
+ }
+
+ @Override
+ public ListenableFuture<Result> getAsync(byte[] regionName, Get get) {
+ try {
+ return Futures.immediateFuture(get(regionName, get));
+ } catch (ThriftHBaseException e) {
+ return Futures.immediateFailedFuture(e);
+ }
+ }
+
+ @Override
+ public List<Result> getRows(byte[] regionName, List<Get> gets)
+ throws ThriftHBaseException {
+ try {
+ List<Result> resultList = new ArrayList<>();
+ Collections.addAll(resultList, server.get(regionName, gets));
+ return resultList;
+ } catch (IOException e) {
+ throw new ThriftHBaseException(e);
+ }
+ }
+
+ @Override
+ public boolean exists(byte[] regionName, Get get)
+ throws ThriftHBaseException {
+ try {
+ return server.exists(regionName, get);
+ } catch (IOException e) {
+ throw new ThriftHBaseException(e);
+ }
+ }
+
+
+ @Override
+ public void put(byte[] regionName, Put put) throws ThriftHBaseException {
+ try {
+ server.put(regionName, put);
+ } catch (IOException e) {
+ throw new ThriftHBaseException(e);
+ }
+ }
+
+ @Override
+ public int putRows(byte[] regionName, List<Put> puts)
+ throws ThriftHBaseException {
+ try {
+ return server.put(regionName, puts);
+ } catch (IOException e) {
+ throw new ThriftHBaseException(e);
+ }
+ }
+
+ @Override
+ public void processDelete(byte[] regionName, Delete delete)
+ throws ThriftHBaseException {
+ try {
+ server.delete(regionName, delete);
+ } catch (IOException e) {
+ throw new ThriftHBaseException(e);
+ }
+ }
+
+ @Override
+ public ListenableFuture<Void> deleteAsync(final byte[] regionName, final Delete delete) {
+ try {
+ processDelete(regionName, delete);
+ return Futures.immediateFuture(null);
+ } catch (ThriftHBaseException e) {
+ return Futures.immediateFailedFuture(e);
+ }
+ }
+
+ @Override
+ public int processListOfDeletes(byte[] regionName, List<Delete> deletes)
+ throws ThriftHBaseException {
+ try {
+ return server.delete(regionName, deletes);
+ } catch (IOException e) {
+ throw new ThriftHBaseException(e);
+ }
+ }
+
+ @Override
+ public boolean checkAndPut(byte[] regionName, byte[] row, byte[] family,
+ byte[] qualifier, byte[] value, Put put) throws ThriftHBaseException {
+ try {
+ return server.checkAndPut(regionName, row, family, qualifier, value, put);
+ } catch (IOException e) {
+ throw new ThriftHBaseException(e);
+ }
+ }
+
+ @Override
+ public boolean checkAndDelete(byte[] regionName, byte[] row, byte[] family,
+ byte[] qualifier, byte[] value, Delete delete)
+ throws ThriftHBaseException {
+ try {
+ return server.checkAndDelete(regionName, row, family, qualifier, value,
+ delete);
+ } catch (IOException e) {
+ throw new ThriftHBaseException(e);
+ }
+ }
+
+ @Override
+ public long incrementColumnValue(byte[] regionName, byte[] row,
+ byte[] family, byte[] qualifier, long amount, boolean writeToWAL)
+ throws ThriftHBaseException {
+ try {
+ return server.incrementColumnValue(regionName, row, family, qualifier, amount, writeToWAL);
+ } catch (IOException e) {
+ throw new ThriftHBaseException(e);
+ }
+ }
+
+ @Override
+ public long openScanner(byte[] regionName, Scan scan)
+ throws ThriftHBaseException {
+ try {
+ return server.openScanner(regionName, scan);
+ } catch (IOException e) {
+ throw new ThriftHBaseException(e);
+ }
+ }
+
+ @Override
+ public void mutateRow(byte[] regionName, TRowMutations arm)
+ throws ThriftHBaseException {
+ try {
+ server.mutateRow(regionName, RowMutations.Builder.createFromTRowMutations(arm));
+ } catch (IOException e) {
+ throw new ThriftHBaseException(e);
+ }
+ }
+
+ @Override
+ public void mutateRows(byte[] regionName, List<TRowMutations> armList)
+ throws ThriftHBaseException {
+ try {
+ List<RowMutations> rowMutations = new ArrayList<>();
+ for (TRowMutations mutation : armList) {
+ rowMutations.add(RowMutations.Builder.createFromTRowMutations(mutation));
+ }
+ server.mutateRow(regionName, rowMutations);
+ } catch (IOException e) {
+ throw new ThriftHBaseException(e);
+ }
+ }
+
+ @Override
+ public ListenableFuture<Void> mutateRowAsync(byte[] regionName, TRowMutations arm) {
+ try {
+ mutateRow(regionName, arm);
+ return Futures.immediateFuture(null);
+ } catch (ThriftHBaseException e) {
+ return Futures.immediateFailedFuture(e);
+ }
+ }
+
+ @Override
+ @Deprecated
+ public Result next(long scannerId) throws ThriftHBaseException {
+ try {
+ return server.next(scannerId);
+ } catch (IOException e) {
+ throw new ThriftHBaseException(e);
+ }
+ }
+
+ @Override
+ public List<Result> nextRows(long scannerId, int numberOfRows)
+ throws ThriftHBaseException {
+ try {
+ Result[] result = server.nextInternal(scannerId, numberOfRows);
+ List<Result> resultList = new ArrayList<>(result.length);
+ for (int i = 0; i < result.length; i++) {
+ resultList.add(this.addThriftRegionInfoQualifierIfNeeded(result[i]));
+ }
+ return resultList;
+ } catch (IOException e) {
+ throw new ThriftHBaseException(e);
+ }
+ }
+
+ @Override
+ public void close(long scannerId) throws ThriftHBaseException {
+ try {
+ server.close(scannerId);
+ } catch (IOException e) {
+ throw new ThriftHBaseException(e);
+ }
+ }
+
+ @Override
+ public long lockRow(byte[] regionName, byte[] row)
+ throws ThriftHBaseException {
+ try {
+ return server.lockRow(regionName, row);
+ } catch (IOException e) {
+ throw new ThriftHBaseException(e);
+ }
+ }
+
+ @Override
+ public ListenableFuture<RowLock> lockRowAsync(byte[] regionName, byte[] row) {
+ try {
+ long lockId = lockRow(regionName, row);
+ return Futures.immediateFuture(new RowLock(row, lockId));
+ } catch (ThriftHBaseException e) {
+ return Futures.immediateFailedFuture(e);
+ }
+ }
+
+ @Override
+ public void unlockRow(byte[] regionName, long lockId)
+ throws ThriftHBaseException {
+ try {
+ server.unlockRow(regionName, lockId);
+ } catch (IOException e) {
+ throw new ThriftHBaseException(e);
+ }
+ }
+
+ @Override
+ public ListenableFuture<Void> unlockRowAsync(byte[] regionName, long lockId) {
+ try {
+ unlockRow(regionName, lockId);
+ return Futures.immediateFuture(null);
+ } catch (ThriftHBaseException e) {
+ return Futures.immediateFailedFuture(e);
+ }
+ }
+
+ @Override
+ public List<HRegionInfo> getRegionsAssignment() throws ThriftHBaseException {
+ try {
+ return Arrays.asList(server.getRegionsAssignment());
+ } catch (IOException e) {
+ throw new ThriftHBaseException(e);
+ }
+ }
+
+ @Override
+ public HServerInfo getHServerInfo() throws ThriftHBaseException {
+ try {
+ return server.getHServerInfo();
+ } catch (IOException e) {
+ throw new ThriftHBaseException(e);
+ }
+ }
+
+ @Override
+ public TMultiResponse multiAction(MultiAction multi)
+ throws ThriftHBaseException {
+ try {
+ return TMultiResponse.Builder.createFromMultiResponse(server
+ .multiAction(multi));
+ } catch (IOException e) {
+ throw new ThriftHBaseException(e);
+ }
+ }
+
+ @Override
+ public MultiPutResponse multiPut(MultiPut puts) throws ThriftHBaseException {
+ try {
+ return server.multiPut(puts);
+ } catch (IOException e) {
+ throw new ThriftHBaseException(e);
+ }
+ }
+
+ @Override
+ public void bulkLoadHFile(String hfilePath, byte[] regionName,
+ byte[] familyName) throws ThriftHBaseException {
+ try {
+ server.bulkLoadHFile(hfilePath, regionName, familyName);
+ } catch (IOException e) {
+ throw new ThriftHBaseException(e);
+ }
+ }
+
+ @Override
+ public void bulkLoadHFile(String hfilePath, byte[] regionName,
+ byte[] familyName, boolean assignSeqNum) throws ThriftHBaseException {
+ try {
+ server.bulkLoadHFile(hfilePath, regionName, familyName, assignSeqNum);
+ } catch (IOException e) {
+ throw new ThriftHBaseException(e);
+ }
+ }
+
+ @Override
+ public void closeRegion(HRegionInfo hri, boolean reportWhenCompleted)
+ throws ThriftHBaseException {
+ try {
+ server.closeRegion(hri, reportWhenCompleted);
+ } catch (IOException e) {
+ throw new ThriftHBaseException(e);
+ }
+ }
+
+ @Override
+ public int updateFavoredNodes(AssignmentPlan plan) throws ThriftHBaseException {
+ try {
+ return server.updateFavoredNodes(plan);
+ } catch (IOException e) {
+ throw new ThriftHBaseException(e);
+ }
+ }
+
+ @Override
+ public void updateConfiguration() {
+ server.updateConfiguration();
+ }
+
+ @Override
+ public void stop(String why) {
+ server.stop(why);
+ }
+
+ @Override
+ public String getStopReason() {
+ return server.getStopReason();
+ }
+
+ @Override
+ public void setNumHDFSQuorumReadThreads(int maxThreads) {
+ server.setNumHDFSQuorumReadThreads(maxThreads);
+ }
+
+ @Override
+ public void setHDFSQuorumReadTimeoutMillis(long timeoutMillis) {
+ server.setHDFSQuorumReadTimeoutMillis(timeoutMillis);
+ }
+
+ @Override
+ public boolean isStopped() {
+ return server.isStopped();
+ }
+
+ @Override
+ public void stopForRestart() {
+ server.stopForRestart();
+ }
+
+ @Override
+ public void close() throws Exception {
+ // Do nothing
+ }
+
+ @Override
+ public String getConfProperty(String paramName) throws ThriftHBaseException {
+ return server.getConfProperty(paramName);
+ }
+
+ @Override
+ public List<Bucket> getHistogram(byte[] regionName)
+ throws ThriftHBaseException {
+ try {
+ List<Bucket> buckets = server.getHistogram(regionName);
+ if (buckets == null) return new ArrayList<Bucket>();
+ return buckets;
+ } catch (IOException e) {
+ throw new ThriftHBaseException(e);
+ }
+ }
+
+ @Override
+ public List<Bucket> getHistogramForStore(byte[] regionName, byte[] family)
+ throws ThriftHBaseException {
+ try {
+ List<Bucket> buckets = server.getHistogramForStore(regionName, family);
+ if (buckets == null) return new ArrayList<Bucket>();
+ return buckets;
+ } catch (IOException e) {
+ throw new ThriftHBaseException(e);
+ }
+ }
+}
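Every method in ThriftHRegionServer above follows the same pattern: delegate to
the wrapped HRegionServer, rethrow checked IOExceptions as ThriftHBaseException,
and, for the *Async variants, surface both results and failures through an
immediate ListenableFuture. A minimal sketch of that pattern, in which the
Delegate interface and doWork() method are hypothetical stand-ins for
HRegionServer and any of its IOException-throwing calls:

    import java.io.IOException;

    import org.apache.hadoop.hbase.ipc.thrift.exceptions.ThriftHBaseException;

    import com.google.common.util.concurrent.Futures;
    import com.google.common.util.concurrent.ListenableFuture;

    public class ThriftFacadeSketch {
      // Hypothetical stand-in for HRegionServer.
      interface Delegate {
        String doWork(byte[] regionName) throws IOException;
      }

      private final Delegate server;

      public ThriftFacadeSketch(Delegate server) {
        this.server = server;
      }

      // Synchronous form: the checked IOException is rethrown as the
      // Thrift-visible ThriftHBaseException.
      public String doWork(byte[] regionName) throws ThriftHBaseException {
        try {
          return server.doWork(regionName);
        } catch (IOException e) {
          throw new ThriftHBaseException(e);
        }
      }

      // Asynchronous form: both the result and the failure travel through
      // the returned future, so callers never handle a checked exception.
      public ListenableFuture<String> doWorkAsync(byte[] regionName) {
        try {
          return Futures.immediateFuture(doWork(regionName));
        } catch (ThriftHBaseException e) {
          return Futures.immediateFailedFuture(e);
        }
      }
    }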
Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/metrics/RegionServerDynamicMetrics.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/metrics/RegionServerDynamicMetrics.java?rev=1576909&r1=1576908&r2=1576909&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/metrics/RegionServerDynamicMetrics.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/metrics/RegionServerDynamicMetrics.java Wed Mar 12 21:17:13 2014
@@ -84,7 +84,7 @@ public class RegionServerDynamicMetrics
LOG.error(e);
}
}
-
+
public static RegionServerDynamicMetrics newInstance(HRegionServer regionServer) {
RegionServerDynamicMetrics metrics =
new RegionServerDynamicMetrics(regionServer);
Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/metrics/RegionServerDynamicStatistics.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/metrics/RegionServerDynamicStatistics.java?rev=1576909&r1=1576908&r2=1576909&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/metrics/RegionServerDynamicStatistics.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/metrics/RegionServerDynamicStatistics.java Wed Mar 12 21:17:13 2014
@@ -47,3 +47,5 @@ public class RegionServerDynamicStatisti
}
}
+
+
Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/metrics/SchemaConfigured.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/metrics/SchemaConfigured.java?rev=1576909&r1=1576908&r2=1576909&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/metrics/SchemaConfigured.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/metrics/SchemaConfigured.java Wed Mar 12 21:17:13 2014
@@ -59,7 +59,7 @@ public class SchemaConfigured implements
/**
* Creates an instance corresponding to an unknown table and column family.
- * Used in unit tests.
+ * Used in unit tests.
*/
public static SchemaConfigured createUnknown() {
return new SchemaConfigured(null, SchemaMetrics.UNKNOWN,
Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/metrics/SchemaMetrics.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/metrics/SchemaMetrics.java?rev=1576909&r1=1576908&r2=1576909&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/metrics/SchemaMetrics.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/metrics/SchemaMetrics.java Wed Mar 12 21:17:13 2014
@@ -33,7 +33,6 @@ import java.util.concurrent.ConcurrentMa
import java.util.regex.Matcher;
import java.util.regex.Pattern;
-import com.google.common.base.Preconditions;
import org.apache.commons.lang.mutable.MutableDouble;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
@@ -47,6 +46,8 @@ import org.apache.hadoop.hbase.regionser
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Pair;
+import com.google.common.base.Preconditions;
+
/**
* A collection of metric names in a given column family or a (table, column
* family) combination. The following "dimensions" are supported:
@@ -477,7 +478,7 @@ public class SchemaMetrics {
addToReadTime(BlockCategory.ALL_CATEGORIES, isCompaction, timeMs);
}
}
-
+
/**
* Used to accumulate store metrics across multiple regions in a region
* server. These metrics are not "persistent", i.e. we keep overriding them
@@ -529,7 +530,7 @@ public class SchemaMetrics {
HRegion.incrNumericPersistentMetric(
storeMetricNames[storeMetricType.ordinal()], value);
}
-
+
/**
* Updates metrics for cacheHit/cacheMiss when a block is read.
* @param blockCategory category of the block read
@@ -623,7 +624,7 @@ public class SchemaMetrics {
addToPreloadReadTime(BlockCategory.ALL_CATEGORIES, isCompaction, timeMs);
}
}
-
+
/**
* Updates read time, the number of misses, and the total number of block for preloader
*/
@@ -644,7 +645,7 @@ public class SchemaMetrics {
incrNumericMetric(blockCategory, isCompaction,
BlockMetricType.PRELOAD_CACHE_HIT);
}
-
+
/**
* Adds the given delta to the cache size for the given block category and
* the aggregate metric for all block categories. Updates both the per-CF
@@ -800,15 +801,15 @@ public class SchemaMetrics {
*/
public static String generateSchemaMetricsPrefix(String tableName,
Set<byte[]> families) {
- if (families == null || families.size() == 0 ||
+ if (families == null || families.isEmpty() ||
tableName == null || tableName.length() == 0)
return "";
if (families.size() == 1) {
- return SchemaMetrics.generateSchemaMetricsPrefix(tableName,
+ return SchemaMetrics.generateSchemaMetricsPrefix(tableName,
Bytes.toString(families.iterator().next()));
}
-
+
tableName = getEffectiveTableName(tableName);
StringBuilder sb = new StringBuilder();
@@ -1158,7 +1159,7 @@ public class SchemaMetrics {
return sb.toString();
}
- /** Validates metrics that keep track of the number of cached blocks for each category */
+ /** Validates metrics that keep track of the number of cached blocks for each category */
private static void checkNumBlocksInCache() {
final LruBlockCache cache =
LruBlockCacheFactory.getInstance().getCurrentBlockCacheInstance();
@@ -1185,7 +1186,7 @@ public class SchemaMetrics {
}
public static void clearBlockCacheMetrics() {
- for (SchemaMetrics metrics : tableAndFamilyToMetrics.values()) {
+ for (SchemaMetrics metrics : tableAndFamilyToMetrics.values()) {
for (BlockCategory blockCategory : BlockCategory.values()) {
String key = metrics.getBlockMetricName(blockCategory, DEFAULT_COMPACTION_FLAG,
BlockMetricType.CACHE_NUM_BLOCKS);
Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLog.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLog.java?rev=1576909&r1=1576908&r2=1576909&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLog.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLog.java Wed Mar 12 21:17:13 2014
@@ -195,6 +195,7 @@ public class HLog implements Syncable {
Entry next(Entry reuse) throws IOException;
void seek(long pos) throws IOException;
long getPosition() throws IOException;
+ boolean isComplete() throws IOException;
}
public interface Writer {
@@ -2353,4 +2354,4 @@ public class HLog implements Syncable {
logSyncerThread.syncerShuttingDown = true;
logSyncerThread.interrupt();
}
-}
\ No newline at end of file
+}
Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLogKey.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLogKey.java?rev=1576909&r1=1576908&r2=1576909&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLogKey.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLogKey.java Wed Mar 12 21:17:13 2014
@@ -127,9 +127,9 @@ public class HLogKey implements Writable
/**
* Produces a string map for this key. Useful for programmatic use and
- * manipulation of the data stored in an HLogKey, for example, printing
+ * manipulation of the data stored in an HLogKey, for example, printing
* as JSON.
- *
+ *
* @return a Map containing data from this key
*/
public Map<String, Object> toStringMap() {
Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLogPrettyPrinter.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLogPrettyPrinter.java?rev=1576909&r1=1576908&r2=1576909&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLogPrettyPrinter.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLogPrettyPrinter.java Wed Mar 12 21:17:13 2014
@@ -374,7 +374,7 @@ public class HLogPrettyPrinter {
}
}
- if (actions.size() == 0)
+ if (actions.isEmpty())
continue;
txn.put("actions", actions);
if (outputJSON) {
@@ -484,6 +484,7 @@ public class HLogPrettyPrinter {
Collections.sort(entrySet,
new Comparator<Map.Entry<String, MutableLong>>() {
+ @Override
public int compare(Map.Entry<String, MutableLong> o1,
Map.Entry<String, MutableLong> o2) {
return o2.getValue().compareTo(o1.getValue());
@@ -531,7 +532,7 @@ public class HLogPrettyPrinter {
try {
CommandLine cmd = parser.parse(options, args);
files = cmd.getArgList();
- if (files.size() == 0 || cmd.hasOption("h")) {
+ if (files.isEmpty() || cmd.hasOption("h")) {
HelpFormatter formatter = new HelpFormatter();
formatter.printHelp("HLog filename(s) ", options, true);
System.exit(-1);
Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SequenceFileLogReader.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SequenceFileLogReader.java?rev=1576909&r1=1576908&r2=1576909&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SequenceFileLogReader.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SequenceFileLogReader.java Wed Mar 12 21:17:13 2014
@@ -74,18 +74,25 @@ public class SequenceFileLogReader imple
static class WALReaderFSDataInputStream extends FSDataInputStream {
private boolean firstGetPosInvocation = true;
private long length;
+ private FSDataInputStream is;
WALReaderFSDataInputStream(final FSDataInputStream is, final long l)
throws IOException {
super(is);
this.length = l;
+ this.is = is;
+ }
+
+ @Override
+ public boolean isUnderConstruction() throws IOException {
+ return is.isUnderConstruction();
}
// This section can be confusing. It is specific to how HDFS works.
// Let me try to break it down. This is the problem:
//
- // 1. HDFS DataNodes update the NameNode about a filename's length
- // on block boundaries or when a file is closed. Therefore,
+ // 1. HDFS DataNodes update the NameNode about a filename's length
+ // on block boundaries or when a file is closed. Therefore,
// if an RS dies, then the NN's fs.getLength() can be out of date
// 2. this.in.available() would work, but it returns int &
// therefore breaks for files > 2GB (happens on big clusters)
@@ -93,7 +100,7 @@ public class SequenceFileLogReader imple
// 4. DFSInputStream is wrapped 2 levels deep : this.in.in
//
// So, here we adjust getPos() using getFileLength() so the
- // SequenceFile.Reader constructor (aka: first invokation) comes out
+ // SequenceFile.Reader constructor (aka: first invokation) comes out
// with the correct end of the file:
// this.end = in.getPos() + length;
@Override
@@ -208,8 +215,8 @@ public class SequenceFileLogReader imple
} catch(Exception e) { /* reflection fail. keep going */ }
String msg = (this.path == null? "": this.path.toString()) +
- ", entryStart=" + entryStart + ", pos=" + pos +
- ((end == Long.MAX_VALUE) ? "" : ", end=" + end) +
+ ", entryStart=" + entryStart + ", pos=" + pos +
+ ((end == Long.MAX_VALUE) ? "" : ", end=" + end) +
", edit=" + this.edit;
// Enhance via reflection so we don't change the original class type
@@ -219,7 +226,12 @@ public class SequenceFileLogReader imple
.newInstance(msg)
.initCause(ioe);
} catch(Exception e) { /* reflection fail. keep going */ }
-
+
return ioe;
}
+
+ @Override
+ public boolean isComplete() throws IOException {
+ return reader.isComplete();
+ }
}
Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/replication/ReplicationZookeeperWrapper.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/replication/ReplicationZookeeperWrapper.java?rev=1576909&r1=1576908&r2=1576909&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/replication/ReplicationZookeeperWrapper.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/replication/ReplicationZookeeperWrapper.java Wed Mar 12 21:17:13 2014
@@ -19,30 +19,29 @@
*/
package org.apache.hadoop.hbase.replication;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.SortedMap;
+import java.util.SortedSet;
+import java.util.TreeMap;
+import java.util.TreeSet;
+import java.util.concurrent.atomic.AtomicBoolean;
+
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HServerAddress;
import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.zookeeper.HQuorumPeer;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWrapper;
import org.apache.hadoop.util.StringUtils;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.SortedMap;
-import java.util.TreeMap;
-import java.util.SortedSet;
-import java.util.TreeSet;
-import java.util.concurrent.atomic.AtomicBoolean;
-
/**
* This class serves as a helper for all things related to zookeeper
* in replication.
@@ -176,7 +175,7 @@ public class ReplicationZookeeperWrapper
* @return addresses of all region servers
*/
public List<HServerAddress> getPeersAddresses(String peerClusterId) {
- if (this.peerClusters.size() == 0) {
+ if (this.peerClusters.isEmpty()) {
return new ArrayList<HServerAddress>(0);
}
ZooKeeperWrapper zkw = this.peerClusters.get(peerClusterId);
@@ -377,7 +376,7 @@ public class ReplicationZookeeperWrapper
String clusterPath = this.zookeeperWrapper.getZNode(nodePath, cluster);
List<String> hlogs = this.zookeeperWrapper.listZnodes(clusterPath, null);
// That region server didn't have anything to replicate for this cluster
- if (hlogs == null || hlogs.size() == 0) {
+ if (hlogs == null || hlogs.isEmpty()) {
continue;
}
SortedSet<String> logQueue = new TreeSet<String>();
Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/rest/StorageClusterStatusResource.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/rest/StorageClusterStatusResource.java?rev=1576909&r1=1576908&r2=1576909&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/rest/StorageClusterStatusResource.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/rest/StorageClusterStatusResource.java Wed Mar 12 21:17:13 2014
@@ -20,7 +20,13 @@
package org.apache.hadoop.hbase.rest;
-import java.io.IOException;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.ClusterStatus;
+import org.apache.hadoop.hbase.HServerInfo;
+import org.apache.hadoop.hbase.HServerLoad;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.rest.model.StorageClusterStatusModel;
import javax.ws.rs.GET;
import javax.ws.rs.Produces;
@@ -30,15 +36,7 @@ import javax.ws.rs.core.Context;
import javax.ws.rs.core.Response;
import javax.ws.rs.core.Response.ResponseBuilder;
import javax.ws.rs.core.UriInfo;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-
-import org.apache.hadoop.hbase.ClusterStatus;
-import org.apache.hadoop.hbase.HServerInfo;
-import org.apache.hadoop.hbase.HServerLoad;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
-import org.apache.hadoop.hbase.rest.model.StorageClusterStatusModel;
+import java.io.IOException;
public class StorageClusterStatusResource extends ResourceBase {
private static final Log LOG =
@@ -85,7 +83,7 @@ public class StorageClusterStatusResourc
for (HServerLoad.RegionLoad region: load.getRegionsLoad()) {
node.addRegion(region.getName(), region.getStores(),
region.getStorefiles(), region.getStorefileSizeMB(),
- region.getMemStoreSizeMB(), region.getStorefileIndexSizeMB());
+ region.getMemstoreSizeMB(), region.getStorefileIndexSizeMB());
}
}
for (String name: status.getDeadServerNames()) {
Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/rest/client/RemoteHTable.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/rest/client/RemoteHTable.java?rev=1576909&r1=1576908&r2=1576909&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/rest/client/RemoteHTable.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/rest/client/RemoteHTable.java Wed Mar 12 21:17:13 2014
@@ -20,20 +20,44 @@
package org.apache.hadoop.hbase.rest.client;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.TreeMap;
+
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.*;
-import org.apache.hadoop.hbase.client.*;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionLocation;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.client.Delete;
+import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.HTableInterface;
+import org.apache.hadoop.hbase.client.Mutation;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.ResultScanner;
+import org.apache.hadoop.hbase.client.ResultScannerIterator;
+import org.apache.hadoop.hbase.client.RowLock;
+import org.apache.hadoop.hbase.client.RowMutations;
+import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.io.TimeRange;
import org.apache.hadoop.hbase.rest.Constants;
-import org.apache.hadoop.hbase.rest.model.*;
+import org.apache.hadoop.hbase.rest.model.CellModel;
+import org.apache.hadoop.hbase.rest.model.CellSetModel;
+import org.apache.hadoop.hbase.rest.model.RowModel;
+import org.apache.hadoop.hbase.rest.model.ScannerModel;
+import org.apache.hadoop.hbase.rest.model.TableSchemaModel;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.util.StringUtils;
-import java.io.IOException;
-import java.util.*;
-
/**
* HTable interface to remote tables accessed via REST gateway
*/
@@ -190,14 +214,17 @@ public class RemoteHTable implements HTa
this.sleepTime = conf.getLong("hbase.rest.client.sleep", 1000);
}
+ @Override
public byte[] getTableName() {
return name.clone();
}
+ @Override
public Configuration getConfiguration() {
return conf;
}
+ @Override
public HTableDescriptor getTableDescriptor() throws IOException {
StringBuilder sb = new StringBuilder();
sb.append('/');
@@ -228,14 +255,17 @@ public class RemoteHTable implements HTa
throw new IOException("schema request timed out");
}
+ @Override
public void close() throws IOException {
client.shutdown();
}
+ @Override
public Result[] get(List<Get> gets) throws IOException {
throw new IOException("multi get is not supported here");
}
+ @Override
public Result get(Get get) throws IOException {
TimeRange range = get.getTimeRange();
String spec = buildRowSpec(get.getRow(), get.getFamilyMap(),
@@ -272,12 +302,14 @@ public class RemoteHTable implements HTa
throw new IOException("get request timed out");
}
+ @Override
public boolean exists(Get get) throws IOException {
LOG.warn("exists() is really get(), just use get()");
Result result = get(get);
return (result != null && !(result.isEmpty()));
}
+ @Override
public void put(Put put) throws IOException {
CellSetModel model = buildModelFromPut(put);
StringBuilder sb = new StringBuilder();
@@ -308,6 +340,7 @@ public class RemoteHTable implements HTa
throw new IOException("put request timed out");
}
+ @Override
public void put(List<Put> puts) throws IOException {
// this is a trick: The gateway accepts multiple rows in a cell set and
// ignores the row specification in the URI
@@ -365,6 +398,7 @@ public class RemoteHTable implements HTa
throw new IOException("multiput request timed out");
}
+ @Override
public void delete(Delete delete) throws IOException {
String spec = buildRowSpec(delete.getRow(), delete.getFamilyMap(),
delete.getTimeStamp(), delete.getTimeStamp(), 1);
@@ -386,12 +420,14 @@ public class RemoteHTable implements HTa
throw new IOException("delete request timed out");
}
+ @Override
public void delete(List<Delete> deletes) throws IOException {
for (Delete delete: deletes) {
delete(delete);
}
}
+ @Override
public void flushCommits() throws IOException {
// no-op
}
@@ -475,45 +511,9 @@ public class RemoteHTable implements HTa
return results[0];
}
- class Iter implements Iterator<Result> {
-
- Result cache;
-
- public Iter() {
- try {
- cache = Scanner.this.next();
- } catch (IOException e) {
- LOG.warn(StringUtils.stringifyException(e));
- }
- }
-
- @Override
- public boolean hasNext() {
- return cache != null;
- }
-
- @Override
- public Result next() {
- Result result = cache;
- try {
- cache = Scanner.this.next();
- } catch (IOException e) {
- LOG.warn(StringUtils.stringifyException(e));
- cache = null;
- }
- return result;
- }
-
- @Override
- public void remove() {
- throw new RuntimeException("remove() not supported");
- }
-
- }
-
@Override
public Iterator<Result> iterator() {
- return new Iter();
+ return new ResultScannerIterator(this);
}
@Override
@@ -533,16 +533,19 @@ public class RemoteHTable implements HTa
}
+ @Override
public ResultScanner getScanner(Scan scan) throws IOException {
return new Scanner(scan);
}
+ @Override
public ResultScanner getScanner(byte[] family) throws IOException {
Scan scan = new Scan();
scan.addFamily(family);
return new Scanner(scan);
}
+ @Override
public ResultScanner getScanner(byte[] family, byte[] qualifier)
throws IOException {
Scan scan = new Scan();
@@ -550,38 +553,46 @@ public class RemoteHTable implements HTa
return new Scanner(scan);
}
+ @Override
public boolean isAutoFlush() {
return true;
}
+ @Override
public Result getRowOrBefore(byte[] row, byte[] family) throws IOException {
throw new IOException("getRowOrBefore not supported");
}
+ @Override
public RowLock lockRow(byte[] row) throws IOException {
throw new IOException("lockRow not implemented");
}
+ @Override
public void unlockRow(RowLock rl) throws IOException {
throw new IOException("unlockRow not implemented");
}
+ @Override
public boolean checkAndPut(byte[] row, byte[] family, byte[] qualifier,
byte[] value, Put put) throws IOException {
throw new IOException("checkAndPut not supported");
}
+ @Override
public boolean checkAndDelete(byte[] row, byte[] family, byte[] qualifier,
byte[] value, Delete delete) throws IOException {
throw new IOException("checkAndDelete not supported");
}
+ @Override
public long incrementColumnValue(byte[] row, byte[] family, byte[] qualifier,
long amount) throws IOException {
throw new IOException("incrementColumnValue not supported");
}
+ @Override
public long incrementColumnValue(byte[] row, byte[] family, byte[] qualifier,
long amount, boolean writeToWAL) throws IOException {
throw new IOException("incrementColumnValue not supported");
@@ -614,6 +625,7 @@ public class RemoteHTable implements HTa
return null;
}
+ @Override
public Result[] batchGet(List<Get> actions) throws IOException {
// TODO Auto-generated method stub
throw new IOException("batchGet not supported");
Added: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/thrift/HBaseNiftyThriftServer.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/thrift/HBaseNiftyThriftServer.java?rev=1576909&view=auto
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/thrift/HBaseNiftyThriftServer.java (added)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/thrift/HBaseNiftyThriftServer.java Wed Mar 12 21:17:13 2014
@@ -0,0 +1,137 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.thrift;
+
+import com.facebook.nifty.codec.DefaultThriftFrameCodecFactory;
+import com.facebook.nifty.codec.ThriftFrameCodecFactory;
+import com.facebook.nifty.core.NiftyTimer;
+import com.facebook.nifty.duplex.TDuplexProtocolFactory;
+import com.facebook.nifty.header.codec.HeaderThriftCodecFactory;
+import com.facebook.nifty.header.protocol.TDuplexHeaderProtocolFactory;
+import com.facebook.nifty.header.protocol.TFacebookCompactProtocol;
+import com.facebook.nifty.processor.NiftyProcessor;
+import com.facebook.swift.codec.ThriftCodecManager;
+import com.facebook.swift.service.ThriftEventHandler;
+import com.facebook.swift.service.ThriftServer;
+import com.facebook.swift.service.ThriftServerConfig;
+import com.facebook.swift.service.ThriftServiceProcessor;
+import com.google.common.collect.ImmutableMap;
+import io.airlift.units.DataSize;
+import io.airlift.units.DataSize.Unit;
+import io.airlift.units.Duration;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.ipc.HBaseRPC;
+import org.apache.hadoop.hbase.ipc.HBaseRpcMetrics;
+import org.apache.hadoop.hbase.ipc.ThriftClientInterface;
+import org.apache.hadoop.hbase.ipc.thrift.ThriftCallStatsReporter;
+import org.apache.thrift.protocol.TBinaryProtocol;
+import org.apache.thrift.protocol.TCompactProtocol;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.TimeUnit;
+
+public class HBaseNiftyThriftServer {
+ public static final ImmutableMap<String, TDuplexProtocolFactory> SWIFT_PROTOCOL_FACTORIES = ImmutableMap
+ .of("binary", TDuplexProtocolFactory
+ .fromSingleFactory(new TBinaryProtocol.Factory()), "compact",
+ TDuplexProtocolFactory
+ .fromSingleFactory(new TCompactProtocol.Factory()), "header",
+ new TDuplexHeaderProtocolFactory(), "fcompact",
+ TDuplexProtocolFactory
+ .fromSingleFactory(new TFacebookCompactProtocol.Factory()));
+
+ public static final ImmutableMap<String, ThriftFrameCodecFactory> SWIFT_FRAME_CODEC_FACTORIES = ImmutableMap
+ .<String, ThriftFrameCodecFactory> of("buffered",
+ new DefaultThriftFrameCodecFactory(), "framed",
+ new DefaultThriftFrameCodecFactory(), "header",
+ new HeaderThriftCodecFactory());
+ // The port at which the server needs to run
+ private int port;
+ private static final Log LOG = LogFactory.getLog(HBaseNiftyThriftServer.class);
+ private ThriftServer server;
+ private HBaseRpcMetrics rpcMetrics;
+ private List<ThriftEventHandler> thriftClientStatsReporterList =
+ new ArrayList<>();
+ private boolean useHeaderProtocol;
+ public static final ThriftCodecManager THRIFT_CODEC_MANAGER =
+ new ThriftCodecManager();
+
+ public HBaseRpcMetrics getRpcMetrics() {
+ return rpcMetrics;
+ }
+
+ public HBaseNiftyThriftServer(Configuration conf, ThriftClientInterface instance, int port) {
+ // TODO (manukranthk): verify whether the port is already in use.
+ this.port = port;
+ this.useHeaderProtocol = conf.getBoolean(HConstants.USE_HEADER_PROTOCOL,
+ HConstants.DEFAULT_USE_HEADER_PROTOCOL);
+ this.rpcMetrics = new HBaseRpcMetrics(
+ HBaseRPC.Server.classNameBase(instance.getClass().getName()),
+ Integer.toString(port));
+ // The event handler which pushes down the per-call metrics.
+ ThriftEventHandler reporter = new ThriftCallStatsReporter(conf, rpcMetrics,
+ instance);
+ thriftClientStatsReporterList.add(reporter);
+
+ NiftyProcessor processor = new ThriftServiceProcessor(
+ THRIFT_CODEC_MANAGER, thriftClientStatsReporterList, instance);
+
+ ThriftServerConfig serverConfig = new ThriftServerConfig()
+ .setPort(port)
+ .setMaxFrameSize(
+ new DataSize(conf.getInt(HConstants.SWIFT_MAX_FRAME_SIZE_BYTES,
+ HConstants.SWIFT_MAX_FRAME_SIZE_BYTES_DEFAULT), Unit.BYTE))
+ .setConnectionLimit(
+ conf.getInt(HConstants.SWIFT_CONNECTION_LIMIT,
+ HConstants.SWIFT_CONNECTION_LIMIT_DEFAULT))
+ .setIoThreadCount(
+ conf.getInt(HConstants.SWIFT_IO_THREADS,
+ HConstants.SWIFT_IO_THREADS_DEFAULT))
+ .setWorkerThreads(
+ conf.getInt(HConstants.SWIFT_WORKER_THREADS,
+ HConstants.SWIFT_WORKER_THREADS_DEFAULT))
+ .setIdleConnectionTimeout(
+ new Duration(conf.getInt(HConstants.HBASE_RPC_TIMEOUT_KEY,
+ HConstants.DEFAULT_HBASE_RPC_TIMEOUT), TimeUnit.MILLISECONDS));
+
+ if (useHeaderProtocol) {
+ server = new ThriftServer(processor, serverConfig.setProtocolName(
+ "header").setTransportName("header"), new NiftyTimer("thrift"),
+ SWIFT_FRAME_CODEC_FACTORIES, SWIFT_PROTOCOL_FACTORIES);
+ } else {
+ server = new ThriftServer(processor, serverConfig.setProtocolName(
+ "fcompact").setTransportName("framed"), new NiftyTimer("thrift"),
+ SWIFT_FRAME_CODEC_FACTORIES, SWIFT_PROTOCOL_FACTORIES);
+ }
+ server.start();
+ LOG.debug("Starting the thrift server. Listening on port " + this.port);
+ }
+
+ public int getPort() {
+ return this.server.getPort();
+ }
+
+ public void stop() {
+ server.close();
+ }
+}
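Note that the constructor above does all the work: it registers RPC metrics,
builds the Swift/Nifty processor, and calls server.start() before returning, so
a caller only needs to construct the server and later stop it. A minimal hedged
sketch; the port is illustrative, and handler can be any ThriftClientInterface
implementation (presumably the ThriftHRegionServer wrapper added earlier in
this commit):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.ipc.ThriftClientInterface;
    import org.apache.hadoop.hbase.thrift.HBaseNiftyThriftServer;

    public class NiftyServerSketch {
      public static void startAndStop(ThriftClientInterface handler) {
        Configuration conf = HBaseConfiguration.create();
        // Construction starts the server listening immediately.
        HBaseNiftyThriftServer server =
            new HBaseNiftyThriftServer(conf, handler, 9091 /* illustrative */);
        System.out.println("Listening on port " + server.getPort());
        // ... serve traffic ...
        server.stop();
      }
    }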
Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/thrift/HbaseHandlerMetricsProxy.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/thrift/HbaseHandlerMetricsProxy.java?rev=1576909&r1=1576908&r2=1576909&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/thrift/HbaseHandlerMetricsProxy.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/thrift/HbaseHandlerMetricsProxy.java Wed Mar 12 21:17:13 2014
@@ -73,7 +73,7 @@ public class HbaseHandlerMetricsProxy im
}
return result;
}
-
+
private static long now() {
return System.nanoTime();
}
Added: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/thrift/SelfRetryingListenableFuture.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/thrift/SelfRetryingListenableFuture.java?rev=1576909&view=auto
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/thrift/SelfRetryingListenableFuture.java (added)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/thrift/SelfRetryingListenableFuture.java Wed Mar 12 21:17:13 2014
@@ -0,0 +1,330 @@
+/**
+ * Copyright 2013 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.thrift;
+
+import com.google.common.util.concurrent.AbstractFuture;
+import com.google.common.util.concurrent.FutureCallback;
+import com.google.common.util.concurrent.Futures;
+import com.google.common.util.concurrent.ListenableFuture;
+import com.google.common.util.concurrent.ListeningScheduledExecutorService;
+import com.google.common.util.concurrent.SettableFuture;
+import org.apache.commons.lang.mutable.MutableBoolean;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.DoNotRetryIOException;
+import org.apache.hadoop.hbase.HRegionLocation;
+import org.apache.hadoop.hbase.NotServingRegionException;
+import org.apache.hadoop.hbase.client.ClientSideDoNotRetryException;
+import org.apache.hadoop.hbase.client.HTableAsync;
+import org.apache.hadoop.hbase.client.PreemptiveFastFailException;
+import org.apache.hadoop.hbase.client.RetriesExhaustedException;
+import org.apache.hadoop.hbase.client.ServerCallable;
+import org.apache.hadoop.hbase.ipc.HConnectionParams;
+import org.apache.hadoop.hbase.ipc.thrift.exceptions.ThriftHBaseException;
+import org.apache.hadoop.hbase.regionserver.RegionOverloadedException;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.TimeUnit;
+
+/**
+ * Makes asynchronous call(s) to the server with a built-in retry mechanism.
+ *
+ * @param <V> Expected result type
+ */
+public class SelfRetryingListenableFuture<V> extends AbstractFuture<V> {
+ private final Log LOG = LogFactory.getLog(SelfRetryingListenableFuture.class);
+
+ private final HTableAsync table;
+ private final ServerCallable<ListenableFuture<V>> callable;
+ private final HConnectionParams params;
+ private final ListeningScheduledExecutorService executorService;
+
+ private boolean instantiateRegionLocation = false;
+ private boolean didTry = false;
+
+ private final Runnable attemptWorker = new Runnable() {
+ @Override
+ public void run() {
+ makeAttempt();
+ }
+ };
+
+ private final SettableFuture<V> downstream;
+
+ private int tries = 0;
+ private List<Throwable> exceptions = new ArrayList<>();
+ private RegionOverloadedException roe = null;
+
+ long callStartTime = 0;
+ int serverRequestedRetries = 0;
+ long serverRequestedWaitTime = 0;
+
+ public SelfRetryingListenableFuture(
+ HTableAsync table, ServerCallable<ListenableFuture<V>> callable,
+ HConnectionParams params, ListeningScheduledExecutorService executorService) {
+ this.table = table;
+ this.callable = callable;
+ this.params = params;
+ this.executorService = executorService;
+ this.downstream = SettableFuture.create();
+ }
+
+ /**
+ * Start self-retrying attempts to get result
+ *
+ * @return ListenableFuture for client
+ */
+ public ListenableFuture<V> startFuture() {
+ // Instantiate region location once
+ instantiateRegionLocation = true;
+ callStartTime = System.currentTimeMillis();
+
+ // Make the first attempt using thread pool to make HTableAsync APIs full asynchronous,
+ // because instantiateRegionLocation() might cause some pause.
+ executorService.execute(attemptWorker);
+
+ return downstream;
+ }
+
+ /**
+ * Make a single attempt to get result from server
+ */
+ private void makeAttempt() {
+ if (instantiateRegionLocation) {
+ // Do not retry if the region cannot be located; there are enough
+ // retries within instantiateRegionLocation().
+ try {
+ callable.instantiateRegionLocation(false);
+ } catch (Exception e) {
+ setFailure(e);
+ return;
+ }
+ // By default don't instantiate region location for next attempt
+ instantiateRegionLocation = false;
+ }
+
+ tries++;
+ LOG.debug("Try number: " + tries);
+
+ didTry = false;
+ try {
+ ListenableFuture<V> upstream =
+ table.getConnectionAndResetOperationContext().getRegionServerWithoutRetries(callable, false);
+ // The upstream listenable future is created.
+ // We assume at this point the client has tried to communicate with the server.
+ didTry = true;
+ Futures.addCallback(upstream, new FutureCallback<V>() {
+ @Override
+ public void onSuccess(V v) {
+ setSuccess(v);
+ }
+
+ @Override
+ public void onFailure(Throwable t) {
+ // This try block is a safeguard that ensures the retrying process
+ // reaches a final state and notifies its downstream future.
+ // Anything caught here indicates a bug.
+ try {
+ handleException(t);
+ } catch (Exception e) {
+ LOG.error("Exception should never be caught here!", e);
+ setFailure(e);
+ }
+ }
+ }, executorService);
+ } catch (Exception e) {
+ LOG.error("Cannot create upstream listenable future at the first place", e);
+ handleException(e);
+ }
+ }
+
+ /**
+ * Pass result to client. It's the final state of this listenable future.
+ *
+ * @param v Result from server
+ */
+ private void setSuccess(V v) {
+ downstream.set(v);
+ }
+
+ /**
+ * Pass exception to downstream ListenableFuture. It's the final state of this listenable future.
+ *
+ * @param t The exception for client
+ */
+ private void setFailure(Throwable t) {
+ downstream.setException(t);
+ }
+
+ /**
+ * Unwrap exception if it's from server side and handle all scenarios.
+ *
+ * @param t The exception from either client side or server side
+ */
+ protected void handleException(Throwable t) {
+ if (t instanceof ThriftHBaseException) {
+ Exception e = ((ThriftHBaseException) t).getServerJavaException();
+ LOG.debug("Server defined exception", e);
+ retryOrStop(e);
+ } else if (t instanceof PreemptiveFastFailException) {
+ // Client did not try to contact the server.
+ // Skip exception handling in TableServers and just fast fail to next retry.
+ retryOrStop(t);
+ } else {
+ LOG.debug("Other exception type, detecting if it's need to refresh connection", t);
+ if (!didTry) {
+ // The call to the server was never actually made, so try to refresh
+ // the server connection if necessary.
+ try {
+ callable.refreshServerConnection((Exception)t);
+ } catch (Exception e) {
+ // Decide the next step based on the original exception. Do not use this
+ // exception, which is wrapped by the connection refresh.
+ }
+ }
+
+ // handleThrowable() determines whether the client could not communicate
+ // with the server; it is expected to always throw.
+ MutableBoolean couldNotCommunicateWithServer = new MutableBoolean(false);
+ try {
+ callable.handleThrowable(t, couldNotCommunicateWithServer);
+ } catch (Exception e) {
+ // Update failure info for fast failure
+ callable.updateFailureInfoForServer(didTry, couldNotCommunicateWithServer.booleanValue());
+ retryOrStop(e);
+ return;
+ }
+ RuntimeException ex = new RuntimeException("Unexpected code path");
+ LOG.error("handleThrowable() should always throw an exception", ex);
+ setFailure(ex);
+ }
+ }
+
+ /**
+ * Decide whether to proceed based on exception type
+ * @param t The exception from either client side or server side
+ */
+ private void retryOrStop(Throwable t) {
+ if (t instanceof DoNotRetryIOException) {
+ if (t.getCause() instanceof NotServingRegionException) {
+ HRegionLocation prevLoc = callable.getLocation();
+ if (prevLoc != null) {
+ table.getConnection().deleteCachedLocation(
+ callable.getTableName(), prevLoc.getRegionInfo().getStartKey(), prevLoc.getServerAddress());
+ }
+ }
+ // So there is no retry
+ setFailure(t);
+ } else if (t instanceof RegionOverloadedException) {
+ roe = (RegionOverloadedException)t;
+ serverRequestedWaitTime = roe.getBackoffTimeMillis();
+
+ // If the server requests a wait, we wait for that time and start again.
+ // Do not count this time/tries against the client retries.
+ if (serverRequestedWaitTime > 0) {
+ serverRequestedRetries++;
+
+ if (serverRequestedRetries > params.getMaxServerRequestedRetries()) {
+ RegionOverloadedException e = RegionOverloadedException.create(
+ roe, exceptions, serverRequestedWaitTime);
+ setFailure(e);
+ return;
+ }
+
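+ // Wait only for the remainder of the server-requested backoff window,
+ // measured from when the call first started.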
+ long pauseTime = serverRequestedWaitTime + callStartTime - System.currentTimeMillis();
+ serverRequestedWaitTime = 0;
+ tries = 0;
+ callStartTime = System.currentTimeMillis();
+ scheduleNextAttempt(pauseTime);
+ return;
+ }
+
+ // If the server does not request a wait, just do a normal retry without
+ // any sleep. This should not normally happen.
+ makeAttempt();
+ } else if (t instanceof ClientSideDoNotRetryException ||
+ t instanceof PreemptiveFastFailException) {
+ // No retry for specific exception types
+ setFailure(t);
+ } else {
+ exceptions.add(t);
+
+ if (tries >= params.getNumRetries()) {
+ RetriesExhaustedException ree = new RetriesExhaustedException(
+ callable.getServerName(), callable.getRegionName(), callable.getRow(), tries, exceptions);
+ setFailure(ree);
+ return;
+ }
+
+ HRegionLocation prevLoc = callable.getLocation();
+ if (prevLoc.getRegionInfo() != null) {
+ table.getConnection().deleteCachedLocation(
+ callable.getTableName(), prevLoc.getRegionInfo().getStartKey(), prevLoc.getServerAddress());
+ }
+
+ try {
+ callable.instantiateRegionLocation(false);
+ } catch (Exception e) {
+ exceptions.add(e);
+ RetriesExhaustedException ree = new RetriesExhaustedException(
+ callable.getServerName(), callable.getRegionName(), callable.getRow(), tries, exceptions);
+ // Do not tolerate failure to instantiate the region location.
+ setFailure(ree);
+ return;
+ }
+
+ if (prevLoc.getServerAddress().equals(callable.getLocation().getServerAddress())) {
+ // Bail out of the retry loop if we have to wait too long
+ long pauseTime = params.getPauseTime(tries);
+ if ((System.currentTimeMillis() - callStartTime + pauseTime) >
+ params.getRpcRetryTimeout()) {
+ RetriesExhaustedException ree = new RetriesExhaustedException(callable.getServerName(),
+ callable.getRegionName(), callable.getRow(), tries,
+ exceptions);
+ setFailure(ree);
+ return;
+ }
+ LOG.debug("getRegionServerWithoutRetries failed, sleeping for " +
+ pauseTime + "ms. tries = " + tries, t);
+
+ // Reload region location from cache after pause.
+ instantiateRegionLocation = true;
+ scheduleNextAttempt(pauseTime);
+ return;
+ } else {
+ LOG.debug("getRegionServerWithoutRetries failed, " +
+ "region moved from " + prevLoc + " to " + callable.getLocation() +
+ "retrying immediately tries=" + tries, t);
+ }
+
+ makeAttempt();
+ }
+ }
+
+ /**
+ * Schedule next attempt with delay.
+ *
+ * @param pauseTime milliseconds to wait before next attempt
+ */
+ private void scheduleNextAttempt(long pauseTime) {
+ executorService.schedule(attemptWorker, pauseTime, TimeUnit.MILLISECONDS);
+ }
+}
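
For context, a minimal sketch of how a caller might consume one of these
self-retrying futures. It assumes HTableAsync exposes an asynchronous get
(here called getAsync) backed by SelfRetryingListenableFuture; the method
name and wiring are illustrative, not taken from this patch:

    import org.apache.hadoop.hbase.client.Get;
    import org.apache.hadoop.hbase.client.HTableAsync;
    import org.apache.hadoop.hbase.client.Result;

    import com.google.common.util.concurrent.FutureCallback;
    import com.google.common.util.concurrent.Futures;
    import com.google.common.util.concurrent.ListenableFuture;

    public class SelfRetryingGetExample {
      // Fires an asynchronous get; retries and backoff happen inside
      // SelfRetryingListenableFuture on its scheduled executor, so this
      // method returns immediately.
      public static void fetch(HTableAsync table, byte[] row) {
        // getAsync is an assumed API name for illustration only.
        ListenableFuture<Result> future = table.getAsync(new Get(row));
        Futures.addCallback(future, new FutureCallback<Result>() {
          @Override
          public void onSuccess(Result result) {
            // Delivered via downstream.set(...) after zero or more retries.
          }

          @Override
          public void onFailure(Throwable t) {
            // Terminal failures only: DoNotRetryIOException, retries
            // exhausted, or the server-requested backoff budget exceeded.
          }
        });
      }
    }

The callback never sees intermediate failures; retryOrStop() swallows
retryable exceptions and only setSuccess()/setFailure() reach the downstream.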
Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/thrift/TBoundedThreadPoolServer.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/thrift/TBoundedThreadPoolServer.java?rev=1576909&r1=1576908&r2=1576909&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/thrift/TBoundedThreadPoolServer.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/thrift/TBoundedThreadPoolServer.java Wed Mar 12 21:17:13 2014
@@ -121,7 +121,7 @@ public class TBoundedThreadPoolServer ex
@Override
public String toString() {
return "min worker threads=" + minWorkerThreads
- + ", max worker threads=" + maxWorkerThreads
+ + ", max worker threads=" + maxWorkerThreads
+ ", max queued requests=" + maxQueuedRequests;
}
}
Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/thrift/ThriftServerRunner.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/thrift/ThriftServerRunner.java?rev=1576909&r1=1576908&r2=1576909&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/thrift/ThriftServerRunner.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/thrift/ThriftServerRunner.java Wed Mar 12 21:17:13 2014
@@ -175,7 +175,7 @@ public class ThriftServerRunner implemen
if (confType == null) {
return DEFAULT_SERVER_TYPE;
}
-
+
for (ImplType t : values()) {
if (confType.equals(t.option)) {
return t;
@@ -806,7 +806,7 @@ public class ThriftServerRunner implemen
if (metrics != null) {
metrics.incNumBatchGetRowKeys(rows.size());
}
-
+
// For now, don't support ragged gets, with different columns per row
// Probably pretty sensible indefinitely anyways.
for (ByteBuffer row : rows) {
Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/thrift/ThriftUtilities.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/thrift/ThriftUtilities.java?rev=1576909&r1=1576908&r2=1576909&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/thrift/ThriftUtilities.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/thrift/ThriftUtilities.java Wed Mar 12 21:17:13 2014
@@ -84,6 +84,7 @@ public class ThriftUtilities {
col.inMemory = in.isInMemory();
col.blockCacheEnabled = in.isBlockCacheEnabled();
col.bloomFilterType = in.getBloomFilterType().toString();
+ col.timeToLive = in.getTimeToLive();
return col;
}
Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/thrift/generated/AlreadyExists.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/thrift/generated/AlreadyExists.java?rev=1576909&r1=1576908&r2=1576909&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/thrift/generated/AlreadyExists.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/thrift/generated/AlreadyExists.java Wed Mar 12 21:17:13 2014
@@ -112,7 +112,7 @@ public class AlreadyExists extends TExce
public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
static {
Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
- tmpMap.put(_Fields.MESSAGE, new org.apache.thrift.meta_data.FieldMetaData("message", org.apache.thrift.TFieldRequirementType.DEFAULT,
+ tmpMap.put(_Fields.MESSAGE, new org.apache.thrift.meta_data.FieldMetaData("message", org.apache.thrift.TFieldRequirementType.DEFAULT,
new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
metaDataMap = Collections.unmodifiableMap(tmpMap);
org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(AlreadyExists.class, metaDataMap);
@@ -326,7 +326,7 @@ public class AlreadyExists extends TExce
while (true)
{
schemeField = iprot.readFieldBegin();
- if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
+ if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
break;
}
switch (schemeField.id) {
@@ -334,7 +334,7 @@ public class AlreadyExists extends TExce
if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
struct.message = iprot.readString();
struct.setMessageIsSet(true);
- } else {
+ } else {
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
break;
Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/thrift/generated/BatchMutation.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/thrift/generated/BatchMutation.java?rev=1576909&r1=1576908&r2=1576909&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/thrift/generated/BatchMutation.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/thrift/generated/BatchMutation.java Wed Mar 12 21:17:13 2014
@@ -116,10 +116,10 @@ public class BatchMutation implements or
public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
static {
Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
- tmpMap.put(_Fields.ROW, new org.apache.thrift.meta_data.FieldMetaData("row", org.apache.thrift.TFieldRequirementType.DEFAULT,
+ tmpMap.put(_Fields.ROW, new org.apache.thrift.meta_data.FieldMetaData("row", org.apache.thrift.TFieldRequirementType.DEFAULT,
new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , "Text")));
- tmpMap.put(_Fields.MUTATIONS, new org.apache.thrift.meta_data.FieldMetaData("mutations", org.apache.thrift.TFieldRequirementType.DEFAULT,
- new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST,
+ tmpMap.put(_Fields.MUTATIONS, new org.apache.thrift.meta_data.FieldMetaData("mutations", org.apache.thrift.TFieldRequirementType.DEFAULT,
+ new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST,
new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, Mutation.class))));
metaDataMap = Collections.unmodifiableMap(tmpMap);
org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(BatchMutation.class, metaDataMap);
@@ -437,7 +437,7 @@ public class BatchMutation implements or
while (true)
{
schemeField = iprot.readFieldBegin();
- if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
+ if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
break;
}
switch (schemeField.id) {
@@ -445,7 +445,7 @@ public class BatchMutation implements or
if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
struct.row = iprot.readBinary();
struct.setRowIsSet(true);
- } else {
+ } else {
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
break;
@@ -464,7 +464,7 @@ public class BatchMutation implements or
iprot.readListEnd();
}
struct.setMutationsIsSet(true);
- } else {
+ } else {
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
break;
Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/thrift/generated/ColumnDescriptor.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/thrift/generated/ColumnDescriptor.java?rev=1576909&r1=1576908&r2=1576909&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/thrift/generated/ColumnDescriptor.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/thrift/generated/ColumnDescriptor.java Wed Mar 12 21:17:13 2014
@@ -160,23 +160,23 @@ public class ColumnDescriptor implements
public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
static {
Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
- tmpMap.put(_Fields.NAME, new org.apache.thrift.meta_data.FieldMetaData("name", org.apache.thrift.TFieldRequirementType.DEFAULT,
+ tmpMap.put(_Fields.NAME, new org.apache.thrift.meta_data.FieldMetaData("name", org.apache.thrift.TFieldRequirementType.DEFAULT,
new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , "Text")));
- tmpMap.put(_Fields.MAX_VERSIONS, new org.apache.thrift.meta_data.FieldMetaData("maxVersions", org.apache.thrift.TFieldRequirementType.DEFAULT,
+ tmpMap.put(_Fields.MAX_VERSIONS, new org.apache.thrift.meta_data.FieldMetaData("maxVersions", org.apache.thrift.TFieldRequirementType.DEFAULT,
new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32)));
- tmpMap.put(_Fields.COMPRESSION, new org.apache.thrift.meta_data.FieldMetaData("compression", org.apache.thrift.TFieldRequirementType.DEFAULT,
+ tmpMap.put(_Fields.COMPRESSION, new org.apache.thrift.meta_data.FieldMetaData("compression", org.apache.thrift.TFieldRequirementType.DEFAULT,
new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
- tmpMap.put(_Fields.IN_MEMORY, new org.apache.thrift.meta_data.FieldMetaData("inMemory", org.apache.thrift.TFieldRequirementType.DEFAULT,
+ tmpMap.put(_Fields.IN_MEMORY, new org.apache.thrift.meta_data.FieldMetaData("inMemory", org.apache.thrift.TFieldRequirementType.DEFAULT,
new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL)));
- tmpMap.put(_Fields.BLOOM_FILTER_TYPE, new org.apache.thrift.meta_data.FieldMetaData("bloomFilterType", org.apache.thrift.TFieldRequirementType.DEFAULT,
+ tmpMap.put(_Fields.BLOOM_FILTER_TYPE, new org.apache.thrift.meta_data.FieldMetaData("bloomFilterType", org.apache.thrift.TFieldRequirementType.DEFAULT,
new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
- tmpMap.put(_Fields.BLOOM_FILTER_VECTOR_SIZE, new org.apache.thrift.meta_data.FieldMetaData("bloomFilterVectorSize", org.apache.thrift.TFieldRequirementType.DEFAULT,
+ tmpMap.put(_Fields.BLOOM_FILTER_VECTOR_SIZE, new org.apache.thrift.meta_data.FieldMetaData("bloomFilterVectorSize", org.apache.thrift.TFieldRequirementType.DEFAULT,
new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32)));
- tmpMap.put(_Fields.BLOOM_FILTER_NB_HASHES, new org.apache.thrift.meta_data.FieldMetaData("bloomFilterNbHashes", org.apache.thrift.TFieldRequirementType.DEFAULT,
+ tmpMap.put(_Fields.BLOOM_FILTER_NB_HASHES, new org.apache.thrift.meta_data.FieldMetaData("bloomFilterNbHashes", org.apache.thrift.TFieldRequirementType.DEFAULT,
new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32)));
- tmpMap.put(_Fields.BLOCK_CACHE_ENABLED, new org.apache.thrift.meta_data.FieldMetaData("blockCacheEnabled", org.apache.thrift.TFieldRequirementType.DEFAULT,
+ tmpMap.put(_Fields.BLOCK_CACHE_ENABLED, new org.apache.thrift.meta_data.FieldMetaData("blockCacheEnabled", org.apache.thrift.TFieldRequirementType.DEFAULT,
new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL)));
- tmpMap.put(_Fields.TIME_TO_LIVE, new org.apache.thrift.meta_data.FieldMetaData("timeToLive", org.apache.thrift.TFieldRequirementType.DEFAULT,
+ tmpMap.put(_Fields.TIME_TO_LIVE, new org.apache.thrift.meta_data.FieldMetaData("timeToLive", org.apache.thrift.TFieldRequirementType.DEFAULT,
new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32)));
metaDataMap = Collections.unmodifiableMap(tmpMap);
org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(ColumnDescriptor.class, metaDataMap);
@@ -991,7 +991,7 @@ public class ColumnDescriptor implements
while (true)
{
schemeField = iprot.readFieldBegin();
- if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
+ if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
break;
}
switch (schemeField.id) {
@@ -999,7 +999,7 @@ public class ColumnDescriptor implements
if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
struct.name = iprot.readBinary();
struct.setNameIsSet(true);
- } else {
+ } else {
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
break;
@@ -1007,7 +1007,7 @@ public class ColumnDescriptor implements
if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
struct.maxVersions = iprot.readI32();
struct.setMaxVersionsIsSet(true);
- } else {
+ } else {
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
break;
@@ -1015,7 +1015,7 @@ public class ColumnDescriptor implements
if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
struct.compression = iprot.readString();
struct.setCompressionIsSet(true);
- } else {
+ } else {
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
break;
@@ -1023,7 +1023,7 @@ public class ColumnDescriptor implements
if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) {
struct.inMemory = iprot.readBool();
struct.setInMemoryIsSet(true);
- } else {
+ } else {
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
break;
@@ -1031,7 +1031,7 @@ public class ColumnDescriptor implements
if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
struct.bloomFilterType = iprot.readString();
struct.setBloomFilterTypeIsSet(true);
- } else {
+ } else {
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
break;
@@ -1039,7 +1039,7 @@ public class ColumnDescriptor implements
if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
struct.bloomFilterVectorSize = iprot.readI32();
struct.setBloomFilterVectorSizeIsSet(true);
- } else {
+ } else {
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
break;
@@ -1047,7 +1047,7 @@ public class ColumnDescriptor implements
if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
struct.bloomFilterNbHashes = iprot.readI32();
struct.setBloomFilterNbHashesIsSet(true);
- } else {
+ } else {
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
break;
@@ -1055,7 +1055,7 @@ public class ColumnDescriptor implements
if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) {
struct.blockCacheEnabled = iprot.readBool();
struct.setBlockCacheEnabledIsSet(true);
- } else {
+ } else {
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
break;
@@ -1063,7 +1063,7 @@ public class ColumnDescriptor implements
if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
struct.timeToLive = iprot.readI32();
struct.setTimeToLiveIsSet(true);
- } else {
+ } else {
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
break;