Posted to commits@hbase.apache.org by st...@apache.org on 2012/08/28 05:40:49 UTC
svn commit: r1377965 [1/3] - in /hbase/trunk:
hbase-common/src/main/java/org/apache/hadoop/hbase/
hbase-server/src/main/java/org/apache/hadoop/hbase/
hbase-server/src/main/java/org/apache/hadoop/hbase/catalog/
hbase-server/src/main/java/org/apache/hado...
Author: stack
Date: Tue Aug 28 03:40:47 2012
New Revision: 1377965
URL: http://svn.apache.org/viewvc?rev=1377965&view=rev
Log:
HBASE-6052 Convert .META. and -ROOT- content to pb
Added:
hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/catalog/MetaMigrationConvertingToPB.java
hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/io/DataInputInputStream.java
hbase/trunk/hbase-server/src/test/data/TestMetaMigrationConvertToPB.tgz (with props)
hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/catalog/MetaMockingUtil.java
hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/catalog/TestMetaMigrationConvertingToPB.java
Removed:
hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/catalog/MetaMigrationRemovingHTD.java
hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/migration/HRegionInfo090x.java
hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaMigrationRemovingHTD.java
hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/migration/TestMigrationFrom090To092.java
Modified:
hbase/trunk/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java
hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/catalog/MetaEditor.java
hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/catalog/MetaReader.java
hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java
hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/client/HTable.java
hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/client/MetaScanner.java
hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/client/Result.java
hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java
hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/ServerShutdownHandler.java
hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransaction.java
hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/thrift/ThriftServerRunner.java
hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java
hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HMerge.java
hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/util/Merge.java
hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/util/MetaUtils.java
hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/util/Writables.java
hbase/trunk/hbase-server/src/main/ruby/hbase/table.rb
hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/TestSerialization.java
hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/catalog/TestCatalogTracker.java
hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/catalog/TestMetaReaderEditorNoCluster.java
hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/master/Mocking.java
hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManager.java
hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManagerOnCluster.java
hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java
hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterNoCluster.java
hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterTransitions.java
hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestZKBasedOpenCloseRegion.java
hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEndToEndSplitTransaction.java
hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestGetClosestAtOrBefore.java
hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScanner.java
hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java
hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/OfflineMetaRebuildTestCore.java
Modified: hbase/trunk/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java?rev=1377965&r1=1377964&r2=1377965&view=diff
==============================================================================
--- hbase/trunk/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java (original)
+++ hbase/trunk/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java Tue Aug 28 03:40:47 2012
@@ -347,12 +347,15 @@ public final class HConstants {
/**
* The current version of the meta table.
- * Before this the meta had HTableDescriptor serialized into the HRegionInfo;
- * i.e. pre-hbase 0.92. There was no META_VERSION column in the root table
- * in this case. The presence of a version and its value being zero indicates
- * meta is up-to-date.
+ * - pre-hbase 0.92: there is no META_VERSION column in the root table
+ * in this case, and meta has the HTableDescriptor serialized into the HRegionInfo;
+ * - version 0 is 0.92 and 0.94: meta has HRegionInfos serialized with
+ * Writable serialization, and HRegionInfos no longer contain HTableDescriptors.
+ * - version 1 for 0.96+: keeps the HRegionInfo data structures, but changes the
+ * byte[] serialization from Writables to Protobuf.
+ * See HRegionInfo.VERSION
*/
- public static final short META_VERSION = 0;
+ public static final short META_VERSION = 1;
// Other constants
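For illustration, a minimal sketch (not part of this commit) of how a reader can act on
this constant: pull the version cell for .META. out of a -ROOT- scan Result and compare
it against META_VERSION, much as the migration code added later in this commit does. The
wrapper class and method names here are invented.

import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.util.Bytes;

public class MetaVersionCheck {
  /**
   * @param rootRow Result for the .META. row as read from -ROOT-
   * @return true if catalog rows are already at the current serialization version
   */
  public static boolean isCurrent(Result rootRow) {
    byte[] value = rootRow.getValue(HConstants.CATALOG_FAMILY,
        HConstants.META_VERSION_QUALIFIER);
    // Absent cell => pre-0.92; 0 => 0.92/0.94 Writables; 1 => 0.96+ protobuf.
    short version = (value == null || value.length <= 0) ? -1 : Bytes.toShort(value);
    return version >= HConstants.META_VERSION;
  }
}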
Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java?rev=1377965&r1=1377964&r2=1377965&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java Tue Aug 28 03:40:47 2012
@@ -19,13 +19,16 @@
*/
package org.apache.hadoop.hbase;
-import java.io.BufferedInputStream;
+import java.io.ByteArrayInputStream;
import java.io.DataInput;
+import java.io.DataInputStream;
import java.io.DataOutput;
import java.io.EOFException;
import java.io.IOException;
-import java.io.InputStream;
+import java.io.SequenceInputStream;
+import java.util.ArrayList;
import java.util.Arrays;
+import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
@@ -36,7 +39,7 @@ import org.apache.hadoop.fs.FSDataInputS
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.KeyValue.KVComparator;
-import org.apache.hadoop.hbase.migration.HRegionInfo090x;
+import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo;
@@ -44,9 +47,9 @@ import org.apache.hadoop.hbase.util.Byte
import org.apache.hadoop.hbase.util.FSTableDescriptors;
import org.apache.hadoop.hbase.util.JenkinsHash;
import org.apache.hadoop.hbase.util.MD5Hash;
-import org.apache.hadoop.hbase.util.Writables;
-import org.apache.hadoop.io.VersionedWritable;
-import org.apache.hadoop.io.WritableComparable;
+import org.apache.hadoop.hbase.util.Pair;
+import org.apache.hadoop.hbase.util.PairOfSameType;
+import org.apache.hadoop.io.DataInputBuffer;
import com.google.protobuf.ByteString;
import com.google.protobuf.InvalidProtocolBufferException;
@@ -57,10 +60,30 @@ import com.google.protobuf.InvalidProtoc
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
-public class HRegionInfo extends VersionedWritable
-implements WritableComparable<HRegionInfo> {
- // VERSION == 0 when HRegionInfo had an HTableDescriptor inside it.
- public static final byte VERSION_PRE_092 = 0;
+public class HRegionInfo implements Comparable<HRegionInfo> {
+ /*
+ * There are two versions associated with HRegionInfo: HRegionInfo.VERSION and
+ * HConstants.META_VERSION. HRegionInfo.VERSION indicates the data structure's versioning
+ * while HConstants.META_VERSION indicates the versioning of the serialized HRIs stored in
+ * the META table.
+ *
+ * Pre-0.92:
+ * HRI.VERSION == 0 and HConstants.META_VERSION does not exist (is not stored in the META table)
+ * HRegionInfo had an HTableDescriptor reference inside it.
+ * HRegionInfo is serialized as Writable to META table.
+ * For 0.92.x and 0.94.x:
+ * HRI.VERSION == 1 and HConstants.META_VERSION == 0
+ * HRI no longer has HTableDescriptor in it.
+ * HRI is serialized as Writable to META table.
+ * For 0.96.x:
+ * HRI.VERSION == 1 and HConstants.META_VERSION == 1
+ * HRI data structure is the same as 0.92 and 0.94
+ * HRI is serialized as PB to META table.
+ *
+ * Versioning of HRegionInfo is deprecated. HRegionInfo does protobuf
+ * serialization using the RegionInfo class, which has its own versioning.
+ */
+ @Deprecated
public static final byte VERSION = 1;
private static final Log LOG = LogFactory.getLog(HRegionInfo.class);
@@ -74,11 +97,11 @@ implements WritableComparable<HRegionInf
* where,
* <encodedName> is a hex version of the MD5 hash of
* <tablename>,<startkey>,<regionIdTimestamp>
- *
+ *
* The old region name format:
* <tablename>,<startkey>,<regionIdTimestamp>
* For region names in the old format, the encoded name is a 32-bit
- * JenkinsHash integer value (in its decimal notation, string form).
+ * JenkinsHash integer value (in its decimal notation, string form).
*<p>
* **NOTE**
*
@@ -88,8 +111,8 @@ implements WritableComparable<HRegionInf
*/
/** Separator used to demarcate the encodedName in a region name
- * in the new format. See description on new format above.
- */
+ * in the new format. See description on new format above.
+ */
private static final int ENC_SEPARATOR = '.';
public static final int MD5_HEX_LENGTH = 32;
@@ -104,11 +127,11 @@ implements WritableComparable<HRegionInf
if ((regionName.length >= 1)
&& (regionName[regionName.length - 1] == ENC_SEPARATOR)) {
// region name is new format. it contains the encoded name.
- return true;
+ return true;
}
return false;
}
-
+
/**
* @param regionName
* @return the encodedName
@@ -122,7 +145,7 @@ implements WritableComparable<HRegionInf
regionName.length - MD5_HEX_LENGTH - 1,
MD5_HEX_LENGTH);
} else {
- // old format region name. ROOT and first META region also
+ // old format region name. ROOT and first META region also
// use this format.EncodedName is the JenkinsHash value.
int hashVal = Math.abs(JenkinsHash.getInstance().hash(regionName,
regionName.length, 0));
@@ -208,24 +231,6 @@ implements WritableComparable<HRegionInf
super();
}
- /**
- * Used only for migration
- * @param other HRegionInfoForMigration
- */
- public HRegionInfo(HRegionInfo090x other) {
- super();
- this.endKey = other.getEndKey();
- this.offLine = other.isOffline();
- this.regionId = other.getRegionId();
- this.regionName = other.getRegionName();
- this.regionNameStr = Bytes.toStringBinary(this.regionName);
- this.split = other.isSplit();
- this.startKey = other.getStartKey();
- this.hashCode = other.hashCode();
- this.encodedName = other.getEncodedName();
- this.tableName = other.getTableDesc().getName();
- }
-
public HRegionInfo(final byte[] tableName) {
this(tableName, null, null);
}
@@ -382,7 +387,7 @@ implements WritableComparable<HRegionInf
if (md5HashBytes.length != MD5_HEX_LENGTH) {
LOG.error("MD5-hash length mismatch: Expected=" + MD5_HEX_LENGTH +
- "; Got=" + md5HashBytes.length);
+ "; Got=" + md5HashBytes.length);
}
// now append the bytes '.<encodedName>.' to the end
@@ -391,7 +396,7 @@ implements WritableComparable<HRegionInf
offset += MD5_HEX_LENGTH;
b[offset++] = ENC_SEPARATOR;
}
-
+
return b;
}
@@ -502,7 +507,7 @@ implements WritableComparable<HRegionInf
public byte [] getStartKey(){
return startKey;
}
-
+
/** @return the endKey */
public byte [] getEndKey(){
return endKey;
@@ -702,8 +707,9 @@ implements WritableComparable<HRegionInf
return this.hashCode;
}
- /** @return the object version number */
- @Override
+ /** @return the object version number
+ * @deprecated HRI is no longer a VersionedWritable */
+ @Deprecated
public byte getVersion() {
return VERSION;
}
@@ -713,9 +719,8 @@ implements WritableComparable<HRegionInf
* {@link #toDelimitedByteArray()}
*/
@Deprecated
- @Override
public void write(DataOutput out) throws IOException {
- super.write(out);
+ out.writeByte(getVersion());
Bytes.writeByteArray(out, endKey);
out.writeBoolean(offLine);
out.writeLong(regionId);
@@ -731,7 +736,6 @@ implements WritableComparable<HRegionInf
* {@link #parseFrom(FSDataInputStream)}
*/
@Deprecated
- @Override
public void readFields(DataInput in) throws IOException {
// Read the single version byte. We don't ask the super class do it
// because freaks out if its not the current classes' version. This method
@@ -770,6 +774,21 @@ implements WritableComparable<HRegionInf
}
}
+ @Deprecated
+ private void readFields(byte[] bytes) throws IOException {
+ if (bytes == null || bytes.length <= 0) {
+ throw new IllegalArgumentException("Can't build a writable with empty " +
+ "bytes array");
+ }
+ DataInputBuffer in = new DataInputBuffer();
+ try {
+ in.reset(bytes, 0, bytes.length);
+ this.readFields(in);
+ } finally {
+ in.close();
+ }
+ }
+
//
// Comparable
//
@@ -817,7 +836,7 @@ implements WritableComparable<HRegionInf
if (this.offLine == o.offLine)
return 0;
if (this.offLine == true) return -1;
-
+
return 1;
}
@@ -919,7 +938,7 @@ implements WritableComparable<HRegionInf
/**
* @param bytes A pb RegionInfo serialized with a pb magic prefix.
- * @return
+ * @return A deserialized {@link HRegionInfo}
* @throws DeserializationException
* @see {@link #toByteArray()}
*/
@@ -935,7 +954,9 @@ implements WritableComparable<HRegionInf
}
} else {
try {
- return (HRegionInfo)Writables.getWritable(bytes, new HRegionInfo());
+ HRegionInfo hri = new HRegionInfo();
+ hri.readFields(bytes);
+ return hri;
} catch (IOException e) {
throw new DeserializationException(e);
}
@@ -954,29 +975,172 @@ implements WritableComparable<HRegionInf
}
/**
+ * Extract a HRegionInfo and ServerName from catalog table {@link Result}.
+ * @param r Result to pull from
+ * @return A pair of the {@link HRegionInfo} and the {@link ServerName}
+ * (or null for server address if no address set in .META.).
+ * @throws IOException
+ */
+ public static Pair<HRegionInfo, ServerName> getHRegionInfoAndServerName(final Result r) {
+ HRegionInfo info =
+ getHRegionInfo(r, HConstants.REGIONINFO_QUALIFIER);
+ ServerName sn = getServerName(r);
+ return new Pair<HRegionInfo, ServerName>(info, sn);
+ }
+
+ /**
+ * Returns HRegionInfo object from the column
+ * HConstants.CATALOG_FAMILY:HConstants.REGIONINFO_QUALIFIER of the catalog
+ * table Result.
+ * @param data a Result object from the catalog table scan
+ * @return HRegionInfo or null
+ */
+ public static HRegionInfo getHRegionInfo(Result data) {
+ byte [] bytes =
+ data.getValue(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
+ if (bytes == null) return null;
+ HRegionInfo info = parseFromOrNull(bytes);
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Current INFO from scan results = " + info);
+ }
+ return info;
+ }
+
+ /**
+ * Returns the daughter regions by reading the corresponding columns of the catalog table
+ * Result.
+ * @param data a Result object from the catalog table scan
+ * @return a pair of HRegionInfo or PairOfSameType(null, null) if the region is not a split
+ * parent
+ */
+ public static PairOfSameType<HRegionInfo> getDaughterRegions(Result data) throws IOException {
+ HRegionInfo splitA = getHRegionInfo(data, HConstants.SPLITA_QUALIFIER);
+ HRegionInfo splitB = getHRegionInfo(data, HConstants.SPLITB_QUALIFIER);
+
+ return new PairOfSameType<HRegionInfo>(splitA, splitB);
+ }
+
+ /**
+ * Returns the HRegionInfo object from the column {@link HConstants#CATALOG_FAMILY} and
+ * <code>qualifier</code> of the catalog table result.
+ * @param r a Result object from the catalog table scan
+ * @param qualifier Column family qualifier -- either
+ * {@link HConstants#SPLITA_QUALIFIER}, {@link HConstants#SPLITB_QUALIFIER} or
+ * {@link HConstants#REGIONINFO_QUALIFIER}.
+ * @return An HRegionInfo instance or null.
+ * @throws IOException
+ */
+ public static HRegionInfo getHRegionInfo(final Result r, byte [] qualifier) {
+ byte [] bytes = r.getValue(HConstants.CATALOG_FAMILY, qualifier);
+ if (bytes == null || bytes.length <= 0) return null;
+ return parseFromOrNull(bytes);
+ }
+
+ /**
+ * Returns a {@link ServerName} from catalog table {@link Result}.
+ * @param r Result to pull from
+ * @return A ServerName instance or null if necessary fields not found or empty.
+ */
+ public static ServerName getServerName(final Result r) {
+ byte[] value = r.getValue(HConstants.CATALOG_FAMILY,
+ HConstants.SERVER_QUALIFIER);
+ if (value == null || value.length == 0) return null;
+ String hostAndPort = Bytes.toString(value);
+ value = r.getValue(HConstants.CATALOG_FAMILY,
+ HConstants.STARTCODE_QUALIFIER);
+ if (value == null || value.length == 0) return null;
+ return new ServerName(hostAndPort, Bytes.toLong(value));
+ }
+
+ /**
* Parses an HRegionInfo instance from the passed in stream. Presumes the HRegionInfo was
* serialized to the stream with {@link #toDelimitedByteArray()}
* @param in
* @return An instance of HRegionInfo.
* @throws IOException
*/
- public static HRegionInfo parseFrom(final FSDataInputStream in) throws IOException {
+ public static HRegionInfo parseFrom(final DataInputStream in) throws IOException {
// I need to be able to move back in the stream if this is not a pb serialization so I can
// do the Writable decoding instead.
- InputStream is = in.markSupported()? in: new BufferedInputStream(in);
int pblen = ProtobufUtil.lengthOfPBMagic();
- is.mark(pblen);
byte [] pbuf = new byte[pblen];
- int read = is.read(pbuf);
+ if (in.markSupported()) { //read it with mark()
+ in.mark(pblen);
+ }
+ int read = in.read(pbuf); //assumption: if Writable serialization, it should be longer than pblen.
if (read != pblen) throw new IOException("read=" + read + ", wanted=" + pblen);
if (ProtobufUtil.isPBMagicPrefix(pbuf)) {
- return convert(HBaseProtos.RegionInfo.parseDelimitedFrom(is));
+ return convert(HBaseProtos.RegionInfo.parseDelimitedFrom(in));
} else {
- // Presume Writables. Need to reset the stream since it didn't start w/ pb.
- in.reset();
- HRegionInfo hri = new HRegionInfo();
- hri.readFields(in);
- return hri;
+ // Presume Writables. Need to reset the stream since it didn't start w/ pb.
+ if (in.markSupported()) {
+ in.reset();
+ HRegionInfo hri = new HRegionInfo();
+ hri.readFields(in);
+ return hri;
+ } else {
+ //we cannot use BufferedInputStream, it consumes more than we read from the underlying IS
+ ByteArrayInputStream bais = new ByteArrayInputStream(pbuf);
+ SequenceInputStream sis = new SequenceInputStream(bais, in); //concatenate input streams
+ HRegionInfo hri = new HRegionInfo();
+ hri.readFields(new DataInputStream(sis));
+ return hri;
+ }
}
}
+
+ /**
+ * Serializes the given HRegionInfos as a byte array. Use this instead of {@link #toByteArray()} when
+ * writing to a stream and you want to use the pb mergeDelimitedFrom (w/o the delimiter, pb reads
+ * to EOF which may not be what you want). {@link #parseDelimitedFrom(byte[], int, int)} can
+ * be used to read back the instances.
+ * @param infos HRegionInfo objects to serialize
+ * @return This instance serialized as a delimited protobuf w/ a magic pb prefix.
+ * @throws IOException
+ * @see {@link #toByteArray()}
+ */
+ public static byte[] toDelimitedByteArray(HRegionInfo... infos) throws IOException {
+ byte[][] bytes = new byte[infos.length][];
+ int size = 0;
+ for (int i = 0; i < infos.length; i++) {
+ bytes[i] = infos[i].toDelimitedByteArray();
+ size += bytes[i].length;
+ }
+
+ byte[] result = new byte[size];
+ int offset = 0;
+ for (byte[] b : bytes) {
+ System.arraycopy(b, 0, result, offset, b.length);
+ offset += b.length;
+ }
+ return result;
+ }
+
+ /**
+ * Parses all the HRegionInfo instances from the passed-in byte array until the end of the
+ * given range. Presumes the HRegionInfos were serialized with {@link #toDelimitedByteArray()}.
+ * @param bytes serialized bytes
+ * @param offset the start offset into the byte[] buffer
+ * @param length how far we should read into the byte[] buffer
+ * @return All the HRegionInfos that are in the byte array. Keeps reading till we hit the end.
+ */
+ public static List<HRegionInfo> parseDelimitedFrom(final byte[] bytes, final int offset,
+ final int length) throws IOException {
+ if (bytes == null) {
+ throw new IllegalArgumentException("Can't build an object with empty bytes array");
+ }
+ DataInputBuffer in = new DataInputBuffer();
+ List<HRegionInfo> hris = new ArrayList<HRegionInfo>();
+ try {
+ in.reset(bytes, offset, length);
+ while (in.available() > 0) {
+ HRegionInfo hri = parseFrom(in);
+ hris.add(hri);
+ }
+ } finally {
+ in.close();
+ }
+ return hris;
+ }
+
}
\ No newline at end of file
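As a usage sketch of the two static helpers added above: several HRegionInfos can be
packed into one byte array with toDelimitedByteArray(HRegionInfo...) and read back with
parseDelimitedFrom(byte[], int, int), which keeps parsing until the given range is
exhausted; parseFrom() sniffs the pb magic prefix and falls back to Writable decoding
for pre-0.96 bytes. Table and key names below are invented for illustration.

import java.util.List;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.util.Bytes;

public class DelimitedHriRoundTrip {
  public static void main(String[] args) throws Exception {
    // Two adjacent regions of a hypothetical table 't1'.
    HRegionInfo a = new HRegionInfo(Bytes.toBytes("t1"),
        Bytes.toBytes("aaa"), Bytes.toBytes("mmm"));
    HRegionInfo b = new HRegionInfo(Bytes.toBytes("t1"),
        Bytes.toBytes("mmm"), Bytes.toBytes("zzz"));
    // Each instance is written as a delimited pb message with the magic prefix.
    byte[] blob = HRegionInfo.toDelimitedByteArray(a, b);
    // Keeps reading instances until the buffer is exhausted.
    List<HRegionInfo> back = HRegionInfo.parseDelimitedFrom(blob, 0, blob.length);
    System.out.println(back); // expect the two regions back, in order
  }
}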
Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/catalog/MetaEditor.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/catalog/MetaEditor.java?rev=1377965&r1=1377964&r2=1377965&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/catalog/MetaEditor.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/catalog/MetaEditor.java Tue Aug 28 03:40:47 2012
@@ -32,15 +32,12 @@ import org.apache.hadoop.hbase.ServerNam
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
-import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.PairOfSameType;
-import org.apache.hadoop.hbase.util.Writables;
/**
* Writes region and assignment information to <code>.META.</code>.
* TODO: Put MetaReader and MetaEditor together; doesn't make sense having
- * them distinct.
+ * them distinct. See HBASE-3475.
*/
@InterfaceAudience.Private
public class MetaEditor {
@@ -49,11 +46,26 @@ public class MetaEditor {
// Connection.
private static final Log LOG = LogFactory.getLog(MetaEditor.class);
- private static Put makePutFromRegionInfo(HRegionInfo regionInfo)
+ /**
+ * Generates and returns a Put containing the region info for the catalog table
+ */
+ public static Put makePutFromRegionInfo(HRegionInfo regionInfo)
throws IOException {
Put put = new Put(regionInfo.getRegionName());
- put.add(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER,
- Writables.getBytes(regionInfo));
+ addRegionInfo(put, regionInfo);
+ return put;
+ }
+
+ /**
+ * Adds split daughters to the Put
+ */
+ public static Put addDaughtersToPut(Put put, HRegionInfo splitA, HRegionInfo splitB) {
+ if (splitA != null) {
+ put.add(HConstants.CATALOG_FAMILY, HConstants.SPLITA_QUALIFIER, splitA.toByteArray());
+ }
+ if (splitB != null) {
+ put.add(HConstants.CATALOG_FAMILY, HConstants.SPLITB_QUALIFIER, splitB.toByteArray());
+ }
return put;
}
@@ -149,6 +161,39 @@ public class MetaEditor {
}
/**
+ * Adds a META row for the specified new region to the given catalog table. The
+ * HTable is not flushed or closed.
+ * @param meta the HTable for META
+ * @param regionInfo region information
+ * @throws IOException if problem connecting or updating meta
+ */
+ public static void addRegionToMeta(HTable meta, HRegionInfo regionInfo) throws IOException {
+ addRegionToMeta(meta, regionInfo, null, null);
+ }
+
+ /**
+ * Adds a (single) META row for the specified new region and its daughters. Note that this does
+ * not add its daughters as separate rows, but adds information about the daughters
+ * in the same row as the parent. Use
+ * {@link #offlineParentInMeta(CatalogTracker, HRegionInfo, HRegionInfo, HRegionInfo)}
+ * and {@link #addDaughter(CatalogTracker, HRegionInfo, ServerName)} if you want to do that.
+ * @param meta the HTable for META
+ * @param regionInfo region information
+ * @param splitA first split daughter of the parent regionInfo
+ * @param splitB second split daughter of the parent regionInfo
+ * @throws IOException if problem connecting or updating meta
+ */
+ public static void addRegionToMeta(HTable meta, HRegionInfo regionInfo,
+ HRegionInfo splitA, HRegionInfo splitB) throws IOException {
+ Put put = makePutFromRegionInfo(regionInfo);
+ addDaughtersToPut(put, splitA, splitB);
+ meta.put(put);
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Added region " + regionInfo.getRegionNameAsString() + " to META");
+ }
+ }
+
+ /**
* Adds a META row for each of the specified new regions.
* @param catalogTracker CatalogTracker
* @param regionInfos region information list
@@ -181,15 +226,14 @@ public class MetaEditor {
HRegionInfo copyOfParent = new HRegionInfo(parent);
copyOfParent.setOffline(true);
copyOfParent.setSplit(true);
- Put put = new Put(copyOfParent.getRegionName());
- addRegionInfo(put, copyOfParent);
- put.add(HConstants.CATALOG_FAMILY, HConstants.SPLITA_QUALIFIER,
- Writables.getBytes(a));
- put.add(HConstants.CATALOG_FAMILY, HConstants.SPLITB_QUALIFIER,
- Writables.getBytes(b));
- putToMetaTable(catalogTracker, put);
- LOG.info("Offlined parent region " + parent.getRegionNameAsString() +
- " in META");
+ HTable meta = MetaReader.getMetaHTable(catalogTracker);
+ try {
+ addRegionToMeta(meta, copyOfParent, a, b);
+ LOG.info("Offlined parent region " + parent.getRegionNameAsString() +
+ " in META");
+ } finally {
+ meta.close();
+ }
}
public static void addDaughter(final CatalogTracker catalogTracker,
@@ -297,32 +341,10 @@ public class MetaEditor {
", from parent " + parent.getRegionNameAsString());
}
- public static HRegionInfo getHRegionInfo(
- Result data) throws IOException {
- byte [] bytes =
- data.getValue(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
- if (bytes == null) return null;
- HRegionInfo info = Writables.getHRegionInfo(bytes);
- LOG.info("Current INFO from scan results = " + info);
- return info;
- }
-
- /**
- * Returns the daughter regions by reading from the corresponding columns of the .META. table
- * Result. If the region is not a split parent region, it returns PairOfSameType(null, null).
- */
- public static PairOfSameType<HRegionInfo> getDaughterRegions(Result data) throws IOException {
- HRegionInfo splitA = Writables.getHRegionInfoOrNull(
- data.getValue(HConstants.CATALOG_FAMILY, HConstants.SPLITA_QUALIFIER));
- HRegionInfo splitB = Writables.getHRegionInfoOrNull(
- data.getValue(HConstants.CATALOG_FAMILY, HConstants.SPLITB_QUALIFIER));
- return new PairOfSameType<HRegionInfo>(splitA, splitB);
- }
-
private static Put addRegionInfo(final Put p, final HRegionInfo hri)
throws IOException {
p.add(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER,
- Writables.getBytes(hri));
+ hri.toByteArray());
return p;
}
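A sketch, assuming an already-open HTable on .META., of how a caller can combine the
two helpers made public above; this mirrors what offlineParentInMeta now does
internally. The wrapper class and method names are invented.

import java.io.IOException;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.catalog.MetaEditor;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;

public class SplitRowWriter {
  /** Writes parent and daughter region info into a single catalog row. */
  static void writeSplitRow(HTable meta, HRegionInfo parent,
      HRegionInfo splitA, HRegionInfo splitB) throws IOException {
    Put put = MetaEditor.makePutFromRegionInfo(parent); // info:regioninfo, pb-serialized
    MetaEditor.addDaughtersToPut(put, splitA, splitB);  // info:splitA and info:splitB
    meta.put(put);
  }
}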
Added: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/catalog/MetaMigrationConvertingToPB.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/catalog/MetaMigrationConvertingToPB.java?rev=1377965&view=auto
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/catalog/MetaMigrationConvertingToPB.java (added)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/catalog/MetaMigrationConvertingToPB.java Tue Aug 28 03:40:47 2012
@@ -0,0 +1,220 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.catalog;
+
+import java.io.IOException;
+import java.util.List;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.DeserializationException;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.catalog.MetaReader.Visitor;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.master.MasterServices;
+import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.util.Bytes;
+
+/**
+ * A tool to migrate the data stored in ROOT and META tables to protobuf serialization.
+ * Supports migrating from 0.92.x and 0.94.x to 0.96.x for the catalog tables.
+ * @deprecated will be removed for the major release after 0.96.
+ */
+@Deprecated
+public class MetaMigrationConvertingToPB {
+
+ private static final Log LOG = LogFactory.getLog(MetaMigrationConvertingToPB.class);
+
+ private static class ConvertToPBMetaVisitor implements Visitor {
+ private final MasterServices services;
+ private long numMigratedRows;
+
+ public ConvertToPBMetaVisitor(MasterServices services) {
+ this.services = services;
+ numMigratedRows = 0;
+ }
+
+ @Override
+ public boolean visit(Result r) throws IOException {
+ if (r == null || r.isEmpty()) return true;
+ // Check info:regioninfo, info:splitA, and info:splitB. Make sure all
+ // have migrated HRegionInfos.
+ byte [] hriBytes = getBytes(r, HConstants.REGIONINFO_QUALIFIER);
+ // Presumes that an edit updating all three cells either succeeds or
+ // doesn't -- that we don't have case of info:regioninfo migrated but not
+ // info:splitA.
+ if (isMigrated(hriBytes)) return true;
+ // OK. Need to migrate this row in meta.
+
+ //This will 'migrate' the HRI from 0.92.x and 0.94.x to 0.96+ by reading the
+ //writable serialization
+ HRegionInfo hri = parseFrom(hriBytes);
+
+ // Now make a put to write back to meta.
+ Put p = MetaEditor.makePutFromRegionInfo(hri);
+
+ // Now migrate info:splitA and info:splitB if they are not null
+ migrateSplitIfNecessary(r, p, HConstants.SPLITA_QUALIFIER);
+ migrateSplitIfNecessary(r, p, HConstants.SPLITB_QUALIFIER);
+
+ MetaEditor.putToCatalogTable(this.services.getCatalogTracker(), p);
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Migrated " + Bytes.toString(p.getRow()));
+ }
+ numMigratedRows++;
+ return true;
+ }
+ }
+
+ static void migrateSplitIfNecessary(final Result r, final Put p, final byte [] which)
+ throws IOException {
+ byte [] hriSplitBytes = getBytes(r, which);
+ if (!isMigrated(hriSplitBytes)) {
+ //This will 'migrate' the HRI from 0.92.x and 0.94.x to 0.96+ by reading the
+ //writable serialization
+ HRegionInfo hri = parseFrom(hriSplitBytes);
+ p.add(HConstants.CATALOG_FAMILY, which, hri.toByteArray());
+ }
+ }
+
+ static HRegionInfo parseFrom(byte[] hriBytes) throws IOException {
+ try {
+ return HRegionInfo.parseFrom(hriBytes);
+ } catch (DeserializationException ex) {
+ throw new IOException(ex);
+ }
+ }
+
+ /**
+ * @param r Result to dig in.
+ * @param qualifier Qualifier to look at in the passed <code>r</code>.
+ * @return Bytes for an HRegionInfo or null if no bytes or empty bytes found.
+ */
+ static byte [] getBytes(final Result r, final byte [] qualifier) {
+ byte [] hriBytes = r.getValue(HConstants.CATALOG_FAMILY, qualifier);
+ if (hriBytes == null || hriBytes.length <= 0) return null;
+ return hriBytes;
+ }
+
+ static boolean isMigrated(final byte [] hriBytes) {
+ if (hriBytes == null || hriBytes.length <= 0) return true;
+
+ return ProtobufUtil.isPBMagicPrefix(hriBytes);
+ }
+
+ /**
+ * Update ROOT and META to newer version, converting writable serialization to PB, if
+ * it is needed.
+ * @param services MasterServices to get a handle on master
+ * @return num migrated rows
+ * @throws IOException or RuntimeException if something goes wrong
+ */
+ public static long updateRootAndMetaIfNecessary(final MasterServices services)
+ throws IOException {
+ if (isMetaHRIUpdated(services.getCatalogTracker())) {
+ LOG.info("ROOT/META already up-to date with PB serialization");
+ return 0;
+ }
+ LOG.info("ROOT/META has Writable serializations, migrating ROOT and META to PB serialization");
+ try {
+ long rows = updateRootAndMeta(services);
+ LOG.info("ROOT and META updated with PB serialization. Total rows updated: " + rows);
+ return rows;
+ } catch (IOException e) {
+ LOG.warn("Update ROOT/META with PB serialization failed." +
+ "Master startup aborted.");
+ throw e;
+ }
+ }
+
+ /**
+ * Update ROOT and META to newer version, converting writable serialization to PB
+ * @return num migrated rows
+ */
+ static long updateRootAndMeta(final MasterServices masterServices)
+ throws IOException {
+ long rows = updateRoot(masterServices);
+ rows += updateMeta(masterServices);
+ return rows;
+ }
+
+ /**
+ * Update ROOT rows, converting writable serialization to PB
+ * @return num migrated rows
+ */
+ static long updateRoot(final MasterServices masterServices)
+ throws IOException {
+ LOG.info("Starting update of ROOT");
+ ConvertToPBMetaVisitor v = new ConvertToPBMetaVisitor(masterServices);
+ MetaReader.fullScan(masterServices.getCatalogTracker(), v, null, true);
+ LOG.info("Finished update of ROOT. Total rows updated:" + v.numMigratedRows);
+ return v.numMigratedRows;
+ }
+
+ /**
+ * Update META rows, converting writable serialization to PB
+ * @return num migrated rows
+ */
+ static long updateMeta(final MasterServices masterServices) throws IOException {
+ LOG.info("Starting update of META");
+ ConvertToPBMetaVisitor v = new ConvertToPBMetaVisitor(masterServices);
+ MetaReader.fullScan(masterServices.getCatalogTracker(), v);
+ updateRootWithMetaMigrationStatus(masterServices.getCatalogTracker());
+ LOG.info("Finished update of META. Total rows updated:" + v.numMigratedRows);
+ return v.numMigratedRows;
+ }
+
+ /**
+ * Update the version flag in -ROOT-.
+ * @param catalogTracker the catalog tracker
+ * @throws IOException
+ */
+ static void updateRootWithMetaMigrationStatus(final CatalogTracker catalogTracker)
+ throws IOException {
+ Put p = new Put(HRegionInfo.FIRST_META_REGIONINFO.getRegionName());
+ p.add(HConstants.CATALOG_FAMILY, HConstants.META_VERSION_QUALIFIER,
+ Bytes.toBytes(HConstants.META_VERSION));
+ MetaEditor.putToRootTable(catalogTracker, p);
+ LOG.info("Updated -ROOT- meta version=" + HConstants.META_VERSION);
+ }
+
+ /**
+ * @param catalogTracker the catalog tracker
+ * @return True if the meta table has been migrated.
+ * @throws IOException
+ */
+ static boolean isMetaHRIUpdated(final CatalogTracker catalogTracker) throws IOException {
+ List<Result> results = MetaReader.fullScanOfRoot(catalogTracker);
+ if (results == null || results.isEmpty()) {
+ LOG.info(".META. is not migrated");
+ return false;
+ }
+ // Presume only the one result because we only support one meta region.
+ Result r = results.get(0);
+ byte [] value = r.getValue(HConstants.CATALOG_FAMILY,
+ HConstants.META_VERSION_QUALIFIER);
+ short version = value == null || value.length <= 0? -1: Bytes.toShort(value);
+
+ boolean migrated = version >= HConstants.META_VERSION;
+ LOG.info("Meta version=" + version + "; migrated=" + migrated);
+ return migrated;
+ }
+}
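The core of the migration is the format sniff in isMigrated() above: a cell value that
starts with the pb magic prefix is already converted, and anything else is presumed to
be 0.92/0.94 Writable bytes that HRegionInfo.parseFrom() can still decode. A condensed
sketch of the per-cell conversion (class and method names invented):

import org.apache.hadoop.hbase.DeserializationException;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;

public class CellConverter {
  /** @return pb bytes for the cell, converting from Writable bytes if needed. */
  static byte[] toPB(byte[] cell) throws DeserializationException {
    if (cell == null || cell.length == 0) return cell;   // nothing to convert
    if (ProtobufUtil.isPBMagicPrefix(cell)) return cell; // already migrated
    // parseFrom falls back to Writable decoding when the magic prefix is absent.
    HRegionInfo hri = HRegionInfo.parseFrom(cell);
    return hri.toByteArray(); // re-serialize with the pb magic prefix
  }
}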
Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/catalog/MetaReader.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/catalog/MetaReader.java?rev=1377965&r1=1377964&r2=1377965&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/catalog/MetaReader.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/catalog/MetaReader.java Tue Aug 28 03:40:47 2012
@@ -39,7 +39,6 @@ import org.apache.hadoop.hbase.client.Re
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Pair;
-import org.apache.hadoop.hbase.util.Writables;
/**
* Reads region and assignment information from <code>.META.</code>.
@@ -114,7 +113,7 @@ public class MetaReader {
@Override
public boolean visit(Result r) throws IOException {
if (r == null || r.isEmpty()) return true;
- Pair<HRegionInfo, ServerName> region = parseCatalogResult(r);
+ Pair<HRegionInfo, ServerName> region = HRegionInfo.getHRegionInfoAndServerName(r);
if (region == null) return true;
HRegionInfo hri = region.getFirst();
if (hri == null) return true;
@@ -202,7 +201,7 @@ public class MetaReader {
/**
* Callers should call close on the returned {@link HTable} instance.
* @param catalogTracker
- * @param row Row we are putting
+ * @param row Row we are putting
* @return
* @throws IOException
*/
@@ -289,59 +288,7 @@ public class MetaReader {
Get get = new Get(regionName);
get.addFamily(HConstants.CATALOG_FAMILY);
Result r = get(getCatalogHTable(catalogTracker, regionName), get);
- return (r == null || r.isEmpty())? null: parseCatalogResult(r);
- }
-
- /**
- * Extract a {@link ServerName}
- * For use on catalog table {@link Result}.
- * @param r Result to pull from
- * @return A ServerName instance or null if necessary fields not found or empty.
- */
- public static ServerName getServerNameFromCatalogResult(final Result r) {
- byte[] value = r.getValue(HConstants.CATALOG_FAMILY,
- HConstants.SERVER_QUALIFIER);
- if (value == null || value.length == 0) return null;
- String hostAndPort = Bytes.toString(value);
- value = r.getValue(HConstants.CATALOG_FAMILY,
- HConstants.STARTCODE_QUALIFIER);
- if (value == null || value.length == 0) return null;
- return new ServerName(hostAndPort, Bytes.toLong(value));
- }
-
- /**
- * Extract a HRegionInfo and ServerName.
- * For use on catalog table {@link Result}.
- * @param r Result to pull from
- * @return A pair of the {@link HRegionInfo} and the {@link ServerName}
- * (or null for server address if no address set in .META.).
- * @throws IOException
- */
- public static Pair<HRegionInfo, ServerName> parseCatalogResult(final Result r)
- throws IOException {
- HRegionInfo info =
- parseHRegionInfoFromCatalogResult(r, HConstants.REGIONINFO_QUALIFIER);
- ServerName sn = getServerNameFromCatalogResult(r);
- return new Pair<HRegionInfo, ServerName>(info, sn);
- }
-
- /**
- * Parse the content of the cell at {@link HConstants#CATALOG_FAMILY} and
- * <code>qualifier</code> as an HRegionInfo and return it, or null.
- * For use on catalog table {@link Result}.
- * @param r Result instance to pull from.
- * @param qualifier Column family qualifier -- either
- * {@link HConstants#SPLITA_QUALIFIER}, {@link HConstants#SPLITB_QUALIFIER} or
- * {@link HConstants#REGIONINFO_QUALIFIER}.
- * @return An HRegionInfo instance or null.
- * @throws IOException
- */
- public static HRegionInfo parseHRegionInfoFromCatalogResult(final Result r,
- byte [] qualifier)
- throws IOException {
- byte [] bytes = r.getValue(HConstants.CATALOG_FAMILY, qualifier);
- if (bytes == null || bytes.length <= 0) return null;
- return Writables.getHRegionInfoOrNull(bytes);
+ return (r == null || r.isEmpty())? null: HRegionInfo.getHRegionInfoAndServerName(r);
}
/**
@@ -368,7 +315,7 @@ public class MetaReader {
@Override
public boolean visit(Result r) throws IOException {
this.current =
- parseHRegionInfoFromCatalogResult(r, HConstants.REGIONINFO_QUALIFIER);
+ HRegionInfo.getHRegionInfo(r, HConstants.REGIONINFO_QUALIFIER);
if (this.current == null) {
LOG.warn("No serialized HRegionInfo in " + r);
return true;
@@ -522,14 +469,14 @@ public class MetaReader {
@Override
public boolean visit(Result r) throws IOException {
HRegionInfo hri =
- parseHRegionInfoFromCatalogResult(r, HConstants.REGIONINFO_QUALIFIER);
+ HRegionInfo.getHRegionInfo(r, HConstants.REGIONINFO_QUALIFIER);
if (hri == null) {
LOG.warn("No serialized HRegionInfo in " + r);
return true;
}
if (!isInsideTable(hri, tableName)) return false;
if (excludeOfflinedSplitParents && hri.isSplitParent()) return true;
- ServerName sn = getServerNameFromCatalogResult(r);
+ ServerName sn = HRegionInfo.getServerName(r);
// Populate this.current so available when we call #add
this.current = new Pair<HRegionInfo, ServerName>(hri, sn);
// Else call super and add this Result to the collection.
@@ -563,8 +510,8 @@ public class MetaReader {
@Override
void add(Result r) {
if (r == null || r.isEmpty()) return;
- ServerName sn = getServerNameFromCatalogResult(r);
- if (sn != null && sn.equals(serverName)) this.results.add(r);
+ ServerName sn = HRegionInfo.getServerName(r);
+ if (sn != null && sn.equals(serverName)) this.results.add(r);
}
};
fullScan(catalogTracker, v);
@@ -572,7 +519,7 @@ public class MetaReader {
if (results != null && !results.isEmpty()) {
// Convert results to Map keyed by HRI
for (Result r: results) {
- Pair<HRegionInfo, ServerName> p = parseCatalogResult(r);
+ Pair<HRegionInfo, ServerName> p = HRegionInfo.getHRegionInfoAndServerName(r);
if (p != null && p.getFirst() != null) hris.put(p.getFirst(), r);
}
}
@@ -586,7 +533,7 @@ public class MetaReader {
public boolean visit(Result r) throws IOException {
if (r == null || r.isEmpty()) return true;
LOG.info("fullScanMetaAndPrint.Current Meta Row: " + r);
- HRegionInfo hrim = MetaEditor.getHRegionInfo(r);
+ HRegionInfo hrim = HRegionInfo.getHRegionInfo(r);
LOG.info("fullScanMetaAndPrint.HRI Print= " + hrim);
return true;
}
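A sketch of a full-scan visitor written against the relocated accessors
(HRegionInfo.getHRegionInfo and HRegionInfo.getServerName); the CatalogTracker is
assumed to be started already, and the class name is invented.

import java.io.IOException;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.catalog.CatalogTracker;
import org.apache.hadoop.hbase.catalog.MetaReader;
import org.apache.hadoop.hbase.client.Result;

public class PrintAssignments {
  static void dump(CatalogTracker ct) throws IOException {
    MetaReader.fullScan(ct, new MetaReader.Visitor() {
      @Override
      public boolean visit(Result r) throws IOException {
        HRegionInfo hri = HRegionInfo.getHRegionInfo(r);
        if (hri == null) return true;                 // row without info:regioninfo
        ServerName sn = HRegionInfo.getServerName(r); // null if region unassigned
        System.out.println(hri.getRegionNameAsString() + " -> " + sn);
        return true;                                  // keep scanning
      }
    });
  }
}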
Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java?rev=1377965&r1=1377964&r2=1377965&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java Tue Aug 28 03:40:47 2012
@@ -56,20 +56,18 @@ import org.apache.hadoop.hbase.UnknownRe
import org.apache.hadoop.hbase.ZooKeeperConnectionException;
import org.apache.hadoop.hbase.catalog.CatalogTracker;
import org.apache.hadoop.hbase.catalog.MetaReader;
-import org.apache.hadoop.hbase.client.AdminProtocol;
-import org.apache.hadoop.hbase.client.ClientProtocol;
import org.apache.hadoop.hbase.client.MetaScanner.MetaScannerVisitor;
import org.apache.hadoop.hbase.client.MetaScanner.MetaScannerVisitorBase;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.RequestConverter;
import org.apache.hadoop.hbase.protobuf.ResponseConverter;
-import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoRequest;
-import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse;
-import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse.CompactionState;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CloseRegionRequest;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CloseRegionResponse;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactRegionRequest;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.FlushRegionRequest;
+import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoRequest;
+import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse;
+import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse.CompactionState;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RollWALWriterRequest;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RollWALWriterResponse;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.StopServerRequest;
@@ -77,26 +75,24 @@ import org.apache.hadoop.hbase.protobuf.
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanResponse;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.AddColumnRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.AssignRegionRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateTableRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteColumnRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteTableRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DisableTableRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableTableRequest;
-import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetSchemaAlterStatusRequest;
-import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetSchemaAlterStatusResponse;
-import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableDescriptorsRequest;
-import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableDescriptorsResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyColumnRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyTableRequest;
-import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.AssignRegionRequest;
-import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetClusterStatusRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.MoveRegionRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.SetBalancerRunningRequest;
-import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.UnassignRegionRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ShutdownRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.StopMasterRequest;
-import org.apache.hadoop.hbase.client.MasterAdminKeepAliveConnection;
-import org.apache.hadoop.hbase.client.MasterMonitorKeepAliveConnection;
+import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.UnassignRegionRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetClusterStatusRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetSchemaAlterStatusRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetSchemaAlterStatusResponse;
+import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableDescriptorsRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableDescriptorsResponse;
import org.apache.hadoop.hbase.regionserver.wal.FailedLogCloseException;
import org.apache.hadoop.hbase.util.Addressing;
import org.apache.hadoop.hbase.util.Bytes;
@@ -201,7 +197,7 @@ public class HBaseAdmin implements Abort
this.aborted = true;
throw new RuntimeException(why, e);
}
-
+
@Override
public boolean isAborted(){
return this.aborted;
@@ -409,11 +405,7 @@ public class HBaseAdmin implements Abort
MetaScannerVisitor visitor = new MetaScannerVisitorBase() {
@Override
public boolean processRow(Result rowResult) throws IOException {
- if (rowResult == null || rowResult.size() <= 0) {
- return true;
- }
- HRegionInfo info = MetaReader.parseHRegionInfoFromCatalogResult(
- rowResult, HConstants.REGIONINFO_QUALIFIER);
+ HRegionInfo info = HRegionInfo.getHRegionInfo(rowResult);
if (info == null) {
LOG.warn("No serialized HRegionInfo in " + rowResult);
return true;
@@ -421,14 +413,10 @@ public class HBaseAdmin implements Abort
if (!(Bytes.equals(info.getTableName(), desc.getName()))) {
return false;
}
- String hostAndPort = null;
- byte [] value = rowResult.getValue(HConstants.CATALOG_FAMILY,
- HConstants.SERVER_QUALIFIER);
+ ServerName serverName = HRegionInfo.getServerName(rowResult);
// Make sure that regions are assigned to server
- if (value != null && value.length > 0) {
- hostAndPort = Bytes.toString(value);
- }
- if (!(info.isOffline() || info.isSplit()) && hostAndPort != null) {
+ if (!(info.isOffline() || info.isSplit()) && serverName != null
+ && serverName.getHostAndPort() != null) {
actualRegCount.incrementAndGet();
}
return true;
@@ -610,7 +598,7 @@ public class HBaseAdmin implements Abort
// continue
}
}
-
+
if (tableExists) {
throw new IOException("Retries exhausted, it took too long to wait"+
" for the table " + Bytes.toString(tableName) + " to be deleted.");
@@ -1142,7 +1130,7 @@ public class HBaseAdmin implements Abort
* servername is provided then based on the online regions in the specified
* regionserver the specified region will be closed. The master will not be
* informed of the close. Note that the regionname is the encoded regionname.
- *
+ *
* @param encodedRegionName
* The encoded region name; i.e. the hash that makes up the region
* name suffix: e.g. if regionname is
@@ -1704,17 +1692,13 @@ public class HBaseAdmin implements Abort
MetaScannerVisitor visitor = new MetaScannerVisitorBase() {
@Override
public boolean processRow(Result data) throws IOException {
- if (data == null || data.size() <= 0) {
- return true;
- }
- HRegionInfo info = MetaReader.parseHRegionInfoFromCatalogResult(
- data, HConstants.REGIONINFO_QUALIFIER);
+ HRegionInfo info = HRegionInfo.getHRegionInfo(data);
if (info == null) {
LOG.warn("No serialized HRegionInfo in " + data);
return true;
}
if (!encodedName.equals(info.getEncodedName())) return true;
- ServerName sn = MetaReader.getServerNameFromCatalogResult(data);
+ ServerName sn = HRegionInfo.getServerName(data);
result.set(new Pair<HRegionInfo, ServerName>(info, sn));
return false; // found the region, stop
}
@@ -1887,7 +1871,7 @@ public class HBaseAdmin implements Abort
* @param tableName the name of the table
* @return Ordered list of {@link HRegionInfo}.
* @throws IOException
- */
+ */
public List<HRegionInfo> getTableRegions(final byte[] tableName)
throws IOException {
CatalogTracker ct = getCatalogTracker();
@@ -1899,7 +1883,7 @@ public class HBaseAdmin implements Abort
}
return Regions;
}
-
+
@Override
public void close() throws IOException {
if (this.connection != null) {
@@ -1920,14 +1904,14 @@ public class HBaseAdmin implements Abort
/**
* Roll the log writer. That is, start writing log messages to a new file.
- *
+ *
* @param serverName
* The servername of the regionserver. A server name is made of host,
* port and startcode. This is mandatory. Here is an example:
* <code> host187.example.com,60020,1289493121758</code>
* @return If lots of logs, flush the returned regions so next time through
* we can clean logs. Returns null if nothing to flush. Names are actual
- * region names as returned by {@link HRegionInfo#getEncodedName()}
+ * region names as returned by {@link HRegionInfo#getEncodedName()}
* @throws IOException if a remote or network exception occurs
* @throws FailedLogCloseException
*/
Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java?rev=1377965&r1=1377964&r2=1377965&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java Tue Aug 28 03:40:47 2012
@@ -90,7 +90,6 @@ import org.apache.hadoop.hbase.util.Byte
import org.apache.hadoop.hbase.util.Pair;
import org.apache.hadoop.hbase.util.SoftValueSortedMap;
import org.apache.hadoop.hbase.util.Triple;
-import org.apache.hadoop.hbase.util.Writables;
import org.apache.hadoop.hbase.zookeeper.MasterAddressTracker;
import org.apache.hadoop.hbase.zookeeper.RootRegionTracker;
import org.apache.hadoop.hbase.zookeeper.ZKClusterId;
@@ -851,14 +850,11 @@ public class HConnectionManager {
MetaScannerVisitor visitor = new MetaScannerVisitorBase() {
@Override
public boolean processRow(Result row) throws IOException {
- byte[] value = row.getValue(HConstants.CATALOG_FAMILY,
- HConstants.REGIONINFO_QUALIFIER);
- HRegionInfo info = Writables.getHRegionInfoOrNull(value);
+ HRegionInfo info = MetaScanner.getHRegionInfo(row);
if (info != null) {
if (Bytes.equals(tableName, info.getTableName())) {
- value = row.getValue(HConstants.CATALOG_FAMILY,
- HConstants.SERVER_QUALIFIER);
- if (value == null) {
+ ServerName server = HRegionInfo.getServerName(row);
+ if (server == null) {
available.set(false);
return false;
}
@@ -973,39 +969,30 @@ public class HConnectionManager {
MetaScannerVisitor visitor = new MetaScannerVisitorBase() {
public boolean processRow(Result result) throws IOException {
try {
- byte[] value = result.getValue(HConstants.CATALOG_FAMILY,
- HConstants.REGIONINFO_QUALIFIER);
- HRegionInfo regionInfo = null;
-
- if (value != null) {
- // convert the row result into the HRegionLocation we need!
- regionInfo = Writables.getHRegionInfo(value);
-
- // possible we got a region of a different table...
- if (!Bytes.equals(regionInfo.getTableName(),
- tableName)) {
- return false; // stop scanning
- }
- if (regionInfo.isOffline()) {
- // don't cache offline regions
- return true;
- }
- value = result.getValue(HConstants.CATALOG_FAMILY,
- HConstants.SERVER_QUALIFIER);
- if (value == null) {
- return true; // don't cache it
- }
- final String hostAndPort = Bytes.toString(value);
- String hostname = Addressing.parseHostname(hostAndPort);
- int port = Addressing.parsePort(hostAndPort);
- value = result.getValue(HConstants.CATALOG_FAMILY,
- HConstants.STARTCODE_QUALIFIER);
- // instantiate the location
- HRegionLocation loc =
- new HRegionLocation(regionInfo, hostname, port);
- // cache this meta entry
- cacheLocation(tableName, loc);
+ HRegionInfo regionInfo = MetaScanner.getHRegionInfo(result);
+ if (regionInfo == null) {
+ return true;
+ }
+
+ // possible we got a region of a different table...
+ if (!Bytes.equals(regionInfo.getTableName(), tableName)) {
+ return false; // stop scanning
+ }
+ if (regionInfo.isOffline()) {
+ // don't cache offline regions
+ return true;
+ }
+
+ ServerName serverName = HRegionInfo.getServerName(result);
+ if (serverName == null) {
+ return true; // don't cache it
}
+ // instantiate the location
+ HRegionLocation loc = new HRegionLocation(regionInfo, serverName.getHostname(),
+ serverName.getPort());
+ // cache this meta entry
+ cacheLocation(tableName, loc);
+
return true;
} catch (RuntimeException e) {
throw new IOException(e);
@@ -1092,15 +1079,14 @@ public class HConnectionManager {
if (regionInfoRow == null) {
throw new TableNotFoundException(Bytes.toString(tableName));
}
- byte [] value = regionInfoRow.getValue(HConstants.CATALOG_FAMILY,
- HConstants.REGIONINFO_QUALIFIER);
- if (value == null || value.length == 0) {
+
+ // convert the row result into the HRegionLocation we need!
+ HRegionInfo regionInfo = MetaScanner.getHRegionInfo(regionInfoRow);
+ if (regionInfo == null) {
throw new IOException("HRegionInfo was null or empty in " +
Bytes.toString(parentTable) + ", row=" + regionInfoRow);
}
- // convert the row result into the HRegionLocation we need!
- HRegionInfo regionInfo = (HRegionInfo) Writables.getWritable(
- value, new HRegionInfo());
+
// possible we got a region of a different table...
if (!Bytes.equals(regionInfo.getTableName(), tableName)) {
throw new TableNotFoundException(
@@ -1119,13 +1105,8 @@ public class HConnectionManager {
regionInfo.getRegionNameAsString());
}
- value = regionInfoRow.getValue(HConstants.CATALOG_FAMILY,
- HConstants.SERVER_QUALIFIER);
- String hostAndPort = "";
- if (value != null) {
- hostAndPort = Bytes.toString(value);
- }
- if (hostAndPort.equals("")) {
+ ServerName serverName = HRegionInfo.getServerName(regionInfoRow);
+ if (serverName == null) {
throw new NoServerForRegionException("No server address listed " +
"in " + Bytes.toString(parentTable) + " for region " +
regionInfo.getRegionNameAsString() + " containing row " +
@@ -1133,9 +1114,8 @@ public class HConnectionManager {
}
// Instantiate the location
- String hostname = Addressing.parseHostname(hostAndPort);
- int port = Addressing.parsePort(hostAndPort);
- location = new HRegionLocation(regionInfo, hostname, port);
+ location =
+ new HRegionLocation(regionInfo, serverName.getHostname(), serverName.getPort());
cacheLocation(tableName, location);
return location;
} catch (TableNotFoundException e) {
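A minimal sketch of the new lookup flow above, assuming a catalog-table
Result named 'row' is already in hand (variable names are illustrative,
not part of the patch):

    HRegionInfo info = MetaScanner.getHRegionInfo(row);   // pb-aware parse
    if (info != null) {
      ServerName sn = HRegionInfo.getServerName(row);     // info:server cell
      if (sn != null) {
        HRegionLocation loc =
            new HRegionLocation(info, sn.getHostname(), sn.getPort());
      }
    }

Both helpers are used null-returning in the callers above, which is why
the old byte[]-emptiness checks become simple null checks.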
Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/client/HTable.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/client/HTable.java?rev=1377965&r1=1377964&r2=1377965&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/client/HTable.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/client/HTable.java Tue Aug 28 03:40:47 2012
@@ -21,15 +21,16 @@ package org.apache.hadoop.hbase.client;
import java.io.Closeable;
import java.io.DataInput;
+import java.io.DataInputStream;
import java.io.DataOutput;
import java.io.IOException;
import java.io.InterruptedIOException;
import java.lang.reflect.Proxy;
import java.util.ArrayList;
import java.util.Arrays;
+import java.util.Collections;
import java.util.List;
import java.util.Map;
-import java.util.Collections;
import java.util.NavigableMap;
import java.util.TreeMap;
import java.util.concurrent.ExecutorService;
@@ -54,6 +55,7 @@ import org.apache.hadoop.hbase.ServerNam
import org.apache.hadoop.hbase.client.HConnectionManager.HConnectable;
import org.apache.hadoop.hbase.client.coprocessor.Batch;
import org.apache.hadoop.hbase.filter.BinaryComparator;
+import org.apache.hadoop.hbase.io.DataInputInputStream;
import org.apache.hadoop.hbase.ipc.CoprocessorProtocol;
import org.apache.hadoop.hbase.ipc.ExecRPCInvoker;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
@@ -77,10 +79,10 @@ import com.google.protobuf.ServiceExcept
* <p>Used to communicate with a single HBase table.
*
* <p>This class is not thread safe for reads or writes.
- *
+ *
* <p>In case of writes (Put, Delete), the underlying write buffer can
* be corrupted if multiple threads contend over a single HTable instance.
- *
+ *
* <p>In case of reads, some fields used by a Scan are shared among all threads.
* The HTable implementation can either not contract to be safe in case of a Get
*
@@ -107,7 +109,7 @@ import com.google.protobuf.ServiceExcept
*
* <p>Note that this class implements the {@link Closeable} interface. When an
* HTable instance is no longer required, it *should* be closed in order to ensure
- * that the underlying resources are promptly released. Please note that the close
+ * that the underlying resources are promptly released. Please note that the close
* method can throw java.io.IOException that must be handled.
*
* @see HBaseAdmin for create, drop, list, enable and disable of tables.
@@ -224,7 +226,7 @@ public class HTable implements HTableInt
* @param pool ExecutorService to be used.
* @throws IOException if a remote or network exception occurs
*/
- public HTable(final byte[] tableName, final HConnection connection,
+ public HTable(final byte[] tableName, final HConnection connection,
final ExecutorService pool) throws IOException {
if (pool == null || pool.isShutdown()) {
throw new IllegalArgumentException("Pool is null or shut down.");
@@ -367,7 +369,7 @@ public class HTable implements HTableInt
throws IOException {
return connection.getRegionLocation(tableName, row, reload);
}
-
+
/**
* {@inheritDoc}
*/
@@ -580,13 +582,16 @@ public class HTable implements HTableInt
* </pre>
* @param out {@link DataOutput} to serialize this object into.
* @throws IOException if a remote or network exception occurs
+ * @deprecated serializing/deserializing region info is deprecated
*/
+ @Deprecated
public void serializeRegionInfo(DataOutput out) throws IOException {
Map<HRegionInfo, HServerAddress> allRegions = this.getRegionsInfo();
// first, write number of regions
out.writeInt(allRegions.size());
for (Map.Entry<HRegionInfo, HServerAddress> es : allRegions.entrySet()) {
- es.getKey().write(out);
+ byte[] hriBytes = es.getKey().toDelimitedByteArray();
+ out.write(hriBytes);
es.getValue().write(out);
}
}
@@ -606,19 +611,27 @@ public class HTable implements HTableInt
* @param in {@link DataInput} object.
* @return A map of HRegionInfo with its server address.
* @throws IOException if an I/O exception occurs.
+ * @deprecated serializing/deserializing region info is deprecated
*/
+ @Deprecated
public Map<HRegionInfo, HServerAddress> deserializeRegionInfo(DataInput in)
throws IOException {
final Map<HRegionInfo, HServerAddress> allRegions =
new TreeMap<HRegionInfo, HServerAddress>();
+ DataInputStream is = null;
+ if (in instanceof DataInputStream) {
+ is = (DataInputStream) in;
+ } else {
+ is = new DataInputStream(DataInputInputStream.constructInputStream(in));
+ }
+
// the first integer is expected to be the size of records
- int regionsCount = in.readInt();
+ int regionsCount = is.readInt();
for (int i = 0; i < regionsCount; ++i) {
- HRegionInfo hri = new HRegionInfo();
- hri.readFields(in);
+ HRegionInfo hri = HRegionInfo.parseFrom(is);
HServerAddress hsa = new HServerAddress();
- hsa.readFields(in);
+ hsa.readFields(is);
allRegions.put(hri, hsa);
}
return allRegions;
@@ -802,7 +815,7 @@ public class HTable implements HTableInt
validatePut(put);
writeBuffer.add(put);
currentWriteBufferSize += put.heapSize();
-
+
// we need to periodically see if the writebuffer is full instead of waiting until the end of the List
n++;
if (n % DOPUT_WB_CHECK == 0 && currentWriteBufferSize > writeBufferSize) {
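For illustration, a round trip through the (now deprecated) pair above,
assuming an open HTable named 'table' and in-memory streams (all names
are illustrative):

    ByteArrayOutputStream bos = new ByteArrayOutputStream();
    table.serializeRegionInfo(new DataOutputStream(bos));
    DataInput in =
        new DataInputStream(new ByteArrayInputStream(bos.toByteArray()));
    // A DataInputStream is used as-is; any other DataInput would be
    // wrapped via DataInputInputStream.constructInputStream().
    Map<HRegionInfo, HServerAddress> regions = table.deserializeRegionInfo(in);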
Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/client/MetaScanner.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/client/MetaScanner.java?rev=1377965&r1=1377964&r2=1377965&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/client/MetaScanner.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/client/MetaScanner.java Tue Aug 28 03:40:47 2012
@@ -38,7 +38,7 @@ import org.apache.hadoop.hbase.ServerNam
import org.apache.hadoop.hbase.TableNotFoundException;
import org.apache.hadoop.hbase.client.HConnectionManager.HConnectable;
import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.Writables;
+import org.apache.hadoop.hbase.util.PairOfSameType;
/**
* Scanner class that contains the <code>.META.</code> table scanning logic
@@ -166,13 +166,11 @@ public class MetaScanner {
throw new TableNotFoundException("Cannot find row in .META. for table: "
+ Bytes.toString(tableName) + ", row=" + Bytes.toStringBinary(searchRow));
}
- byte[] value = startRowResult.getValue(HConstants.CATALOG_FAMILY,
- HConstants.REGIONINFO_QUALIFIER);
- if (value == null || value.length == 0) {
+ HRegionInfo regionInfo = getHRegionInfo(startRowResult);
+ if (regionInfo == null) {
throw new IOException("HRegionInfo was null or empty in Meta for " +
Bytes.toString(tableName) + ", row=" + Bytes.toStringBinary(searchRow));
}
- HRegionInfo regionInfo = Writables.getHRegionInfo(value);
byte[] rowBefore = regionInfo.getStartKey();
startRow = HRegionInfo.createRegionName(tableName, rowBefore,
@@ -240,6 +238,24 @@ public class MetaScanner {
}
/**
+ * Returns HRegionInfo object from the column
+ * HConstants.CATALOG_FAMILY:HConstants.REGIONINFO_QUALIFIER of the catalog
+ * table Result.
+ * @param data a Result object from the catalog table scan
+ * @return HRegionInfo or null
+ */
+ public static HRegionInfo getHRegionInfo(Result data) {
+ byte [] bytes =
+ data.getValue(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
+ if (bytes == null) return null;
+ HRegionInfo info = HRegionInfo.parseFromOrNull(bytes);
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Current INFO from scan results = " + info);
+ }
+ return info;
+ }
+
+ /**
* Lists all of the regions currently in META.
* @param conf
* @return List of all user-space regions.
@@ -267,13 +283,13 @@ public class MetaScanner {
if (result == null || result.isEmpty()) {
return true;
}
- byte [] bytes = result.getValue(HConstants.CATALOG_FAMILY,
- HConstants.REGIONINFO_QUALIFIER);
- if (bytes == null) {
+
+ HRegionInfo regionInfo = getHRegionInfo(result);
+ if (regionInfo == null) {
LOG.warn("Null REGIONINFO_QUALIFIER: " + result);
return true;
}
- HRegionInfo regionInfo = Writables.getHRegionInfo(bytes);
+
// If region offline AND we are not to include offlined regions, return.
if (regionInfo.isOffline() && !offlined) return true;
regions.add(regionInfo);
@@ -299,25 +315,11 @@ public class MetaScanner {
MetaScannerVisitor visitor = new TableMetaScannerVisitor(conf, tablename) {
@Override
public boolean processRowInternal(Result rowResult) throws IOException {
- HRegionInfo info = Writables.getHRegionInfo(
- rowResult.getValue(HConstants.CATALOG_FAMILY,
- HConstants.REGIONINFO_QUALIFIER));
- byte [] value = rowResult.getValue(HConstants.CATALOG_FAMILY,
- HConstants.SERVER_QUALIFIER);
- String hostAndPort = null;
- if (value != null && value.length > 0) {
- hostAndPort = Bytes.toString(value);
- }
- value = rowResult.getValue(HConstants.CATALOG_FAMILY,
- HConstants.STARTCODE_QUALIFIER);
- long startcode = -1L;
- if (value != null && value.length > 0) startcode = Bytes.toLong(value);
+ HRegionInfo info = getHRegionInfo(rowResult);
+ ServerName serverName = HRegionInfo.getServerName(rowResult);
+
if (!(info.isOffline() || info.isSplit())) {
- ServerName sn = null;
- if (hostAndPort != null && hostAndPort.length() > 0) {
- sn = new ServerName(hostAndPort, startcode);
- }
- regions.put(new UnmodifyableHRegionInfo(info), sn);
+ regions.put(new UnmodifyableHRegionInfo(info), serverName);
}
return true;
}
@@ -389,9 +391,7 @@ public class MetaScanner {
@Override
public boolean processRow(Result rowResult) throws IOException {
- HRegionInfo info = Writables.getHRegionInfoOrNull(
- rowResult.getValue(HConstants.CATALOG_FAMILY,
- HConstants.REGIONINFO_QUALIFIER));
+ HRegionInfo info = getHRegionInfo(rowResult);
if (info == null) {
return true;
}
@@ -405,10 +405,9 @@ public class MetaScanner {
* seen by this scanner as well, so we block until they are added to the META table. Even
* though we are waiting for META entries, ACID semantics in HBase indicates that this
* scanner might not see the new rows. So we manually query the daughter rows */
- HRegionInfo splitA = Writables.getHRegionInfo(rowResult.getValue(HConstants.CATALOG_FAMILY,
- HConstants.SPLITA_QUALIFIER));
- HRegionInfo splitB = Writables.getHRegionInfo(rowResult.getValue(HConstants.CATALOG_FAMILY,
- HConstants.SPLITB_QUALIFIER));
+ PairOfSameType<HRegionInfo> daughters = HRegionInfo.getDaughterRegions(rowResult);
+ HRegionInfo splitA = daughters.getFirst();
+ HRegionInfo splitB = daughters.getSecond();
HTable metaTable = getMetaTable();
long start = System.currentTimeMillis();
@@ -446,8 +445,7 @@ public class MetaScanner {
while (System.currentTimeMillis() - start < timeout) {
Get get = new Get(regionName);
Result result = metaTable.get(get);
- HRegionInfo info = Writables.getHRegionInfoOrNull(
- result.getValue(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER));
+ HRegionInfo info = getHRegionInfo(result);
if (info != null) {
return result;
}
@@ -478,8 +476,7 @@ public class MetaScanner {
@Override
public final boolean processRow(Result rowResult) throws IOException {
- HRegionInfo info = Writables.getHRegionInfoOrNull(
- rowResult.getValue(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER));
+ HRegionInfo info = getHRegionInfo(rowResult);
if (info == null) {
return true;
}
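The new static helper also shortens custom visitors; a sketch (the body
is illustrative):

    MetaScannerVisitor visitor = new MetaScannerVisitorBase() {
      @Override
      public boolean processRow(Result rowResult) throws IOException {
        HRegionInfo info = MetaScanner.getHRegionInfo(rowResult);
        if (info == null) return true;  // missing/unparseable cell; keep scanning
        // ... act on info ...
        return true;
      }
    };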
Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/client/Result.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/client/Result.java?rev=1377965&r1=1377964&r2=1377965&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/client/Result.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/client/Result.java Tue Aug 28 03:40:47 2012
@@ -94,6 +94,8 @@ public class Result implements Writable,
/**
* Instantiate a Result with the specified array of KeyValues.
+ * <br><strong>Note:</strong> You must ensure that the KeyValues
+ * are already sorted.
* @param kvs array of KeyValues
*/
public Result(KeyValue [] kvs) {
@@ -104,6 +106,8 @@ public class Result implements Writable,
/**
* Instantiate a Result with the specified List of KeyValues.
+ * <br><strong>Note:</strong> You must ensure that the KeyValues
+ * are already sorted.
* @param kvs List of KeyValues
*/
public Result(List<KeyValue> kvs) {
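Given the new note on these constructors, callers that assemble Results
by hand (tests, mocks) should sort first; a sketch where kvA and kvB are
hypothetical KeyValues:

    List<KeyValue> kvs = new ArrayList<KeyValue>();
    kvs.add(kvB);
    kvs.add(kvA);
    Collections.sort(kvs, KeyValue.COMPARATOR);  // satisfy the sorted contract
    Result result = new Result(kvs);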
Added: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/io/DataInputInputStream.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/io/DataInputInputStream.java?rev=1377965&view=auto
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/io/DataInputInputStream.java (added)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/io/DataInputInputStream.java Tue Aug 28 03:40:47 2012
@@ -0,0 +1,60 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.io;
+
+import java.io.DataInput;
+import java.io.IOException;
+import java.io.InputStream;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+
+/**
+ * An InputStream that wraps a DataInput.
+ * @see DataOutputOutputStream
+ */
+@InterfaceAudience.Private
+public class DataInputInputStream extends InputStream {
+
+ private DataInput in;
+
+ /**
+ * Construct an InputStream from the given DataInput. If 'in'
+ * is already an InputStream, simply returns it. Otherwise, wraps
+ * it in an InputStream.
+ * @param in the DataInput to wrap
+ * @return an InputStream instance that reads from 'in'
+ */
+ public static InputStream constructInputStream(DataInput in) {
+ if (in instanceof InputStream) {
+ return (InputStream)in;
+ } else {
+ return new DataInputInputStream(in);
+ }
+ }
+
+ public DataInputInputStream(DataInput in) {
+ this.in = in;
+ }
+
+ @Override
+ public int read() throws IOException {
+ return in.readUnsignedByte();
+ }
+}
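A usage sketch for the adapter: RandomAccessFile implements DataInput but
is not an InputStream, so it gets wrapped (the file name is illustrative):

    RandomAccessFile raf = new RandomAccessFile("regions.bin", "r");
    InputStream is = DataInputInputStream.constructInputStream(raf);
    int b = is.read();  // delegates to raf.readUnsignedByte()
    // A DataInputStream, by contrast, would be returned unwrapped.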