Posted to commits@hbase.apache.org by ap...@apache.org on 2014/03/16 00:17:35 UTC
svn commit: r1577947 - in /hbase/branches/0.98/hbase-server/src:
main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java
test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFilesSplitRecovery.java
Author: apurtell
Date: Sat Mar 15 23:17:35 2014
New Revision: 1577947
URL: http://svn.apache.org/r1577947
Log:
HBASE-10763 Backport HBASE-10549 to 0.98 (rajeshbabu)
Modified:
hbase/branches/0.98/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java
hbase/branches/0.98/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFilesSplitRecovery.java
Modified: hbase/branches/0.98/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.98/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java?rev=1577947&r1=1577946&r2=1577947&view=diff
==============================================================================
--- hbase/branches/0.98/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java (original)
+++ hbase/branches/0.98/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java Sat Mar 15 23:17:35 2014
@@ -490,6 +490,7 @@ public class LoadIncrementalHFiles exten
* LQI's corresponding to the resultant hfiles.
*
* protected for testing
+ * @throws IOException
*/
protected List<LoadQueueItem> groupOrSplit(Multimap<ByteBuffer, LoadQueueItem> regionGroups,
final LoadQueueItem item, final HTable table,
@@ -530,6 +531,30 @@ public class LoadIncrementalHFiles exten
idx = -(idx + 1) - 1;
}
final int indexForCallable = idx;
+
+ /**
+ * We can consider there to be a region hole under the following conditions: 1) if idx < 0, the
+ * first region info is missing; 2) if the end key of a region is not equal to the start key of
+ * the next region; 3) if the end key of the last region is not empty.
+ */
+ if (indexForCallable < 0) {
+ throw new IOException("The first region info for table "
+ + Bytes.toString(table.getTableName())
+ + " cann't be found in hbase:meta.Please use hbck tool to fix it first.");
+ } else if ((indexForCallable == startEndKeys.getFirst().length - 1)
+ && !Bytes.equals(startEndKeys.getSecond()[indexForCallable], HConstants.EMPTY_BYTE_ARRAY)) {
+ throw new IOException("The last region info for table "
+ + Bytes.toString(table.getTableName())
+ + " cann't be found in hbase:meta.Please use hbck tool to fix it first.");
+ } else if (indexForCallable + 1 < startEndKeys.getFirst().length
+ && !(Bytes.compareTo(startEndKeys.getSecond()[indexForCallable],
+ startEndKeys.getFirst()[indexForCallable + 1]) == 0)) {
+ throw new IOException("The endkey of one region for table "
+ + Bytes.toString(table.getTableName())
+ + " is not equal to the startkey of the next region in hbase:meta."
+ + "Please use hbck tool to fix it first.");
+ }
+
boolean lastKeyInRange =
Bytes.compareTo(last, startEndKeys.getSecond()[idx]) < 0 ||
Bytes.equals(startEndKeys.getSecond()[idx], HConstants.EMPTY_BYTE_ARRAY);
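
For context, the checks added above operate on the parallel start/end key arrays returned
by HTable.getStartEndKeys(). A minimal standalone sketch of the same hole-detection logic
follows; the class and method names here are illustrative, not part of the patch:

import java.io.IOException;
import java.util.Arrays;

import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Pair;

// Illustrative sketch only; mirrors the hole checks added to groupOrSplit() above.
final class RegionHoleCheck {

  // Throws if hbase:meta has a hole around the region that should contain
  // 'first' (the first row key of the HFile being grouped).
  static void assertNoRegionHole(Pair<byte[][], byte[][]> startEndKeys, byte[] first)
      throws IOException {
    byte[][] startKeys = startEndKeys.getFirst();
    byte[][] endKeys = startEndKeys.getSecond();

    // Find the region whose start key is <= first, as groupOrSplit() does.
    int idx = Arrays.binarySearch(startKeys, first, Bytes.BYTES_COMPARATOR);
    if (idx < 0) {
      idx = -(idx + 1) - 1; // insertion point -> index of the preceding region
    }

    if (idx < 0) {
      // 1) No region covers 'first': the first region info is missing.
      throw new IOException("The first region info cannot be found in hbase:meta.");
    } else if (idx == startKeys.length - 1
        && !Bytes.equals(endKeys[idx], HConstants.EMPTY_BYTE_ARRAY)) {
      // 2) The last region must have an empty end key; otherwise the tail is missing.
      throw new IOException("The last region info cannot be found in hbase:meta.");
    } else if (idx + 1 < startKeys.length
        && !Bytes.equals(endKeys[idx], startKeys[idx + 1])) {
      // 3) Adjacent regions must meet exactly: end key == next region's start key.
      throw new IOException("Hole between adjacent regions in hbase:meta.");
    }
  }
}
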
Modified: hbase/branches/0.98/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFilesSplitRecovery.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.98/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFilesSplitRecovery.java?rev=1577947&r1=1577946&r2=1577947&view=diff
==============================================================================
--- hbase/branches/0.98/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFilesSplitRecovery.java (original)
+++ hbase/branches/0.98/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFilesSplitRecovery.java Sat Mar 15 23:17:35 2014
@@ -35,6 +35,7 @@ import org.apache.commons.logging.LogFac
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
@@ -44,6 +45,9 @@ import org.apache.hadoop.hbase.HTableDes
import org.apache.hadoop.hbase.LargeTests;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableExistsException;
+import org.apache.hadoop.hbase.catalog.CatalogTracker;
+import org.apache.hadoop.hbase.catalog.MetaEditor;
+import org.apache.hadoop.hbase.catalog.MetaReader;
import org.apache.hadoop.hbase.client.HConnection;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Result;
@@ -119,7 +123,7 @@ public class TestLoadIncrementalHFilesSp
try {
LOG.info("Creating table " + table);
HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(table));
- for (int i = 0; i < 10; i++) {
+ for (int i = 0; i < cfs; i++) {
htd.addFamily(new HColumnDescriptor(family(i)));
}
@@ -129,6 +133,28 @@ public class TestLoadIncrementalHFilesSp
}
}
+ /**
+ * Creates a table with the given table name, the specified number of column families,<br>
+ * and split keys, if the table does not already exist.
+ * @param table the table name
+ * @param cfs the number of column families
+ * @param SPLIT_KEYS the keys to pre-split the table with
+ */
+ private void setupTableWithSplitkeys(String table, int cfs, byte[][] SPLIT_KEYS)
+ throws IOException {
+ try {
+ LOG.info("Creating table " + table);
+ HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(table));
+ for (int i = 0; i < cfs; i++) {
+ htd.addFamily(new HColumnDescriptor(family(i)));
+ }
+
+ util.getHBaseAdmin().createTable(htd, SPLIT_KEYS);
+ } catch (TableExistsException tee) {
+ LOG.info("Table " + table + " already exists");
+ }
+ }
+
private Path buildBulkFiles(String table, int value) throws Exception {
Path dir = util.getDataTestDirOnTestFS(table);
Path bulk1 = new Path(dir, table+value);
@@ -199,7 +225,7 @@ public class TestLoadIncrementalHFilesSp
/**
* Checks that all columns have the expected value and that there is the
* expected number of rows.
- * @throws IOException
+ * @throws IOException
*/
void assertExpectedTable(String table, int count, int value) throws IOException {
HTable t = null;
@@ -403,5 +429,60 @@ public class TestLoadIncrementalHFilesSp
fail("doBulkLoad should have thrown an exception");
}
+ @Test
+ public void testGroupOrSplitWhenRegionHoleExistsInMeta() throws Exception {
+ String tableName = "testGroupOrSplitWhenRegionHoleExistsInMeta";
+ byte[][] SPLIT_KEYS = new byte[][] { Bytes.toBytes("row_00000100") };
+
+ setupTableWithSplitkeys(tableName, 10, SPLIT_KEYS);
+ HTable table = new HTable(util.getConfiguration(), Bytes.toBytes(tableName));
+ Path dir = buildBulkFiles(tableName, 2);
+
+ final AtomicInteger countedLqis = new AtomicInteger();
+ LoadIncrementalHFiles loader = new LoadIncrementalHFiles(util.getConfiguration()) {
+
+ @Override
+ protected List<LoadQueueItem>
+ groupOrSplit(Multimap<ByteBuffer, LoadQueueItem> regionGroups, final LoadQueueItem item,
+ final HTable htable, final Pair<byte[][], byte[][]> startEndKeys) throws IOException {
+ List<LoadQueueItem> lqis = super.groupOrSplit(regionGroups, item, htable, startEndKeys);
+ if (lqis != null) {
+ countedLqis.addAndGet(lqis.size());
+ }
+ return lqis;
+ }
+ };
+
+ // Do a bulk load while there is no region hole in hbase:meta.
+ try {
+ loader.doBulkLoad(dir, table);
+ } catch (Exception e) {
+ LOG.error("exeception=", e);
+ }
+ // Check that all the data was loaded into the table.
+ this.assertExpectedTable(tableName, ROWCOUNT, 2);
+
+ dir = buildBulkFiles(tableName, 3);
+
+ // Mess it up by leaving a hole in hbase:meta.
+ CatalogTracker ct = new CatalogTracker(util.getConfiguration());
+ List<HRegionInfo> regionInfos = MetaReader.getTableRegions(ct, TableName.valueOf(tableName));
+ for (HRegionInfo regionInfo : regionInfos) {
+ if (Bytes.equals(regionInfo.getStartKey(), HConstants.EMPTY_BYTE_ARRAY)) {
+ MetaEditor.deleteRegion(ct, regionInfo);
+ break;
+ }
+ }
+
+ try {
+ loader.doBulkLoad(dir, table);
+ fail("doBulkLoad should have thrown an exception");
+ } catch (Exception e) {
+ LOG.error("exception=", e);
+ assertTrue("IOException expected", e instanceof IOException);
+ }
+
+ table.close();
+
+ this.assertExpectedTable(tableName, ROWCOUNT, 2);
+ }
}
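
For reference, callers reach the new checks through doBulkLoad(), as the test above does.
A minimal, hypothetical driver is sketched below (the table name and HFile directory are
placeholders); with this patch, a hole in hbase:meta fails fast with an IOException that
points the operator at hbck, instead of hanging as reported in HBASE-10549:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles;

public class BulkLoadDriver {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    HTable table = new HTable(conf, "mytable"); // illustrative table name
    try {
      // Bulk-loads the HFiles under the given directory into the table.
      // If hbase:meta has a region hole, this now throws IOException early.
      new LoadIncrementalHFiles(conf).doBulkLoad(new Path("/tmp/hfiles"), table);
    } finally {
      table.close();
    }
  }
}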