Posted to common-commits@hadoop.apache.org by st...@apache.org on 2007/12/10 23:36:08 UTC
svn commit: r603077 - in /lucene/hadoop/trunk/src/contrib/hbase: ./
src/java/org/apache/hadoop/hbase/ src/test/org/apache/hadoop/hbase/
Author: stack
Date: Mon Dec 10 14:36:03 2007
New Revision: 603077
URL: http://svn.apache.org/viewvc?rev=603077&view=rev
Log:
HADOOP-2283 AlreadyBeingCreatedException (Was: Stuck replay of failed
regionserver edits)
Modified:
lucene/hadoop/trunk/src/contrib/hbase/CHANGES.txt
lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HLog.java
lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HStore.java
lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HStoreFile.java
lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestHLog.java
Modified: lucene/hadoop/trunk/src/contrib/hbase/CHANGES.txt
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/CHANGES.txt?rev=603077&r1=603076&r2=603077&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/CHANGES.txt (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/CHANGES.txt Mon Dec 10 14:36:03 2007
@@ -68,6 +68,8 @@
(Bryan Duxbury via Stack)
HADOOP-2350 Scanner api returns null row names, or skips row names if
different column families do not have entries for some rows
+ HADOOP-2283 AlreadyBeingCreatedException (Was: Stuck replay of failed
+ regionserver edits)
IMPROVEMENTS
HADOOP-2401 Add convenience put method that takes writable
Modified: lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HLog.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HLog.java?rev=603077&r1=603076&r2=603077&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HLog.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HLog.java Mon Dec 10 14:36:03 2007
@@ -160,11 +160,10 @@
" because zero length");
continue;
}
- SequenceFile.Reader in =
- new SequenceFile.Reader(fs, logfiles[i], conf);
+ HLogKey key = new HLogKey();
+ HLogEdit val = new HLogEdit();
+ SequenceFile.Reader in = new SequenceFile.Reader(fs, logfiles[i], conf);
try {
- HLogKey key = new HLogKey();
- HLogEdit val = new HLogEdit();
int count = 0;
for (; in.next(key, val); count++) {
Text regionName = key.getRegionName();
@@ -174,13 +173,16 @@
HRegionInfo.encodeRegionName(regionName)),
HREGION_OLDLOGFILE_NAME);
if (LOG.isDebugEnabled()) {
- LOG.debug("Creating new log file writer for path " + logfile);
+ LOG.debug("Creating new log file writer for path " + logfile +
+ "; map content " + logWriters.toString());
}
w = SequenceFile.createWriter(fs, conf, logfile, HLogKey.class,
HLogEdit.class);
- logWriters.put(regionName, w);
+ // Use copy of regionName; regionName object is reused inside in
+ // HStoreKey.getRegionName so its content changes as we iterate.
+ logWriters.put(new Text(regionName), w);
}
- if (count % 100 == 0 && count > 0 && LOG.isDebugEnabled()) {
+ if (count % 10000 == 0 && count > 0 && LOG.isDebugEnabled()) {
LOG.debug("Applied " + count + " edits");
}
w.append(key, val);
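
The map-key change in the hunk above is the heart of the fix: Hadoop's Text is mutable and SequenceFile.Reader.next(key, val) refills the same key object on every call, so putting that object straight into logWriters left the map holding keys whose contents later changed. A subsequent lookup for a region then missed the existing writer and splitLog would try to create the same old-log file a second time, which is consistent with the AlreadyBeingCreatedException in the issue title. The following standalone sketch is illustrative only (class and variable names are made up, not the actual HLog code); it shows why the patch copies the name with new Text(regionName) before using it as a map key:

    import java.util.HashMap;
    import java.util.Map;
    import org.apache.hadoop.io.Text;

    // Illustrative only, not part of the patch: a reused, mutable Text used as a
    // HashMap key goes stale as soon as its contents are overwritten.
    public class ReusedKeySketch {
      public static void main(String[] args) {
        Map<Text, String> writers = new HashMap<Text, String>();
        Text reused = new Text();   // stands in for the key object reused by in.next(key, val)
        for (String r : new String[] { "region-a", "region-b" }) {
          reused.set(r);            // mutates the one shared object on each pass
          // Buggy version: stores the shared object; its contents keep changing afterwards.
          writers.put(reused, "writer for " + r);
          // Fixed version, as in the patch: writers.put(new Text(reused), "writer for " + r);
        }
        // Both stored keys are the same object and now read "region-b", so this lookup
        // misses and a caller would create a second writer for a file that already exists.
        System.out.println(writers.get(new Text("region-a")));  // prints null with the buggy put
      }
    }

With the copied key the final lookup returns "writer for region-a", matching the behaviour the patch restores in splitLog.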
Modified: lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HStore.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HStore.java?rev=603077&r1=603076&r2=603077&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HStore.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HStore.java Mon Dec 10 14:36:03 2007
@@ -823,9 +823,11 @@
}
FSDataOutputStream out =
fs.create(new Path(filterDir, BLOOMFILTER_FILE_NAME));
-
- bloomFilter.write(out);
- out.close();
+ try {
+ bloomFilter.write(out);
+ } finally {
+ out.close();
+ }
if (LOG.isDebugEnabled()) {
LOG.debug("flushed bloom filter for " + this.storeName);
}
Modified: lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HStoreFile.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HStoreFile.java?rev=603077&r1=603076&r2=603077&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HStoreFile.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HStoreFile.java Mon Dec 10 14:36:03 2007
@@ -492,11 +492,14 @@
throw new IOException("File already exists " + p.toString());
}
FSDataOutputStream out = fs.create(p);
- out.writeUTF(getReference().getEncodedRegionName());
- getReference().getMidkey().write(out);
- out.writeLong(getReference().getFileId());
- out.writeBoolean(isTopFileRegion(getReference().getFileRegion()));
- out.close();
+ try {
+ out.writeUTF(getReference().getEncodedRegionName());
+ getReference().getMidkey().write(out);
+ out.writeLong(getReference().getFileId());
+ out.writeBoolean(isTopFileRegion(getReference().getFileRegion()));
+ } finally {
+ out.close();
+ }
}
/*
@@ -559,7 +562,6 @@
in.close();
}
}
-
} finally {
out.close();
}
@@ -867,7 +869,6 @@
static class Writer extends MapFile.Writer {
private final Filter bloomFilter;
-
/**
* Constructor
*
Modified: lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestHLog.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestHLog.java?rev=603077&r1=603076&r2=603077&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestHLog.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestHLog.java Mon Dec 10 14:36:03 2007
@@ -30,16 +30,63 @@
/** JUnit test case for HLog */
public class TestHLog extends HBaseTestCase implements HConstants {
+ private Path dir;
+ private FileSystem fs;
+
+ @Override
+ protected void setUp() throws Exception {
+ super.setUp();
+ this.dir = getUnitTestdir(getName());
+ this.fs = FileSystem.get(this.conf);
+ if (fs.exists(dir)) {
+ fs.delete(dir);
+ }
+ }
+ @Override
+ protected void tearDown() throws Exception {
+ if (this.fs.exists(this.dir)) {
+ this.fs.delete(this.dir);
+ }
+ super.tearDown();
+ }
+
/**
+ * Just write multiple logs then split. Before fix for HADOOP-2283, this
+ * would fail.
* @throws IOException
*/
- public void testAppend() throws IOException {
- Path dir = getUnitTestdir(getName());
- FileSystem fs = FileSystem.get(this.conf);
- if (fs.exists(dir)) {
- fs.delete(dir);
+ public void testSplit() throws IOException {
+ final Text tableName = new Text(getName());
+ final Text rowName = tableName;
+ HLog log = new HLog(this.fs, this.dir, this.conf, null);
+ // Add edits for three regions.
+ try {
+ for (int ii = 0; ii < 3; ii++) {
+ for (int i = 0; i < 3; i++) {
+ for (int j = 0; j < 3; j++) {
+ TreeMap<HStoreKey, byte[]> edit = new TreeMap<HStoreKey, byte[]>();
+ Text column = new Text(Integer.toString(j));
+ edit.put(
+ new HStoreKey(rowName, column, System.currentTimeMillis()),
+ column.getBytes());
+ log.append(new Text(Integer.toString(i)), tableName, edit);
+ }
+ }
+ log.rollWriter();
+ }
+ HLog.splitLog(this.testDir, this.dir, this.fs, this.conf);
+ } finally {
+ if (log != null) {
+ log.closeAndDelete();
+ }
}
+ }
+
+ /**
+ * @throws IOException
+ */
+ public void testAppend() throws IOException {
final int COL_COUNT = 10;
final Text regionName = new Text("regionname");
final Text tableName = new Text("tablename");
@@ -88,9 +135,6 @@
}
if (reader != null) {
reader.close();
- }
- if (fs.exists(dir)) {
- fs.delete(dir);
}
}
}