Posted to commits@hbase.apache.org by ap...@apache.org on 2010/01/26 03:28:25 UTC
svn commit: r903054 [3/3] - in /hadoop/hbase/branches/0.20_on_hadoop-0.21:
./ bin/ lib/ lib/native/Linux-amd64-64/ lib/native/Linux-i386-32/
src/contrib/ src/contrib/ec2/bin/
src/contrib/indexed/src/java/org/apache/hadoop/hbase/regionserver/
src/contri...
Added: hadoop/hbase/branches/0.20_on_hadoop-0.21/src/java/org/apache/hadoop/hbase/regionserver/wal/SequenceFileLogReader.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20_on_hadoop-0.21/src/java/org/apache/hadoop/hbase/regionserver/wal/SequenceFileLogReader.java?rev=903054&view=auto
==============================================================================
--- hadoop/hbase/branches/0.20_on_hadoop-0.21/src/java/org/apache/hadoop/hbase/regionserver/wal/SequenceFileLogReader.java (added)
+++ hadoop/hbase/branches/0.20_on_hadoop-0.21/src/java/org/apache/hadoop/hbase/regionserver/wal/SequenceFileLogReader.java Tue Jan 26 02:28:18 2010
@@ -0,0 +1,110 @@
+package org.apache.hadoop.hbase.regionserver.wal;
+
+import java.io.IOException;
+
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.regionserver.wal.HLog;
+import org.apache.hadoop.io.SequenceFile;
+
+public class SequenceFileLogReader implements HLog.Reader {
+
+ /**
+ * Hack just to set the correct file length up in SequenceFile.Reader.
+ * See HADOOP-6307. The below is all about setting the right length on
+ * the file we are reading. fs.getFileStatus(file).getLen() is passed
+ * down to a private SequenceFile.Reader constructor, but for a file
+ * that is still being written to that length can be stale, so it won't
+ * work; we need to use available() on the stream instead. The below is
+ * ugly: it makes getPos, the first time it's called, return a doctored
+ * value -- i.e. tell a lie -- just so this line up in SF.Reader's
+ * constructor ends up with the right answer:
+ *
+ * this.end = in.getPos() + length;
+ */
+ private static class WALReader extends SequenceFile.Reader {
+
+ WALReader(final FileSystem fs, final Path p, final HBaseConfiguration c)
+ throws IOException {
+ super(fs, p, c);
+ }
+
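+ // openFile is called from up in the SequenceFile.Reader constructor;
+ // overriding it here lets us wrap the stream before the Reader computes
+ // its end offset from getPos() and the (possibly stale) passed length.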
+ @Override
+ protected FSDataInputStream openFile(FileSystem fs, Path file,
+ int bufferSize, long length)
+ throws IOException {
+ return new WALReaderFSDataInputStream(super.openFile(fs, file,
+ bufferSize, length), length);
+ }
+
+ /**
+ * Override just so we can intercept the first call to getPos.
+ */
+ static class WALReaderFSDataInputStream extends FSDataInputStream {
+ private boolean firstGetPosInvocation = true;
+ private long length;
+
+ WALReaderFSDataInputStream(final FSDataInputStream is, final long l)
+ throws IOException {
+ super(is);
+ this.length = l;
+ }
+
+ @Override
+ public long getPos() throws IOException {
+ if (this.firstGetPosInvocation) {
+ this.firstGetPosInvocation = false;
+ // Tell a lie. We're doing this just so that this line up in
+ // SequenceFile.Reader constructor comes out with the correct length
+ // on the file:
+ // this.end = in.getPos() + length;
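+ // Worked example (hypothetical numbers): if the writer has written
+ // 2048 bytes but the reported length passed in was 1024, available()
+ // is 2048 and we return 2048 - 1024 = 1024, so up in SF.Reader
+ // this.end = 1024 + 1024 = 2048, the true length.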
+ long available = this.in.available();
+ // The passed-in length gets added back on up in the SF.Reader
+ // constructor, so subtract it here. If available < this.length,
+ // fall back to returning this.length.
+ return available >= this.length? available - this.length: this.length;
+ }
+ return super.getPos();
+ }
+ }
+ }
+
+ HBaseConfiguration conf;
+ WALReader reader;
+
+ public SequenceFileLogReader() { }
+
+ @Override
+ public void init(FileSystem fs, Path path, HBaseConfiguration conf)
+ throws IOException {
+ this.conf = conf;
+ reader = new WALReader(fs, path, conf);
+ }
+
+ @Override
+ public void close() throws IOException {
+ reader.close();
+ }
+
+ @Override
+ public HLog.Entry next() throws IOException {
+ return next(null);
+ }
+
+ @Override
+ public HLog.Entry next(HLog.Entry reuse) throws IOException {
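+ // If no Entry was passed in to reuse, allocate a fresh key and value;
+ // otherwise fill the caller's Entry in place to avoid the allocations.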
+ if (reuse == null) {
+ HLogKey key = HLog.newKey(conf);
+ KeyValue val = new KeyValue();
+ if (reader.next(key, val)) {
+ return new HLog.Entry(key, val);
+ }
+ } else if (reader.next(reuse.getKey(), reuse.getEdit())) {
+ return reuse;
+ }
+ return null;
+ }
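+
+ // Typical read loop over a WAL file (a sketch; 'process' here is a
+ // hypothetical caller-side method, not part of this class):
+ //
+ // HLog.Entry entry;
+ // while ((entry = reader.next(null)) != null) {
+ // process(entry.getKey(), entry.getEdit());
+ // }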
+
+}
Added: hadoop/hbase/branches/0.20_on_hadoop-0.21/src/java/org/apache/hadoop/hbase/regionserver/wal/SequenceFileLogWriter.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20_on_hadoop-0.21/src/java/org/apache/hadoop/hbase/regionserver/wal/SequenceFileLogWriter.java?rev=903054&view=auto
==============================================================================
--- hadoop/hbase/branches/0.20_on_hadoop-0.21/src/java/org/apache/hadoop/hbase/regionserver/wal/SequenceFileLogWriter.java (added)
+++ hadoop/hbase/branches/0.20_on_hadoop-0.21/src/java/org/apache/hadoop/hbase/regionserver/wal/SequenceFileLogWriter.java Tue Jan 26 02:28:18 2010
@@ -0,0 +1,74 @@
+package org.apache.hadoop.hbase.regionserver.wal;
+
+import java.io.IOException;
+import java.lang.reflect.Field;
+
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.regionserver.wal.HLog;
+import org.apache.hadoop.io.SequenceFile;
+import org.apache.hadoop.io.SequenceFile.Metadata;
+import org.apache.hadoop.io.compress.DefaultCodec;
+
+public class SequenceFileLogWriter implements HLog.Writer {
+
+ SequenceFile.Writer writer;
+ FSDataOutputStream writer_out;
+
+ public SequenceFileLogWriter() { }
+
+ @Override
+ public void init(FileSystem fs, Path path, HBaseConfiguration conf)
+ throws IOException {
+ writer = SequenceFile.createWriter(fs, conf, path,
+ HLog.getKeyClass(conf), KeyValue.class,
+ fs.getConf().getInt("io.file.buffer.size", 4096),
+ (short) conf.getInt("hbase.regionserver.hlog.replication",
+ fs.getDefaultReplication()),
+ conf.getLong("hbase.regionserver.hlog.blocksize",
+ fs.getDefaultBlockSize()),
+ SequenceFile.CompressionType.NONE,
+ new DefaultCodec(),
+ null,
+ new Metadata());
+
+ // Get at the private FSDataOutputStream inside SequenceFile.Writer so
+ // we can call sync on it. Make it accessible and stash it aside for
+ // use up in the sync method.
+ final Field[] fields = writer.getClass().getDeclaredFields();
+ final String fieldName = "out";
+ for (int i = 0; i < fields.length; ++i) {
+ if (fieldName.equals(fields[i].getName())) {
+ try {
+ fields[i].setAccessible(true);
+ this.writer_out = (FSDataOutputStream)fields[i].get(writer);
+ break;
+ } catch (IllegalAccessException ex) {
+ throw new IOException("Accessing " + fieldName, ex);
+ }
+ }
+ }
+ }
+
+ @Override
+ public void append(HLog.Entry entry) throws IOException {
+ this.writer.append(entry.getKey(), entry.getEdit());
+ }
+
+ @Override
+ public void close() throws IOException {
+ this.writer.close();
+ }
+
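+ // Note: SequenceFile.Writer.sync() only writes a sync marker into the
+ // stream; it does not flush data to the datanodes. Syncing the
+ // underlying FSDataOutputStream we dug out in init() is what actually
+ // pushes the edits out (cf. the HDFS append/sync work, e.g. HDFS-200).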
+ @Override
+ public void sync() throws IOException {
+ this.writer.sync();
+ if (this.writer_out != null) {
+ this.writer_out.sync();
+ }
+ }
+
+}
Modified: hadoop/hbase/branches/0.20_on_hadoop-0.21/src/java/org/apache/hadoop/hbase/util/InfoServer.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20_on_hadoop-0.21/src/java/org/apache/hadoop/hbase/util/InfoServer.java?rev=903054&r1=903053&r2=903054&view=diff
==============================================================================
--- hadoop/hbase/branches/0.20_on_hadoop-0.21/src/java/org/apache/hadoop/hbase/util/InfoServer.java (original)
+++ hadoop/hbase/branches/0.20_on_hadoop-0.21/src/java/org/apache/hadoop/hbase/util/InfoServer.java Tue Jan 26 02:28:18 2010
@@ -23,10 +23,8 @@
import org.apache.hadoop.http.HttpServer;
import org.mortbay.jetty.handler.ContextHandlerCollection;
-import org.mortbay.jetty.handler.HandlerCollection;
import org.mortbay.jetty.servlet.Context;
import org.mortbay.jetty.servlet.DefaultServlet;
-import org.mortbay.jetty.webapp.WebAppContext;
/**
* Create a Jetty embedded server to answer http requests. The primary goal
@@ -50,19 +48,7 @@
public InfoServer(String name, String bindAddress, int port, boolean findPort)
throws IOException {
super(name, bindAddress, port, findPort);
-
- HandlerCollection handlers =
- new ContextHandlerCollection();
-
- if (name.equals("master")) {
- // Put up the rest webapp.
- WebAppContext wac = new WebAppContext();
- wac.setContextPath("/api");
- wac.setWar(getWebAppDir("rest"));
-
- handlers.addHandler(wac);
- }
- webServer.addHandler(handlers);
+ webServer.addHandler(new ContextHandlerCollection());
}
protected void addDefaultApps(ContextHandlerCollection parent, String appDir)
@@ -78,7 +64,9 @@
break;
}
}
- defaultContexts.put(oldLogsContext, Boolean.FALSE);
+ if (oldLogsContext != null) {
+ this.defaultContexts.put(oldLogsContext, Boolean.FALSE);
+ }
// Now do my logs.
// set up the context for "/logs/" if "hadoop.log.dir" property is defined.
String logDir = System.getProperty("hbase.log.dir");
Modified: hadoop/hbase/branches/0.20_on_hadoop-0.21/src/java/org/apache/hadoop/hbase/util/Merge.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20_on_hadoop-0.21/src/java/org/apache/hadoop/hbase/util/Merge.java?rev=903054&r1=903053&r2=903054&view=diff
==============================================================================
--- hadoop/hbase/branches/0.20_on_hadoop-0.21/src/java/org/apache/hadoop/hbase/util/Merge.java (original)
+++ hadoop/hbase/branches/0.20_on_hadoop-0.21/src/java/org/apache/hadoop/hbase/util/Merge.java Tue Jan 26 02:28:18 2010
@@ -36,8 +36,8 @@
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HBaseAdmin;
-import org.apache.hadoop.hbase.regionserver.HLog;
import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.regionserver.wal.HLog;
import org.apache.hadoop.io.WritableComparator;
import org.apache.hadoop.util.GenericOptionsParser;
import org.apache.hadoop.util.Tool;
@@ -333,7 +333,7 @@
*
* @throws IOException
*/
- private int parseArgs(String[] args) {
+ private int parseArgs(String[] args) throws IOException {
GenericOptionsParser parser =
new GenericOptionsParser(this.getConf(), args);
Modified: hadoop/hbase/branches/0.20_on_hadoop-0.21/src/java/org/apache/hadoop/hbase/util/MetaUtils.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20_on_hadoop-0.21/src/java/org/apache/hadoop/hbase/util/MetaUtils.java?rev=903054&r1=903053&r2=903054&view=diff
==============================================================================
--- hadoop/hbase/branches/0.20_on_hadoop-0.21/src/java/org/apache/hadoop/hbase/util/MetaUtils.java (original)
+++ hadoop/hbase/branches/0.20_on_hadoop-0.21/src/java/org/apache/hadoop/hbase/util/MetaUtils.java Tue Jan 26 02:28:18 2010
@@ -42,10 +42,10 @@
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.regionserver.HLog;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.InternalScanner;
import org.apache.hadoop.hbase.regionserver.Store;
+import org.apache.hadoop.hbase.regionserver.wal.HLog;
/**
* Contains utility methods for manipulating HBase meta tables.
Modified: hadoop/hbase/branches/0.20_on_hadoop-0.21/src/java/org/apache/hadoop/hbase/util/Migrate.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20_on_hadoop-0.21/src/java/org/apache/hadoop/hbase/util/Migrate.java?rev=903054&r1=903053&r2=903054&view=diff
==============================================================================
--- hadoop/hbase/branches/0.20_on_hadoop-0.21/src/java/org/apache/hadoop/hbase/util/Migrate.java (original)
+++ hadoop/hbase/branches/0.20_on_hadoop-0.21/src/java/org/apache/hadoop/hbase/util/Migrate.java Tue Jan 26 02:28:18 2010
@@ -44,7 +44,6 @@
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.io.hfile.Compression;
import org.apache.hadoop.hbase.io.hfile.HFile;
-import org.apache.hadoop.hbase.io.hfile.Compression.Algorithm;
import org.apache.hadoop.hbase.migration.nineteen.io.BloomFilterMapFile;
import org.apache.hadoop.hbase.migration.nineteen.regionserver.HStoreFile;
import org.apache.hadoop.hbase.regionserver.HRegion;
@@ -115,7 +114,7 @@
}
/*
- * Sets the hbase rootdir as fs.default.name.
+ * Sets the hbase rootdir as fs.defaultFS.
* @return True if succeeded.
*/
private boolean setFsDefaultName() {
@@ -130,7 +129,7 @@
" configuration parameter '" + HConstants.HBASE_DIR + "'", e);
return false;
}
- getConf().set("fs.default.name", rd.toString());
+ getConf().set("fs.defaultFS", rd.toString());
return true;
}
@@ -163,7 +162,7 @@
}
}
- public int run(String[] args) {
+ public int run(String[] args) throws IOException {
if (parseArgs(args) != 0) {
return -1;
}
@@ -511,7 +510,7 @@
return result;
}
- private int parseArgs(String[] args) {
+ private int parseArgs(String[] args) throws IOException {
Options opts = new Options();
GenericOptionsParser parser =
new GenericOptionsParser(this.getConf(), opts, args);
Added: hadoop/hbase/branches/0.20_on_hadoop-0.21/src/test/mapred-queues.xml
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20_on_hadoop-0.21/src/test/mapred-queues.xml?rev=903054&view=auto
==============================================================================
--- hadoop/hbase/branches/0.20_on_hadoop-0.21/src/test/mapred-queues.xml (added)
+++ hadoop/hbase/branches/0.20_on_hadoop-0.21/src/test/mapred-queues.xml Tue Jan 26 02:28:18 2010
@@ -0,0 +1,56 @@
+<?xml version="1.0"?>
+<!-- This is the template for queue configuration. The format supports nesting of
+ queues within queues - a feature called hierarchical queues. All queues are
+ defined within the 'queues' tag which is the top level element for this
+ XML document.
+ The 'aclsEnabled' attribute should be set to true if ACLs should be checked
+ on queue operations such as submitting jobs, killing jobs, etc. -->
+<queues aclsEnabled="false">
+
+ <!-- Configuration for a queue is specified by defining a 'queue' element. -->
+ <queue>
+
+ <!-- Name of a queue. Queue name cannot contain a ':' -->
+ <name>default</name>
+
+ <!-- properties for a queue, typically used by schedulers,
+ can be defined here -->
+ <properties>
+ </properties>
+
+ <!-- State of the queue. If running, the queue will accept new jobs.
+ If stopped, the queue will not accept new jobs. -->
+ <state>running</state>
+
+ <!-- Specifies the ACLs to check for submitting jobs to this queue.
+ If set to '*', it allows all users to submit jobs to the queue.
+ For specifying a list of users and groups the format to use is
+ user1,user2 group1,group2 -->
+ <acl-submit-job>*</acl-submit-job>
+
+ <!-- Specifies the ACLs to check for modifying jobs in this queue.
+ Modifications include killing jobs, tasks of jobs or changing
+ priorities.
+ If set to '*', it allows all users to modify jobs in the queue.
+ For specifying a list of users and groups the format to use is
+ user1,user2 group1,group2 -->
+ <acl-administer-jobs>*</acl-administer-jobs>
+ </queue>
+
+ <!-- Here is a sample of a hierarchical queue configuration
+ where q2 is a child of q1. In this example, q2 is a leaf level
+ queue as it has no queues configured within it. Currently, ACLs
+ and state are only supported for the leaf level queues.
+ Note also the usage of properties for the queue q2.
+ <queue>
+ <name>q1</name>
+ <queue>
+ <name>q2</name>
+ <properties>
+ <property key="capacity" value="20"/>
+ <property key="user-limit" value="30"/>
+ </properties>
+ </queue>
+ </queue>
+ -->
+</queues>
Modified: hadoop/hbase/branches/0.20_on_hadoop-0.21/src/test/org/apache/hadoop/hbase/HBaseClusterTestCase.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20_on_hadoop-0.21/src/test/org/apache/hadoop/hbase/HBaseClusterTestCase.java?rev=903054&r1=903053&r2=903054&view=diff
==============================================================================
--- hadoop/hbase/branches/0.20_on_hadoop-0.21/src/test/org/apache/hadoop/hbase/HBaseClusterTestCase.java (original)
+++ hadoop/hbase/branches/0.20_on_hadoop-0.21/src/test/org/apache/hadoop/hbase/HBaseClusterTestCase.java Tue Jan 26 02:28:18 2010
@@ -125,7 +125,7 @@
// mangle the conf so that the fs parameter points to the minidfs we
// just started up
FileSystem filesystem = dfsCluster.getFileSystem();
- conf.set("fs.default.name", filesystem.getUri().toString());
+ conf.set("fs.defaultFS", filesystem.getUri().toString());
Path parentdir = filesystem.getHomeDirectory();
conf.set(HConstants.HBASE_DIR, parentdir.toString());
filesystem.mkdirs(parentdir);
Modified: hadoop/hbase/branches/0.20_on_hadoop-0.21/src/test/org/apache/hadoop/hbase/HBaseTestCase.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20_on_hadoop-0.21/src/test/org/apache/hadoop/hbase/HBaseTestCase.java?rev=903054&r1=903053&r2=903054&view=diff
==============================================================================
--- hadoop/hbase/branches/0.20_on_hadoop-0.21/src/test/org/apache/hadoop/hbase/HBaseTestCase.java (original)
+++ hadoop/hbase/branches/0.20_on_hadoop-0.21/src/test/org/apache/hadoop/hbase/HBaseTestCase.java Tue Jan 26 02:28:18 2010
@@ -114,7 +114,7 @@
protected void setUp() throws Exception {
super.setUp();
localfs =
- (conf.get("fs.default.name", "file:///").compareTo("file:///") == 0);
+ (conf.get("fs.defaultFS", "file:///").compareTo("file:///") == 0);
if (fs == null) {
this.fs = FileSystem.get(conf);
Modified: hadoop/hbase/branches/0.20_on_hadoop-0.21/src/test/org/apache/hadoop/hbase/MiniHBaseCluster.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20_on_hadoop-0.21/src/test/org/apache/hadoop/hbase/MiniHBaseCluster.java?rev=903054&r1=903053&r2=903054&view=diff
==============================================================================
--- hadoop/hbase/branches/0.20_on_hadoop-0.21/src/test/org/apache/hadoop/hbase/MiniHBaseCluster.java (original)
+++ hadoop/hbase/branches/0.20_on_hadoop-0.21/src/test/org/apache/hadoop/hbase/MiniHBaseCluster.java Tue Jan 26 02:28:18 2010
@@ -147,7 +147,7 @@
LOG.info("Stopping " + server.toString());
if (!shutdownFS) {
// Stop the running of the hdfs shutdown thread in tests.
- server.getRegionServer().setHDFSShutdownThreadOnExit(null);
+ server.getRegionServer().setShutdownHDFS(false);
}
server.getRegionServer().stop();
return server;
@@ -172,8 +172,9 @@
/**
* Shut down the mini HBase cluster
+ * @throws IOException
*/
- public void shutdown() {
+ public void shutdown() throws IOException {
if (this.hbaseCluster != null) {
this.hbaseCluster.shutdown();
}
@@ -199,7 +200,7 @@
public List<LocalHBaseCluster.RegionServerThread> getRegionThreads() {
return this.hbaseCluster.getRegionServers();
}
-
+
/**
* Grab a numbered region server of your choice.
* @param serverNumber
@@ -208,4 +209,4 @@
public HRegionServer getRegionServer(int serverNumber) {
return hbaseCluster.getRegionServer(serverNumber);
}
-}
+}
\ No newline at end of file
Modified: hadoop/hbase/branches/0.20_on_hadoop-0.21/src/test/org/apache/hadoop/hbase/PerformanceEvaluation.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20_on_hadoop-0.21/src/test/org/apache/hadoop/hbase/PerformanceEvaluation.java?rev=903054&r1=903053&r2=903054&view=diff
==============================================================================
--- hadoop/hbase/branches/0.20_on_hadoop-0.21/src/test/org/apache/hadoop/hbase/PerformanceEvaluation.java (original)
+++ hadoop/hbase/branches/0.20_on_hadoop-0.21/src/test/org/apache/hadoop/hbase/PerformanceEvaluation.java Tue Jan 26 02:28:18 2010
@@ -863,7 +863,7 @@
// mangle the conf so that the fs parameter points to the minidfs we
// just started up
FileSystem fs = dfsCluster.getFileSystem();
- conf.set("fs.default.name", fs.getUri().toString());
+ conf.set("fs.defaultFS", fs.getUri().toString());
Path parentdir = fs.getHomeDirectory();
conf.set(HConstants.HBASE_DIR, parentdir.toString());
fs.mkdirs(parentdir);
Modified: hadoop/hbase/branches/0.20_on_hadoop-0.21/src/test/org/apache/hadoop/hbase/regionserver/TestCompaction.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20_on_hadoop-0.21/src/test/org/apache/hadoop/hbase/regionserver/TestCompaction.java?rev=903054&r1=903053&r2=903054&view=diff
==============================================================================
--- hadoop/hbase/branches/0.20_on_hadoop-0.21/src/test/org/apache/hadoop/hbase/regionserver/TestCompaction.java (original)
+++ hadoop/hbase/branches/0.20_on_hadoop-0.21/src/test/org/apache/hadoop/hbase/regionserver/TestCompaction.java Tue Jan 26 02:28:18 2010
@@ -35,6 +35,7 @@
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.io.hfile.HFileScanner;
+import org.apache.hadoop.hbase.regionserver.wal.HLog;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hdfs.MiniDFSCluster;
Modified: hadoop/hbase/branches/0.20_on_hadoop-0.21/src/test/org/apache/hadoop/hbase/regionserver/TestHLog.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20_on_hadoop-0.21/src/test/org/apache/hadoop/hbase/regionserver/TestHLog.java?rev=903054&r1=903053&r2=903054&view=diff
==============================================================================
--- hadoop/hbase/branches/0.20_on_hadoop-0.21/src/test/org/apache/hadoop/hbase/regionserver/TestHLog.java (original)
+++ hadoop/hbase/branches/0.20_on_hadoop-0.21/src/test/org/apache/hadoop/hbase/regionserver/TestHLog.java Tue Jan 26 02:28:18 2010
@@ -21,14 +21,14 @@
import java.io.IOException;
import java.util.ArrayList;
-import java.util.HashMap;
import java.util.List;
-import java.util.Map;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseTestCase;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.regionserver.wal.HLog;
+import org.apache.hadoop.hbase.regionserver.wal.HLogKey;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.io.SequenceFile;
@@ -63,31 +63,6 @@
}
/**
- * Test the findMemstoresWithEditsOlderThan method.
- * @throws IOException
- */
- public void testFindMemstoresWithEditsOlderThan() throws IOException {
- Map<byte [], Long> regionsToSeqids = new HashMap<byte [], Long>();
- for (int i = 0; i < 10; i++) {
- Long l = new Long(i);
- regionsToSeqids.put(l.toString().getBytes(), l);
- }
- byte [][] regions =
- HLog.findMemstoresWithEditsOlderThan(1, regionsToSeqids);
- assertEquals(1, regions.length);
- assertTrue(Bytes.equals(regions[0], "0".getBytes()));
- regions = HLog.findMemstoresWithEditsOlderThan(3, regionsToSeqids);
- int count = 3;
- assertEquals(count, regions.length);
- // Regions returned are not ordered.
- for (int i = 0; i < count; i++) {
- assertTrue(Bytes.equals(regions[i], "0".getBytes()) ||
- Bytes.equals(regions[i], "1".getBytes()) ||
- Bytes.equals(regions[i], "2".getBytes()));
- }
- }
-
- /**
* Just write multiple logs then split. Before fix for HADOOP-2283, this
* would fail.
* @throws IOException
@@ -108,7 +83,7 @@
column));
System.out.println("Region " + i + ": " + edit);
log.append(Bytes.toBytes("" + i), tableName, edit,
- false, System.currentTimeMillis());
+ System.currentTimeMillis());
}
}
log.rollWriter();
@@ -174,7 +149,7 @@
cols.add(new KeyValue(row, Bytes.toBytes("column:" + Integer.toString(i)),
timestamp, new byte[] { (byte)(i + '0') }));
}
- log.append(regionName, tableName, cols, false, System.currentTimeMillis());
+ log.append(regionName, tableName, cols, System.currentTimeMillis());
long logSeqId = log.startCacheFlush();
log.completeCacheFlush(regionName, tableName, logSeqId);
log.close();
Modified: hadoop/hbase/branches/0.20_on_hadoop-0.21/src/test/org/apache/hadoop/hbase/regionserver/TestHRegion.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20_on_hadoop-0.21/src/test/org/apache/hadoop/hbase/regionserver/TestHRegion.java?rev=903054&r1=903053&r2=903054&view=diff
==============================================================================
--- hadoop/hbase/branches/0.20_on_hadoop-0.21/src/test/org/apache/hadoop/hbase/regionserver/TestHRegion.java (original)
+++ hadoop/hbase/branches/0.20_on_hadoop-0.21/src/test/org/apache/hadoop/hbase/regionserver/TestHRegion.java Tue Jan 26 02:28:18 2010
@@ -46,13 +46,10 @@
import org.apache.hadoop.hbase.filter.SingleColumnValueFilter;
import org.apache.hadoop.hbase.filter.CompareFilter;
import org.apache.hadoop.hbase.filter.BinaryComparator;
-import org.apache.hadoop.hbase.filter.RowFilter;
import org.apache.hadoop.hbase.filter.Filter;
import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
import org.apache.hadoop.hbase.regionserver.HRegion.RegionScanner;
import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.Pair;
-import junit.framework.Assert;
/**
* Basic stand-alone testing of HRegion.
@@ -1781,325 +1778,6 @@
}
}
- /**
- * Flushes the cache in a thread while scanning. The tests verify that the
- * scan is coherent - e.g. the returned results are always of the same or
- * later update as the previous results.
- * @throws IOException scan / compact
- * @throws InterruptedException thread join
- */
- public void testFlushCacheWhileScanning() throws IOException, InterruptedException {
- byte[] tableName = Bytes.toBytes("testFlushCacheWhileScanning");
- byte[] family = Bytes.toBytes("family");
- int numRows = 1000;
- int flushAndScanInterval = 10;
- int compactInterval = 10 * flushAndScanInterval;
-
- String method = "testFlushCacheWhileScanning";
- initHRegion(tableName,method, family);
- FlushThread flushThread = new FlushThread();
- flushThread.start();
-
- Scan scan = new Scan();
- scan.addFamily(family);
- scan.setFilter(new SingleColumnValueFilter(family, qual1,
- CompareFilter.CompareOp.EQUAL, new BinaryComparator(Bytes.toBytes(5L))));
-
- int expectedCount = 0;
- List<KeyValue> res = new ArrayList<KeyValue>();
-
- boolean toggle=true;
- for (long i = 0; i < numRows; i++) {
- Put put = new Put(Bytes.toBytes(i));
- put.add(family, qual1, Bytes.toBytes(i % 10));
- region.put(put);
-
- if (i != 0 && i % compactInterval == 0) {
- //System.out.println("iteration = " + i);
- region.compactStores(true);
- }
-
- if (i % 10 == 5L) {
- expectedCount++;
- }
-
- if (i != 0 && i % flushAndScanInterval == 0) {
- res.clear();
- InternalScanner scanner = region.getScanner(scan);
- if (toggle) {
- flushThread.flush();
- }
- while (scanner.next(res)) ;
- if (!toggle) {
- flushThread.flush();
- }
- Assert.assertEquals("i=" + i, expectedCount, res.size());
- toggle = !toggle;
- }
- }
-
- flushThread.done();
- flushThread.join();
- flushThread.checkNoError();
- }
-
- protected class FlushThread extends Thread {
- private volatile boolean done;
- private Throwable error = null;
-
- public void done() {
- done = true;
- synchronized (this) {
- interrupt();
- }
- }
-
- public void checkNoError() {
- if (error != null) {
- Assert.assertNull(error);
- }
- }
-
- @Override
- public void run() {
- done = false;
- while (!done) {
- synchronized (this) {
- try {
- wait();
- } catch (InterruptedException ignored) {
- if (done) {
- break;
- }
- }
- }
- try {
- region.flushcache();
- } catch (IOException e) {
- if (!done) {
- LOG.error("Error while flusing cache", e);
- error = e;
- }
- break;
- }
- }
-
- }
-
- public void flush() {
- synchronized (this) {
- notify();
- }
-
- }
- }
-
- /**
- * Writes very wide records and scans for the latest every time..
- * Flushes and compacts the region every now and then to keep things
- * realistic.
- *
- * @throws IOException by flush / scan / compaction
- * @throws InterruptedException when joining threads
- */
- public void testWritesWhileScanning()
- throws IOException, InterruptedException {
- byte[] tableName = Bytes.toBytes("testWritesWhileScanning");
- int testCount = 100;
- int numRows = 1;
- int numFamilies = 10;
- int numQualifiers = 100;
- int flushInterval = 7;
- int compactInterval = 5 * flushInterval;
- byte[][] families = new byte[numFamilies][];
- for (int i = 0; i < numFamilies; i++) {
- families[i] = Bytes.toBytes("family" + i);
- }
- byte[][] qualifiers = new byte[numQualifiers][];
- for (int i = 0; i < numQualifiers; i++) {
- qualifiers[i] = Bytes.toBytes("qual" + i);
- }
-
- String method = "testWritesWhileScanning";
- initHRegion(tableName, method, families);
- PutThread putThread = new PutThread(numRows, families, qualifiers);
- putThread.start();
- FlushThread flushThread = new FlushThread();
- flushThread.start();
-
- Scan scan = new Scan();
- scan.setFilter(new RowFilter(CompareFilter.CompareOp.EQUAL,
- new BinaryComparator(Bytes.toBytes("row0"))));
-
- int expectedCount = numFamilies * numQualifiers;
- List<KeyValue> res = new ArrayList<KeyValue>();
-
- long prevTimestamp = 0L;
- for (int i = 0; i < testCount; i++) {
-
- if (i != 0 && i % compactInterval == 0) {
- region.compactStores(true);
- }
-
- if (i != 0 && i % flushInterval == 0) {
- //System.out.println("scan iteration = " + i);
- flushThread.flush();
- }
-
- boolean previousEmpty = res.isEmpty();
- res.clear();
- InternalScanner scanner = region.getScanner(scan);
- while (scanner.next(res)) ;
- if (!res.isEmpty() || !previousEmpty || i > compactInterval) {
- Assert.assertEquals("i=" + i, expectedCount, res.size());
- long timestamp = res.get(0).getTimestamp();
- Assert.assertTrue(timestamp >= prevTimestamp);
- prevTimestamp = timestamp;
- }
- }
-
- putThread.done();
- putThread.join();
- putThread.checkNoError();
-
- flushThread.done();
- flushThread.join();
- flushThread.checkNoError();
- }
-
- protected class PutThread extends Thread {
- private volatile boolean done;
- private Throwable error = null;
- private int numRows;
- private byte[][] families;
- private byte[][] qualifiers;
-
- private PutThread(int numRows, byte[][] families,
- byte[][] qualifiers) {
- this.numRows = numRows;
- this.families = families;
- this.qualifiers = qualifiers;
- }
-
- public void done() {
- done = true;
- synchronized (this) {
- interrupt();
- }
- }
-
- public void checkNoError() {
- if (error != null) {
- Assert.assertNull(error);
- }
- }
-
- @Override
- public void run() {
- done = false;
- int val = 0;
- while (!done) {
- try {
- for (int r = 0; r < numRows; r++) {
- byte[] row = Bytes.toBytes("row" + r);
- Put put = new Put(row);
- for (int f = 0; f < families.length; f++) {
- for (int q = 0; q < qualifiers.length; q++) {
- put.add(families[f], qualifiers[q], (long) val,
- Bytes.toBytes(val));
- }
- }
- region.put(put);
- if (val > 0 && val % 47 == 0){
- //System.out.println("put iteration = " + val);
- Delete delete = new Delete(row, (long)val-30, null);
- region.delete(delete, null, true);
- }
- val++;
- }
- } catch (IOException e) {
- LOG.error("error while putting records", e);
- error = e;
- break;
- }
- }
-
- }
-
- }
-
-
- /**
- * Writes very wide records and gets the latest row every time..
- * Flushes and compacts the region every now and then to keep things
- * realistic.
- *
- * @throws IOException by flush / scan / compaction
- * @throws InterruptedException when joining threads
- */
- public void testWritesWhileGetting()
- throws IOException, InterruptedException {
- byte[] tableName = Bytes.toBytes("testWritesWhileScanning");
- int testCount = 200;
- int numRows = 1;
- int numFamilies = 10;
- int numQualifiers = 100;
- int flushInterval = 10;
- int compactInterval = 10 * flushInterval;
- byte[][] families = new byte[numFamilies][];
- for (int i = 0; i < numFamilies; i++) {
- families[i] = Bytes.toBytes("family" + i);
- }
- byte[][] qualifiers = new byte[numQualifiers][];
- for (int i = 0; i < numQualifiers; i++) {
- qualifiers[i] = Bytes.toBytes("qual" + i);
- }
-
- String method = "testWritesWhileScanning";
- initHRegion(tableName, method, families);
- PutThread putThread = new PutThread(numRows, families, qualifiers);
- putThread.start();
- FlushThread flushThread = new FlushThread();
- flushThread.start();
-
- Get get = new Get(Bytes.toBytes("row0"));
- Result result = null;
-
- int expectedCount = numFamilies * numQualifiers;
-
- long prevTimestamp = 0L;
- for (int i = 0; i < testCount; i++) {
-
- if (i != 0 && i % compactInterval == 0) {
- region.compactStores(true);
- }
-
- if (i != 0 && i % flushInterval == 0) {
- //System.out.println("iteration = " + i);
- flushThread.flush();
- }
-
- boolean previousEmpty = result == null || result.isEmpty();
- result = region.get(get, null);
- if (!result.isEmpty() || !previousEmpty || i > compactInterval) {
- Assert.assertEquals("i=" + i, expectedCount, result.size());
- long timestamp =
- result.getCellValue(families[0], qualifiers[0]).getTimestamp();
- Assert.assertTrue(timestamp >= prevTimestamp);
- prevTimestamp = timestamp;
- }
- }
-
- putThread.done();
- putThread.join();
- putThread.checkNoError();
-
- flushThread.done();
- flushThread.join();
- flushThread.checkNoError();
- }
-
-
public void testIndexesScanWithOneDeletedRow() throws IOException {
byte[] tableName = Bytes.toBytes("testIndexesScanWithOneDeletedRow");
byte[] family = Bytes.toBytes("family");
Modified: hadoop/hbase/branches/0.20_on_hadoop-0.21/src/test/org/apache/hadoop/hbase/regionserver/TestStore.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20_on_hadoop-0.21/src/test/org/apache/hadoop/hbase/regionserver/TestStore.java?rev=903054&r1=903053&r2=903054&view=diff
==============================================================================
--- hadoop/hbase/branches/0.20_on_hadoop-0.21/src/test/org/apache/hadoop/hbase/regionserver/TestStore.java (original)
+++ hadoop/hbase/branches/0.20_on_hadoop-0.21/src/test/org/apache/hadoop/hbase/regionserver/TestStore.java Tue Jan 26 02:28:18 2010
@@ -10,6 +10,7 @@
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.io.hfile.HFile.Writer;
+import org.apache.hadoop.hbase.regionserver.wal.HLog;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.util.Progressable;
Added: hadoop/hbase/branches/0.20_on_hadoop-0.21/src/test/org/apache/hadoop/hbase/regionserver/wal/TestHLog.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20_on_hadoop-0.21/src/test/org/apache/hadoop/hbase/regionserver/wal/TestHLog.java?rev=903054&view=auto
==============================================================================
--- hadoop/hbase/branches/0.20_on_hadoop-0.21/src/test/org/apache/hadoop/hbase/regionserver/wal/TestHLog.java (added)
+++ hadoop/hbase/branches/0.20_on_hadoop-0.21/src/test/org/apache/hadoop/hbase/regionserver/wal/TestHLog.java Tue Jan 26 02:28:18 2010
@@ -0,0 +1,306 @@
+/**
+ * Copyright 2007 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver.wal;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HBaseTestCase;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.regionserver.wal.HLog;
+import org.apache.hadoop.hbase.regionserver.wal.HLogKey;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+
+/** JUnit test case for HLog */
+public class TestHLog extends HBaseTestCase implements HConstants {
+ private Path dir;
+ private MiniDFSCluster cluster;
+
+ @Override
+ public void setUp() throws Exception {
+ // Enable append for these tests.
+ this.conf.setBoolean("dfs.support.append", true);
+ // Make block sizes small.
+ this.conf.setInt("dfs.blocksize", 1024 * 1024);
+ this.conf.setInt("hbase.regionserver.flushlogentries", 1);
+ cluster = new MiniDFSCluster(conf, 3, true, (String[])null);
+ // Set the hbase.rootdir to be the home directory in mini dfs.
+ this.conf.set(HConstants.HBASE_DIR,
+ this.cluster.getFileSystem().getHomeDirectory().toString());
+ super.setUp();
+ this.dir = new Path("/hbase", getName());
+ if (fs.exists(dir)) {
+ fs.delete(dir, true);
+ }
+ }
+
+ @Override
+ public void tearDown() throws Exception {
+ if (this.fs.exists(this.dir)) {
+ this.fs.delete(this.dir, true);
+ }
+ shutdownDfs(cluster);
+ super.tearDown();
+ }
+
+ /**
+ * Just write multiple logs then split. Before fix for HADOOP-2283, this
+ * would fail.
+ * @throws IOException
+ */
+ public void testSplit() throws IOException {
+ final byte [] tableName = Bytes.toBytes(getName());
+ final byte [] rowName = tableName;
+ HLog log = new HLog(this.fs, this.dir, this.conf, null);
+ final int howmany = 3;
+ // Add edits for three regions.
+ try {
+ for (int ii = 0; ii < howmany; ii++) {
+ for (int i = 0; i < howmany; i++) {
+ for (int j = 0; j < howmany; j++) {
+ List<KeyValue> edit = new ArrayList<KeyValue>();
+ byte [] family = Bytes.toBytes("column");
+ byte [] qualifier = Bytes.toBytes(Integer.toString(j));
+ byte [] column = Bytes.toBytes("column:" + Integer.toString(j));
+ edit.add(new KeyValue(rowName, family, qualifier,
+ System.currentTimeMillis(), column));
+ System.out.println("Region " + i + ": " + edit);
+ log.append(Bytes.toBytes("" + i), tableName, edit,
+ System.currentTimeMillis());
+ }
+ }
+ log.rollWriter();
+ }
+ List<Path> splits =
+ HLog.splitLog(this.testDir, this.dir, this.fs, this.conf);
+ verifySplits(splits, howmany);
+ log = null;
+ } finally {
+ if (log != null) {
+ log.closeAndDelete();
+ }
+ }
+ }
+
+ /**
+ * Test new HDFS-265 sync.
+ * @throws Exception
+ */
+ public void testSync() throws Exception {
+ byte [] bytes = Bytes.toBytes(getName());
+ // First verify that using streams all works.
+ Path p = new Path(this.dir, getName() + ".fsdos");
+ FSDataOutputStream out = fs.create(p);
+ out.write(bytes);
+ out.sync();
+ FSDataInputStream in = fs.open(p);
+ assertTrue(in.available() > 0);
+ byte [] buffer = new byte [1024];
+ int read = in.read(buffer);
+ assertEquals(bytes.length, read);
+ out.close();
+ in.close();
+ Path subdir = new Path(this.dir, "hlogdir");
+ HLog wal = new HLog(this.fs, subdir, this.conf, null);
+ final int total = 20;
+ for (int i = 0; i < total; i++) {
+ List<KeyValue> kvs = new ArrayList<KeyValue>();
+ kvs.add(new KeyValue(Bytes.toBytes(i), bytes, bytes));
+ wal.append(bytes, bytes, kvs, System.currentTimeMillis());
+ }
+ // Now call sync and try reading. Opening a Reader before you sync just
+ // gives you an EOFException.
+ wal.sync();
+ // Open a Reader.
+ Path walPath = wal.computeFilename(wal.getFilenum());
+ HLog.Reader reader = HLog.getReader(fs, walPath, conf);
+ int count = 0;
+ HLog.Entry entry = new HLog.Entry();
+ while ((entry = reader.next(entry)) != null) count++;
+ assertEquals(total, count);
+ reader.close();
+ // Now check that opening a Reader works on a file that has had a sync
+ // done on it.
+ for (int i = 0; i < total; i++) {
+ List<KeyValue> kvs = new ArrayList<KeyValue>();
+ kvs.add(new KeyValue(Bytes.toBytes(i), bytes, bytes));
+ wal.append(bytes, bytes, kvs, System.currentTimeMillis());
+ }
+ reader = HLog.getReader(fs, walPath, conf);
+ count = 0;
+ while((entry = reader.next(entry)) != null) count++;
+ assertTrue(count >= total);
+ reader.close();
+ // If I sync, should see double the edits.
+ wal.sync();
+ reader = HLog.getReader(fs, walPath, conf);
+ count = 0;
+ while((entry = reader.next(entry)) != null) count++;
+ assertEquals(total * 2, count);
+ // Now do a test that ensures stuff works when we go over block boundary,
+ // especially that we return good length on file.
+ final byte [] value = new byte[1025 * 1024]; // Make a value just over 1MB.
+ for (int i = 0; i < total; i++) {
+ List<KeyValue> kvs = new ArrayList<KeyValue>();
+ kvs.add(new KeyValue(Bytes.toBytes(i), bytes, value));
+ wal.append(bytes, bytes, kvs, System.currentTimeMillis());
+ }
+ // Now I should have written out lots of blocks. Sync then read.
+ wal.sync();
+ reader = HLog.getReader(fs, walPath, conf);
+ count = 0;
+ while((entry = reader.next(entry)) != null) count++;
+ assertEquals(total * 3, count);
+ reader.close();
+ // Close it and ensure that closed, Reader gets right length also.
+ wal.close();
+ reader = HLog.getReader(fs, walPath, conf);
+ count = 0;
+ while((entry = reader.next(entry)) != null) count++;
+ assertEquals(total * 3, count);
+ reader.close();
+ }
+
+ /**
+ * Test the findMemstoresWithEditsOlderThan method.
+ * @throws IOException
+ */
+ public void testFindMemstoresWithEditsOlderThan() throws IOException {
+ Map<byte [], Long> regionsToSeqids = new HashMap<byte [], Long>();
+ for (int i = 0; i < 10; i++) {
+ Long l = Long.valueOf(i);
+ regionsToSeqids.put(l.toString().getBytes(), l);
+ }
+ byte [][] regions =
+ HLog.findMemstoresWithEditsOlderThan(1, regionsToSeqids);
+ assertEquals(1, regions.length);
+ assertTrue(Bytes.equals(regions[0], "0".getBytes()));
+ regions = HLog.findMemstoresWithEditsOlderThan(3, regionsToSeqids);
+ int count = 3;
+ assertEquals(count, regions.length);
+ // Regions returned are not ordered.
+ for (int i = 0; i < count; i++) {
+ assertTrue(Bytes.equals(regions[i], "0".getBytes()) ||
+ Bytes.equals(regions[i], "1".getBytes()) ||
+ Bytes.equals(regions[i], "2".getBytes()));
+ }
+ }
+
+ private void verifySplits(List<Path> splits, final int howmany)
+ throws IOException {
+ assertEquals(howmany, splits.size());
+ for (int i = 0; i < splits.size(); i++) {
+ HLog.Reader reader = HLog.getReader(this.fs, splits.get(i), conf);
+ try {
+ int count = 0;
+ String previousRegion = null;
+ long seqno = -1;
+ HLog.Entry entry = new HLog.Entry();
+ while((entry = reader.next(entry)) != null) {
+ HLogKey key = entry.getKey();
+ KeyValue kv = entry.getEdit();
+ String region = Bytes.toString(key.getRegionName());
+ // Assert that all edits are for same region.
+ if (previousRegion != null) {
+ assertEquals(previousRegion, region);
+ }
+ assertTrue(seqno < key.getLogSeqNum());
+ seqno = key.getLogSeqNum();
+ previousRegion = region;
+ System.out.println(key + " " + kv);
+ count++;
+ }
+ assertEquals(howmany * howmany, count);
+ } finally {
+ reader.close();
+ }
+ }
+ }
+
+ /**
+ * Tests that we can write out an edit, close, and then read it back in again.
+ * @throws IOException
+ */
+ public void testEditAdd() throws IOException {
+ final int COL_COUNT = 10;
+ final byte [] regionName = Bytes.toBytes("regionname");
+ final byte [] tableName = Bytes.toBytes("tablename");
+ final byte [] row = Bytes.toBytes("row");
+ HLog.Reader reader = null;
+ HLog log = new HLog(fs, dir, this.conf, null);
+ try {
+ // Write columns named 1, 2, 3, etc. and then values of single byte
+ // 1, 2, 3...
+ long timestamp = System.currentTimeMillis();
+ List<KeyValue> cols = new ArrayList<KeyValue>();
+ for (int i = 0; i < COL_COUNT; i++) {
+ cols.add(new KeyValue(row, Bytes.toBytes("column"),
+ Bytes.toBytes(Integer.toString(i)),
+ timestamp, new byte[] { (byte)(i + '0') }));
+ }
+ log.append(regionName, tableName, cols, System.currentTimeMillis());
+ long logSeqId = log.startCacheFlush();
+ log.completeCacheFlush(regionName, tableName, logSeqId);
+ log.close();
+ Path filename = log.computeFilename(log.getFilenum());
+ log = null;
+ // Now open a reader on the log and assert append worked.
+ reader = HLog.getReader(fs, filename, conf);
+ HLog.Entry entry = new HLog.Entry();
+ for (int i = 0; i < COL_COUNT; i++) {
+ reader.next(entry);
+ HLogKey key = entry.getKey();
+ KeyValue val = entry.getEdit();
+ assertTrue(Bytes.equals(regionName, key.getRegionName()));
+ assertTrue(Bytes.equals(tableName, key.getTablename()));
+ assertTrue(Bytes.equals(row, val.getRow()));
+ assertEquals((byte)(i + '0'), val.getValue()[0]);
+ System.out.println(key + " " + val);
+ }
+ while ((entry = reader.next(null)) != null) {
+ HLogKey key = entry.getKey();
+ KeyValue val = entry.getEdit();
+ // Assert only one more row... the meta flushed row.
+ assertTrue(Bytes.equals(regionName, key.getRegionName()));
+ assertTrue(Bytes.equals(tableName, key.getTablename()));
+ assertTrue(Bytes.equals(HLog.METAROW, val.getRow()));
+ assertTrue(Bytes.equals(HLog.METAFAMILY, val.getFamily()));
+ assertEquals(0, Bytes.compareTo(HLog.COMPLETE_CACHE_FLUSH,
+ val.getValue()));
+ System.out.println(key + " " + val);
+ }
+ } finally {
+ if (log != null) {
+ log.closeAndDelete();
+ }
+ if (reader != null) {
+ reader.close();
+ }
+ }
+ }
+}
Added: hadoop/hbase/branches/0.20_on_hadoop-0.21/src/test/org/apache/hadoop/hbase/regionserver/wal/TestLogRolling.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20_on_hadoop-0.21/src/test/org/apache/hadoop/hbase/regionserver/wal/TestLogRolling.java?rev=903054&view=auto
==============================================================================
--- hadoop/hbase/branches/0.20_on_hadoop-0.21/src/test/org/apache/hadoop/hbase/regionserver/wal/TestLogRolling.java (added)
+++ hadoop/hbase/branches/0.20_on_hadoop-0.21/src/test/org/apache/hadoop/hbase/regionserver/wal/TestLogRolling.java Tue Jan 26 02:28:18 2010
@@ -0,0 +1,157 @@
+/**
+ * Copyright 2007 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver.wal;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.HBaseClusterTestCase;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.regionserver.wal.HLog;
+import org.apache.hadoop.hbase.regionserver.HRegionServer;
+import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.util.Bytes;
+
+/**
+ * Test log deletion as logs are rolled.
+ */
+public class TestLogRolling extends HBaseClusterTestCase {
+ private static final Log LOG = LogFactory.getLog(TestLogRolling.class);
+ private HRegionServer server;
+ private HLog log;
+ private String tableName;
+ private byte[] value;
+
+ /**
+ * constructor
+ * @throws Exception
+ */
+ public TestLogRolling() throws Exception {
+ // start one regionserver and a minidfs.
+ super();
+ try {
+ this.server = null;
+ this.log = null;
+ this.tableName = null;
+ this.value = null;
+
+ String className = this.getClass().getName();
+ StringBuilder v = new StringBuilder(className);
+ while (v.length() < 1000) {
+ v.append(className);
+ }
+ value = Bytes.toBytes(v.toString());
+
+ } catch (Exception e) {
+ LOG.fatal("error in constructor", e);
+ throw e;
+ }
+ }
+
+ // Need to override this setup so we can edit the config before it gets sent
+ // to the cluster startup.
+ @Override
+ protected void preHBaseClusterSetup() {
+ // Force a region split after every 768KB
+ conf.setLong("hbase.hregion.max.filesize", 768L * 1024L);
+
+ // We roll the log after every 32 writes
+ conf.setInt("hbase.regionserver.maxlogentries", 32);
+
+ // For less frequently updated regions flush after every 2 flushes
+ conf.setInt("hbase.hregion.memstore.optionalflushcount", 2);
+
+ // We flush the cache after every 8192 bytes
+ conf.setInt("hbase.hregion.memstore.flush.size", 8192);
+
+ // Increase the amount of time between client retries
+ conf.setLong("hbase.client.pause", 15 * 1000);
+
+ // Reduce thread wake frequency so that other threads can get
+ // a chance to run.
+ conf.setInt(HConstants.THREAD_WAKE_FREQUENCY, 2 * 1000);
+ }
+
+ private void startAndWriteData() throws Exception {
+ // When the META table can be opened, the region servers are running
+ new HTable(conf, HConstants.META_TABLE_NAME);
+ this.server = cluster.getRegionThreads().get(0).getRegionServer();
+ this.log = server.getLog();
+
+ // Create the test table and open it
+ HTableDescriptor desc = new HTableDescriptor(tableName);
+ desc.addFamily(new HColumnDescriptor(HConstants.CATALOG_FAMILY));
+ HBaseAdmin admin = new HBaseAdmin(conf);
+ admin.createTable(desc);
+ HTable table = new HTable(conf, tableName);
+ for (int i = 1; i <= 256; i++) { // 256 writes should cause 8 log rolls
+ Put put = new Put(Bytes.toBytes("row" + String.format("%1$04d", i)));
+ put.add(HConstants.CATALOG_FAMILY, null, value);
+ table.put(put);
+ if (i % 32 == 0) {
+ // After every 32 writes sleep to let the log roller run
+ try {
+ Thread.sleep(2000);
+ } catch (InterruptedException e) {
+ // continue
+ }
+ }
+ }
+ }
+
+ /**
+ * Tests that logs are deleted
+ *
+ * @throws Exception
+ */
+ public void testLogRolling() throws Exception {
+ this.tableName = getName();
+ try {
+ startAndWriteData();
+ LOG.info("after writing there are " + log.getNumLogFiles() + " log files");
+
+ // flush all regions
+
+ List<HRegion> regions =
+ new ArrayList<HRegion>(server.getOnlineRegions());
+ for (HRegion r: regions) {
+ r.flushcache();
+ }
+
+ // Now roll the log
+ log.rollWriter();
+
+ int count = log.getNumLogFiles();
+ LOG.info("after flushing all regions and rolling logs there are " +
+ log.getNumLogFiles() + " log files");
+ assertTrue(("actual count: " + count), count <= 2);
+ } catch (Exception e) {
+ LOG.fatal("unexpected exception", e);
+ throw e;
+ }
+ }
+}
\ No newline at end of file
Modified: hadoop/hbase/branches/0.20_on_hadoop-0.21/src/test/org/apache/hadoop/hbase/util/TestMergeTool.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20_on_hadoop-0.21/src/test/org/apache/hadoop/hbase/util/TestMergeTool.java?rev=903054&r1=903053&r2=903054&view=diff
==============================================================================
--- hadoop/hbase/branches/0.20_on_hadoop-0.21/src/test/org/apache/hadoop/hbase/util/TestMergeTool.java (original)
+++ hadoop/hbase/branches/0.20_on_hadoop-0.21/src/test/org/apache/hadoop/hbase/util/TestMergeTool.java Tue Jan 26 02:28:18 2010
@@ -39,9 +39,9 @@
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.io.Cell;
-import org.apache.hadoop.hbase.regionserver.HLog;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.InternalScanner;
+import org.apache.hadoop.hbase.regionserver.wal.HLog;
import org.apache.hadoop.util.ToolRunner;
/** Test stand alone merge tool that can merge arbitrary regions */
@@ -113,7 +113,7 @@
// Start up dfs
this.dfsCluster = new MiniDFSCluster(conf, 2, true, (String[])null);
this.fs = this.dfsCluster.getFileSystem();
- conf.set("fs.default.name", fs.getUri().toString());
+ conf.set("fs.defaultFS", fs.getUri().toString());
Path parentdir = fs.getHomeDirectory();
conf.set(HConstants.HBASE_DIR, parentdir.toString());
fs.mkdirs(parentdir);