Posted to commits@hbase.apache.org by br...@apache.org on 2008/02/24 01:19:44 UTC
svn commit: r630550 [6/7] - in /hadoop/hbase/trunk: bin/ conf/
src/java/org/apache/hadoop/hbase/ src/java/org/apache/hadoop/hbase/client/
src/java/org/apache/hadoop/hbase/filter/
src/java/org/apache/hadoop/hbase/generated/regionserver/ src/java/org/apa...
Added: hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestDeleteFamily.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestDeleteFamily.java?rev=630550&view=auto
==============================================================================
--- hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestDeleteFamily.java (added)
+++ hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestDeleteFamily.java Sat Feb 23 16:19:34 2008
@@ -0,0 +1,172 @@
+/**
+ * Copyright 2007 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver;
+
+import java.io.IOException;
+import java.util.Map;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.dfs.MiniDFSCluster;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.hbase.HBaseTestCase;
+
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.StaticTestEnvironment;
+
+/**
+ * Test the functionality of deleteFamily.
+ */
+public class TestDeleteFamily extends HBaseTestCase {
+ static final Log LOG = LogFactory.getLog(TestDeleteFamily.class);
+ private MiniDFSCluster miniHdfs;
+
+ @Override
+ protected void setUp() throws Exception {
+ super.setUp();
+ this.miniHdfs = new MiniDFSCluster(this.conf, 1, true, null);
+ // Set the hbase.rootdir to be the home directory in mini dfs.
+ this.conf.set(HConstants.HBASE_DIR,
+ this.miniHdfs.getFileSystem().getHomeDirectory().toString());
+ }
+
+ /**
+ * Tests for HADOOP-2384.
+ * @throws Exception
+ */
+ public void testDeleteFamily() throws Exception {
+ HRegion region = null;
+ HRegionIncommon region_incommon = null;
+ try {
+ HTableDescriptor htd = createTableDescriptor(getName());
+ region = createNewHRegion(htd, null, null);
+ region_incommon = new HRegionIncommon(region);
+
+ // test memcache
+ makeSureItWorks(region, region_incommon, false);
+ // test hstore
+ makeSureItWorks(region, region_incommon, true);
+
+ } finally {
+ if (region != null) {
+ try {
+ region.close();
+ } catch (Exception e) {
+ e.printStackTrace();
+ }
+ region.getLog().closeAndDelete();
+ }
+ }
+ }
+
+ private void makeSureItWorks(HRegion region, HRegionIncommon region_incommon,
+ boolean flush)
+ throws Exception{
+    // insert a few versions' worth of data for a row
+ Text row = new Text("test_row");
+ long t0 = System.currentTimeMillis();
+ long t1 = t0 - 15000;
+ long t2 = t1 - 15000;
+
+ Text colA = new Text(COLUMNS[0].toString() + "a");
+ Text colB = new Text(COLUMNS[0].toString() + "b");
+ Text colC = new Text(COLUMNS[1].toString() + "c");
+
+ long lock = region_incommon.startUpdate(row);
+ region_incommon.put(lock, colA, cellData(0, flush).getBytes());
+ region_incommon.put(lock, colB, cellData(0, flush).getBytes());
+ region_incommon.put(lock, colC, cellData(0, flush).getBytes());
+ region_incommon.commit(lock, t0);
+
+ lock = region_incommon.startUpdate(row);
+ region_incommon.put(lock, colA, cellData(1, flush).getBytes());
+ region_incommon.put(lock, colB, cellData(1, flush).getBytes());
+ region_incommon.put(lock, colC, cellData(1, flush).getBytes());
+ region_incommon.commit(lock, t1);
+
+ lock = region_incommon.startUpdate(row);
+ region_incommon.put(lock, colA, cellData(2, flush).getBytes());
+ region_incommon.put(lock, colB, cellData(2, flush).getBytes());
+ region_incommon.put(lock, colC, cellData(2, flush).getBytes());
+ region_incommon.commit(lock, t2);
+
+ if (flush) {region_incommon.flushcache();}
+
+    // call deleteFamily at timestamp t1: cells in the COLUMNS[0] family at t1
+    // and older should be gone, while newer cells and column C (a different
+    // family) are left behind
+ region.deleteFamily(row, COLUMNS[0], t1);
+ if (flush) {region_incommon.flushcache();}
+ // most recent for A,B,C should be fine
+ // A,B at older timestamps should be gone
+ // C should be fine for older timestamps
+ assertCellValueEquals(region, row, colA, t0, cellData(0, flush));
+ assertCellValueEquals(region, row, colA, t1, null);
+ assertCellValueEquals(region, row, colA, t2, null);
+ assertCellValueEquals(region, row, colB, t0, cellData(0, flush));
+ assertCellValueEquals(region, row, colB, t1, null);
+ assertCellValueEquals(region, row, colB, t2, null);
+ assertCellValueEquals(region, row, colC, t0, cellData(0, flush));
+ assertCellValueEquals(region, row, colC, t1, cellData(1, flush));
+ assertCellValueEquals(region, row, colC, t2, cellData(2, flush));
+
+ // call delete family w/o a timestamp, make sure nothing is left except for
+ // column C.
+ region.deleteFamily(row, COLUMNS[0], HConstants.LATEST_TIMESTAMP);
+ if (flush) {region_incommon.flushcache();}
+ // A,B for latest timestamp should be gone
+ // C should still be fine
+ assertCellValueEquals(region, row, colA, t0, null);
+ assertCellValueEquals(region, row, colB, t0, null);
+ assertCellValueEquals(region, row, colC, t0, cellData(0, flush));
+ assertCellValueEquals(region, row, colC, t1, cellData(1, flush));
+ assertCellValueEquals(region, row, colC, t2, cellData(2, flush));
+
+ }
+
+ private void assertCellValueEquals(final HRegion region, final Text row,
+ final Text column, final long timestamp, final String value)
+ throws IOException {
+ Map<Text, byte[]> result = region.getFull(row, timestamp);
+ byte[] cell_value = result.get(column);
+ if(value == null){
+ assertEquals(column.toString() + " at timestamp " + timestamp, null, cell_value);
+ } else {
+ if (cell_value == null) {
+        fail(column.toString() + " at timestamp " + timestamp +
+          " was expected to be \"" + value + "\" but was null");
+ }
+ assertEquals(column.toString() + " at timestamp "
+ + timestamp, value, new String(cell_value));
+ }
+ }
+
+ private String cellData(int tsNum, boolean flush){
+ return "t" + tsNum + " data" + (flush ? " - with flush" : "");
+ }
+
+ @Override
+ protected void tearDown() throws Exception {
+ if (this.miniHdfs != null) {
+ this.miniHdfs.shutdown();
+ }
+ super.tearDown();
+ }
+}
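The assertions above pin down the deleteFamily contract: deleting a family at
timestamp t removes its cells at t and older, leaving newer cells and other
families intact. A minimal sketch of the calling pattern, assuming the same
package as the test (region, row, and family are hypothetical names; only
calls that appear in the test are used):

    void deleteOlderVersions(HRegion region, Text row, Text family, long t)
        throws IOException {
      region.deleteFamily(row, family, t);   // drops cells with timestamp <= t
      Map<Text, byte[]> survivors =
          region.getFull(row, HConstants.LATEST_TIMESTAMP);
      // 'survivors' keeps cells newer than t in 'family',
      // plus all cells of other families
    }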
Added: hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestGet.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestGet.java?rev=630550&view=auto
==============================================================================
--- hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestGet.java (added)
+++ hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestGet.java Sat Feb 23 16:19:34 2008
@@ -0,0 +1,167 @@
+/**
+ * Copyright 2007 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver;
+
+import java.io.IOException;
+import java.util.Iterator;
+import java.util.Map;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.dfs.MiniDFSCluster;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.hbase.HBaseTestCase;
+
+import org.apache.hadoop.hbase.util.Writables;
+
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HServerAddress;
+import org.apache.hadoop.hbase.StaticTestEnvironment;
+
+
+/** Test case for get */
+public class TestGet extends HBaseTestCase {
+ private static final Log LOG = LogFactory.getLog(TestGet.class.getName());
+
+ private static final Text CONTENTS = new Text("contents:");
+ private static final Text ROW_KEY =
+ new Text(HRegionInfo.rootRegionInfo.getRegionName());
+ private static final String SERVER_ADDRESS = "foo.bar.com:1234";
+
+
+
+ private void verifyGet(final HRegionIncommon r, final String expectedServer)
+ throws IOException {
+ // This should return a value because there is only one family member
+ byte [] value = r.get(ROW_KEY, CONTENTS);
+ assertNotNull(value);
+
+ // This should not return a value because there are multiple family members
+ value = r.get(ROW_KEY, HConstants.COLUMN_FAMILY);
+ assertNull(value);
+
+ // Find out what getFull returns
+ Map<Text, byte []> values = r.getFull(ROW_KEY);
+
+ // assertEquals(4, values.keySet().size());
+ for(Iterator<Text> i = values.keySet().iterator(); i.hasNext(); ) {
+ Text column = i.next();
+ if (column.equals(HConstants.COL_SERVER)) {
+ String server = Writables.bytesToString(values.get(column));
+ assertEquals(expectedServer, server);
+ LOG.info(server);
+ }
+ }
+ }
+
+ /**
+   * Verify that get behaves the same reading from memcache as from disk.
+ * @throws IOException
+ */
+ public void testGet() throws IOException {
+ MiniDFSCluster cluster = null;
+ HRegion region = null;
+
+ try {
+
+ // Initialization
+
+ cluster = new MiniDFSCluster(conf, 2, true, (String[])null);
+ // Set the hbase.rootdir to be the home directory in mini dfs.
+ this.conf.set(HConstants.HBASE_DIR,
+ cluster.getFileSystem().getHomeDirectory().toString());
+
+ HTableDescriptor desc = new HTableDescriptor("test");
+ desc.addFamily(new HColumnDescriptor(CONTENTS.toString()));
+ desc.addFamily(new HColumnDescriptor(HConstants.COLUMN_FAMILY.toString()));
+
+ region = createNewHRegion(desc, null, null);
+ HRegionIncommon r = new HRegionIncommon(region);
+
+ // Write information to the table
+
+ long lockid = r.startUpdate(ROW_KEY);
+ r.put(lockid, CONTENTS, Writables.getBytes(CONTENTS));
+ r.put(lockid, HConstants.COL_REGIONINFO,
+ Writables.getBytes(HRegionInfo.rootRegionInfo));
+ r.commit(lockid, System.currentTimeMillis());
+
+ lockid = r.startUpdate(ROW_KEY);
+ r.put(lockid, HConstants.COL_SERVER,
+ Writables.stringToBytes(new HServerAddress(SERVER_ADDRESS).toString()));
+ r.put(lockid, HConstants.COL_STARTCODE, Writables.longToBytes(lockid));
+ r.put(lockid, new Text(HConstants.COLUMN_FAMILY + "region"),
+ "region".getBytes(HConstants.UTF8_ENCODING));
+ r.commit(lockid, System.currentTimeMillis());
+
+ // Verify that get works the same from memcache as when reading from disk
+ // NOTE dumpRegion won't work here because it only reads from disk.
+
+ verifyGet(r, SERVER_ADDRESS);
+
+ // Close and re-open region, forcing updates to disk
+
+ region.close();
+ region = openClosedRegion(region);
+ r = new HRegionIncommon(region);
+
+ // Read it back
+
+ verifyGet(r, SERVER_ADDRESS);
+
+ // Update one family member and add a new one
+
+ lockid = r.startUpdate(ROW_KEY);
+ r.put(lockid, new Text(HConstants.COLUMN_FAMILY + "region"),
+ "region2".getBytes(HConstants.UTF8_ENCODING));
+ String otherServerName = "bar.foo.com:4321";
+ r.put(lockid, HConstants.COL_SERVER,
+ Writables.stringToBytes(new HServerAddress(otherServerName).toString()));
+ r.put(lockid, new Text(HConstants.COLUMN_FAMILY + "junk"),
+ "junk".getBytes(HConstants.UTF8_ENCODING));
+ r.commit(lockid, System.currentTimeMillis());
+
+ verifyGet(r, otherServerName);
+
+ // Close region and re-open it
+
+ region.close();
+ region = openClosedRegion(region);
+ r = new HRegionIncommon(region);
+
+ // Read it back
+
+ verifyGet(r, otherServerName);
+
+ } finally {
+ if (region != null) {
+ // Close region once and for all
+ region.close();
+ region.getLog().closeAndDelete();
+ }
+ if (cluster != null) {
+ StaticTestEnvironment.shutdownDfs(cluster);
+ }
+ }
+ }
+}
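Restating the contract verifyGet() checks, with r, ROW_KEY, and CONTENTS as
defined in the test above (a sketch, not additional test code):

    byte [] one = r.get(ROW_KEY, CONTENTS);                  // unique family member: non-null
    byte [] many = r.get(ROW_KEY, HConstants.COLUMN_FAMILY); // several members: null
    Map<Text, byte []> all = r.getFull(ROW_KEY);             // every column of the row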
Added: hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestGet2.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestGet2.java?rev=630550&view=auto
==============================================================================
--- hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestGet2.java (added)
+++ hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestGet2.java Sat Feb 23 16:19:34 2008
@@ -0,0 +1,262 @@
+/**
+ * Copyright 2007 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver;
+
+import java.io.IOException;
+import java.util.Map;
+import java.util.TreeMap;
+
+import org.apache.hadoop.dfs.MiniDFSCluster;
+import org.apache.hadoop.hbase.filter.StopRowFilter;
+import org.apache.hadoop.hbase.filter.WhileMatchRowFilter;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.hbase.HBaseTestCase;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HScannerInterface;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.HRegionInfo;
+
+
+/**
+ * {@link TestGet} is a medley of tests of get all done up as a single test.
+ * This class breaks the medley out into individual tests.
+ */
+public class TestGet2 extends HBaseTestCase {
+ private MiniDFSCluster miniHdfs;
+
+ @Override
+ protected void setUp() throws Exception {
+ super.setUp();
+ this.miniHdfs = new MiniDFSCluster(this.conf, 1, true, null);
+ // Set the hbase.rootdir to be the home directory in mini dfs.
+ this.conf.set(HConstants.HBASE_DIR,
+ this.miniHdfs.getFileSystem().getHomeDirectory().toString());
+ }
+
+ /**
+ * Tests for HADOOP-2161.
+ * @throws Exception
+ */
+ public void testGetFull() throws Exception {
+ HRegion region = null;
+ HScannerInterface scanner = null;
+ try {
+ HTableDescriptor htd = createTableDescriptor(getName());
+ region = createNewHRegion(htd, null, null);
+ for (int i = 0; i < COLUMNS.length; i++) {
+ addContent(region, COLUMNS[i].toString());
+ }
+ // Find two rows to use doing getFull.
+ final Text arbitraryStartRow = new Text("b");
+ Text actualStartRow = null;
+ final Text arbitraryStopRow = new Text("c");
+ Text actualStopRow = null;
+ Text [] columns = new Text [] {new Text(COLFAMILY_NAME1)};
+ scanner = region.getScanner(columns,
+ arbitraryStartRow, HConstants.LATEST_TIMESTAMP,
+ new WhileMatchRowFilter(new StopRowFilter(arbitraryStopRow)));
+ HStoreKey key = new HStoreKey();
+ TreeMap<Text, byte[]> value = new TreeMap<Text, byte []>();
+ while (scanner.next(key, value)) {
+ if (actualStartRow == null) {
+ actualStartRow = new Text(key.getRow());
+ } else {
+          actualStopRow = new Text(key.getRow());
+ }
+ }
+ // Assert I got all out.
+ assertColumnsPresent(region, actualStartRow);
+ assertColumnsPresent(region, actualStopRow);
+ // Force a flush so store files come into play.
+ region.flushcache();
+ // Assert I got all out.
+ assertColumnsPresent(region, actualStartRow);
+ assertColumnsPresent(region, actualStopRow);
+ } finally {
+ if (scanner != null) {
+ scanner.close();
+ }
+ if (region != null) {
+ try {
+ region.close();
+ } catch (Exception e) {
+ e.printStackTrace();
+ }
+ region.getLog().closeAndDelete();
+ }
+ }
+ }
+
+ /**
+ * @throws IOException
+ */
+ public void testGetAtTimestamp() throws IOException{
+ HRegion region = null;
+ HRegionIncommon region_incommon = null;
+ try {
+ HTableDescriptor htd = createTableDescriptor(getName());
+ region = createNewHRegion(htd, null, null);
+ region_incommon = new HRegionIncommon(region);
+
+ long right_now = System.currentTimeMillis();
+ long one_second_ago = right_now - 1000;
+
+ Text t = new Text("test_row");
+ long lockid = region_incommon.startUpdate(t);
+ region_incommon.put(lockid, COLUMNS[0], "old text".getBytes());
+ region_incommon.commit(lockid, one_second_ago);
+
+ lockid = region_incommon.startUpdate(t);
+ region_incommon.put(lockid, COLUMNS[0], "new text".getBytes());
+ region_incommon.commit(lockid, right_now);
+
+ assertCellValueEquals(region, t, COLUMNS[0], right_now, "new text");
+ assertCellValueEquals(region, t, COLUMNS[0], one_second_ago, "old text");
+
+ // Force a flush so store files come into play.
+ region_incommon.flushcache();
+
+ assertCellValueEquals(region, t, COLUMNS[0], right_now, "new text");
+ assertCellValueEquals(region, t, COLUMNS[0], one_second_ago, "old text");
+
+ } finally {
+ if (region != null) {
+ try {
+ region.close();
+ } catch (Exception e) {
+ e.printStackTrace();
+ }
+ region.getLog().closeAndDelete();
+ }
+ }
+ }
+
+ /** For HADOOP-2443 */
+ public void testGetClosestRowBefore() throws IOException{
+
+ HRegion region = null;
+ HRegionIncommon region_incommon = null;
+
+ try {
+ HTableDescriptor htd = createTableDescriptor(getName());
+ HRegionInfo hri = new HRegionInfo(htd, null, null);
+ region = createNewHRegion(htd, null, null);
+ region_incommon = new HRegionIncommon(region);
+
+ // set up some test data
+ Text t10 = new Text("010");
+ Text t20 = new Text("020");
+ Text t30 = new Text("030");
+ Text t40 = new Text("040");
+
+ long lockid = region_incommon.startUpdate(t10);
+ region_incommon.put(lockid, COLUMNS[0], "t10 bytes".getBytes());
+ region_incommon.commit(lockid);
+
+ lockid = region_incommon.startUpdate(t20);
+ region_incommon.put(lockid, COLUMNS[0], "t20 bytes".getBytes());
+ region_incommon.commit(lockid);
+
+ lockid = region_incommon.startUpdate(t30);
+ region_incommon.put(lockid, COLUMNS[0], "t30 bytes".getBytes());
+ region_incommon.commit(lockid);
+
+ lockid = region_incommon.startUpdate(t40);
+ region_incommon.put(lockid, COLUMNS[0], "t40 bytes".getBytes());
+ region_incommon.commit(lockid);
+
+ // try finding "015"
+ Text t15 = new Text("015");
+ Map<Text, byte[]> results =
+ region.getClosestRowBefore(t15, HConstants.LATEST_TIMESTAMP);
+ assertEquals(new String(results.get(COLUMNS[0])), "t10 bytes");
+
+ // try "020", we should get that row exactly
+ results = region.getClosestRowBefore(t20, HConstants.LATEST_TIMESTAMP);
+ assertEquals(new String(results.get(COLUMNS[0])), "t20 bytes");
+
+ // try "050", should get stuff from "040"
+ Text t50 = new Text("050");
+ results = region.getClosestRowBefore(t50, HConstants.LATEST_TIMESTAMP);
+ assertEquals(new String(results.get(COLUMNS[0])), "t40 bytes");
+
+ // force a flush
+ region.flushcache();
+
+ // try finding "015"
+ results = region.getClosestRowBefore(t15, HConstants.LATEST_TIMESTAMP);
+ assertEquals(new String(results.get(COLUMNS[0])), "t10 bytes");
+
+ // try "020", we should get that row exactly
+ results = region.getClosestRowBefore(t20, HConstants.LATEST_TIMESTAMP);
+ assertEquals(new String(results.get(COLUMNS[0])), "t20 bytes");
+
+ // try "050", should get stuff from "040"
+ results = region.getClosestRowBefore(t50, HConstants.LATEST_TIMESTAMP);
+ assertEquals(new String(results.get(COLUMNS[0])), "t40 bytes");
+ } finally {
+ if (region != null) {
+ try {
+ region.close();
+ } catch (Exception e) {
+ e.printStackTrace();
+ }
+ region.getLog().closeAndDelete();
+ }
+ }
+ }
+
+
+ private void assertCellValueEquals(final HRegion region, final Text row,
+ final Text column, final long timestamp, final String value)
+ throws IOException {
+ Map<Text, byte[]> result = region.getFull(row, timestamp);
+ assertEquals("cell value at a given timestamp", new String(result.get(column)), value);
+ }
+
+ private void assertColumnsPresent(final HRegion r, final Text row)
+ throws IOException {
+ Map<Text, byte[]> result = r.getFull(row);
+ int columnCount = 0;
+ for (Map.Entry<Text, byte[]> e: result.entrySet()) {
+ columnCount++;
+ String column = e.getKey().toString();
+ boolean legitColumn = false;
+ for (int i = 0; i < COLUMNS.length; i++) {
+        // Assert value is same as row. This is the nature of the data added.
+ assertTrue(row.equals(new Text(e.getValue())));
+ if (COLUMNS[i].equals(new Text(column))) {
+ legitColumn = true;
+ break;
+ }
+ }
+ assertTrue("is legit column name", legitColumn);
+ }
+ assertEquals("count of columns", columnCount, COLUMNS.length);
+ }
+
+ @Override
+ protected void tearDown() throws Exception {
+ if (this.miniHdfs != null) {
+ this.miniHdfs.shutdown();
+ }
+ super.tearDown();
+ }
+}
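The getClosestRowBefore assertions above reduce to two cases; condensed, with
region and the row keys as set up by the test:

    // Exact match: returns that row's columns ("t20 bytes").
    Map<Text, byte[]> exact =
        region.getClosestRowBefore(new Text("020"), HConstants.LATEST_TIMESTAMP);
    // No exact match: returns the nearest row sorting before it ("010").
    Map<Text, byte[]> before =
        region.getClosestRowBefore(new Text("015"), HConstants.LATEST_TIMESTAMP);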
Added: hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestHLog.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestHLog.java?rev=630550&view=auto
==============================================================================
--- hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestHLog.java (added)
+++ hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestHLog.java Sat Feb 23 16:19:34 2008
@@ -0,0 +1,153 @@
+/**
+ * Copyright 2007 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver;
+
+import java.io.IOException;
+import java.util.TreeMap;
+
+import org.apache.hadoop.dfs.MiniDFSCluster;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.io.SequenceFile;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.io.SequenceFile.Reader;
+
+import org.apache.hadoop.hbase.HBaseTestCase;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.StaticTestEnvironment;
+
+/** JUnit test case for HLog */
+public class TestHLog extends HBaseTestCase implements HConstants {
+ private Path dir;
+ private MiniDFSCluster cluster;
+
+ /** {@inheritDoc} */
+ @Override
+ public void setUp() throws Exception {
+ cluster = new MiniDFSCluster(conf, 2, true, (String[])null);
+ // Set the hbase.rootdir to be the home directory in mini dfs.
+ this.conf.set(HConstants.HBASE_DIR,
+ this.cluster.getFileSystem().getHomeDirectory().toString());
+ super.setUp();
+ this.dir = new Path("/hbase", getName());
+ if (fs.exists(dir)) {
+ fs.delete(dir);
+ }
+ }
+
+ /** {@inheritDoc} */
+ @Override
+ public void tearDown() throws Exception {
+ if (this.fs.exists(this.dir)) {
+ this.fs.delete(this.dir);
+ }
+ StaticTestEnvironment.shutdownDfs(cluster);
+ super.tearDown();
+ }
+
+ /**
+ * Just write multiple logs then split. Before fix for HADOOP-2283, this
+ * would fail.
+ * @throws IOException
+ */
+ public void testSplit() throws IOException {
+ final Text tableName = new Text(getName());
+ final Text rowName = tableName;
+ HLog log = new HLog(this.fs, this.dir, this.conf, null);
+ // Add edits for three regions.
+ try {
+ for (int ii = 0; ii < 3; ii++) {
+ for (int i = 0; i < 3; i++) {
+ for (int j = 0; j < 3; j++) {
+ TreeMap<HStoreKey, byte[]> edit = new TreeMap<HStoreKey, byte[]>();
+ Text column = new Text(Integer.toString(j));
+ edit.put(
+ new HStoreKey(rowName, column, System.currentTimeMillis()),
+ column.getBytes());
+ log.append(new Text(Integer.toString(i)), tableName, edit);
+ }
+ }
+ log.rollWriter();
+ }
+ HLog.splitLog(this.testDir, this.dir, this.fs, this.conf);
+ log = null;
+ } finally {
+ if (log != null) {
+ log.closeAndDelete();
+ }
+ }
+ }
+
+ /**
+ * @throws IOException
+ */
+ public void testAppend() throws IOException {
+ final int COL_COUNT = 10;
+ final Text regionName = new Text("regionname");
+ final Text tableName = new Text("tablename");
+ final Text row = new Text("row");
+ Reader reader = null;
+ HLog log = new HLog(fs, dir, this.conf, null);
+ try {
+      // Write columns named 0, 1, 2, etc., each with a single-byte value of
+      // the corresponding digit character.
+ long timestamp = System.currentTimeMillis();
+ TreeMap<HStoreKey, byte []> cols = new TreeMap<HStoreKey, byte []>();
+ for (int i = 0; i < COL_COUNT; i++) {
+ cols.put(new HStoreKey(row, new Text(Integer.toString(i)), timestamp),
+ new byte[] { (byte)(i + '0') });
+ }
+ log.append(regionName, tableName, cols);
+ long logSeqId = log.startCacheFlush();
+ log.completeCacheFlush(regionName, tableName, logSeqId);
+ log.close();
+ Path filename = log.computeFilename(log.filenum);
+ log = null;
+ // Now open a reader on the log and assert append worked.
+ reader = new SequenceFile.Reader(fs, filename, conf);
+ HLogKey key = new HLogKey();
+ HLogEdit val = new HLogEdit();
+ for (int i = 0; i < COL_COUNT; i++) {
+ reader.next(key, val);
+ assertEquals(regionName, key.getRegionName());
+ assertEquals(tableName, key.getTablename());
+ assertEquals(row, key.getRow());
+ assertEquals((byte)(i + '0'), val.getVal()[0]);
+ System.out.println(key + " " + val);
+ }
+ while (reader.next(key, val)) {
+ // Assert only one more row... the meta flushed row.
+ assertEquals(regionName, key.getRegionName());
+ assertEquals(tableName, key.getTablename());
+ assertEquals(HLog.METAROW, key.getRow());
+ assertEquals(HLog.METACOLUMN, val.getColumn());
+ assertEquals(0, HLogEdit.completeCacheFlush.compareTo(val.getVal()));
+ System.out.println(key + " " + val);
+ }
+ } finally {
+ if (log != null) {
+ log.closeAndDelete();
+ }
+ if (reader != null) {
+ reader.close();
+ }
+ }
+ }
+
+}
\ No newline at end of file
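Both tests drive the same HLog write path; condensed below using only calls
that appear above (row, column, regionName, tableName, rootDir, logDir, fs,
and conf stand in for the test's variables):

    TreeMap<HStoreKey, byte[]> edits = new TreeMap<HStoreKey, byte[]>();
    edits.put(new HStoreKey(row, column, System.currentTimeMillis()),
        column.getBytes());
    log.append(regionName, tableName, edits); // one batched append per region
    log.rollWriter();                         // close the current file, start a new one
    HLog.splitLog(rootDir, logDir, fs, conf); // re-sort accumulated edits per region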
Added: hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestHMemcache.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestHMemcache.java?rev=630550&view=auto
==============================================================================
--- hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestHMemcache.java (added)
+++ hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestHMemcache.java Sat Feb 23 16:19:34 2008
@@ -0,0 +1,164 @@
+/**
+ * Copyright 2007 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver;
+
+import java.io.IOException;
+import java.io.UnsupportedEncodingException;
+import java.util.Map;
+import java.util.TreeMap;
+
+import junit.framework.TestCase;
+
+import org.apache.hadoop.io.Text;
+
+import org.apache.hadoop.hbase.HConstants;
+
+/** memcache test case */
+public class TestHMemcache extends TestCase {
+
+ private HStore.Memcache hmemcache;
+
+ private static final int ROW_COUNT = 3;
+
+ private static final int COLUMNS_COUNT = 3;
+
+ private static final String COLUMN_FAMILY = "column";
+
+ /** {@inheritDoc} */
+ @Override
+ public void setUp() throws Exception {
+ super.setUp();
+ this.hmemcache = new HStore.Memcache();
+ }
+
+ private Text getRowName(final int index) {
+ return new Text("row" + Integer.toString(index));
+ }
+
+ private Text getColumnName(final int rowIndex, final int colIndex) {
+ return new Text(COLUMN_FAMILY + ":" + Integer.toString(rowIndex) + ";" +
+ Integer.toString(colIndex));
+ }
+
+ /**
+   * Adds {@link #ROW_COUNT} rows and {@link #COLUMNS_COUNT} columns per row.
+ * @param hmc Instance to add rows to.
+ */
+ private void addRows(final HStore.Memcache hmc)
+ throws UnsupportedEncodingException {
+
+ for (int i = 0; i < ROW_COUNT; i++) {
+ long timestamp = System.currentTimeMillis();
+ for (int ii = 0; ii < COLUMNS_COUNT; ii++) {
+ Text k = getColumnName(i, ii);
+ hmc.add(new HStoreKey(getRowName(i), k, timestamp),
+ k.toString().getBytes(HConstants.UTF8_ENCODING));
+ }
+ }
+ }
+
+ private void runSnapshot(final HStore.Memcache hmc) {
+ // Save off old state.
+ int oldHistorySize = hmc.snapshot.size();
+ hmc.snapshot();
+ // Make some assertions about what just happened.
+ assertTrue("History size has not increased",
+ oldHistorySize < hmc.snapshot.size());
+ }
+
+ /**
+ * Test memcache snapshots
+ * @throws IOException
+ */
+ public void testSnapshotting() throws IOException {
+ final int snapshotCount = 5;
+ // Add some rows, run a snapshot. Do it a few times.
+ for (int i = 0; i < snapshotCount; i++) {
+ addRows(this.hmemcache);
+ runSnapshot(this.hmemcache);
+ this.hmemcache.getSnapshot();
+ assertEquals("History not being cleared", 0,
+ this.hmemcache.snapshot.size());
+ }
+ }
+
+ private void isExpectedRow(final int rowIndex, TreeMap<Text, byte []> row)
+ throws UnsupportedEncodingException {
+
+ int i = 0;
+ for (Text colname: row.keySet()) {
+ String expectedColname = getColumnName(rowIndex, i++).toString();
+ String colnameStr = colname.toString();
+ assertEquals("Column name", colnameStr, expectedColname);
+      // Value is column name as bytes. Usually result is
+      // 100 bytes in size at least. This is the default size
+      // for BytesWritable. For comparison, convert bytes to
+      // String and trim to remove trailing null bytes.
+ byte [] value = row.get(colname);
+ String colvalueStr = new String(value, HConstants.UTF8_ENCODING).trim();
+ assertEquals("Content", colnameStr, colvalueStr);
+ }
+ }
+
+ /** Test getFull from memcache
+ * @throws UnsupportedEncodingException
+ */
+ public void testGetFull() throws UnsupportedEncodingException {
+ addRows(this.hmemcache);
+ for (int i = 0; i < ROW_COUNT; i++) {
+ HStoreKey hsk = new HStoreKey(getRowName(i));
+ TreeMap<Text, byte []> all = new TreeMap<Text, byte[]>();
+ this.hmemcache.getFull(hsk, all);
+ isExpectedRow(i, all);
+ }
+ }
+
+ /**
+ * Test memcache scanner
+ * @throws IOException
+ */
+ public void testScanner() throws IOException {
+ addRows(this.hmemcache);
+ long timestamp = System.currentTimeMillis();
+ Text [] cols = new Text[COLUMNS_COUNT * ROW_COUNT];
+ for (int i = 0; i < ROW_COUNT; i++) {
+ for (int ii = 0; ii < COLUMNS_COUNT; ii++) {
+ cols[(ii + (i * COLUMNS_COUNT))] = getColumnName(i, ii);
+ }
+ }
+ HInternalScannerInterface scanner =
+ this.hmemcache.getScanner(timestamp, cols, new Text());
+ HStoreKey key = new HStoreKey();
+ TreeMap<Text, byte []> results = new TreeMap<Text, byte []>();
+ for (int i = 0; scanner.next(key, results); i++) {
+ assertTrue("Row name",
+ key.toString().startsWith(getRowName(i).toString()));
+ assertEquals("Count of columns", COLUMNS_COUNT,
+ results.size());
+ TreeMap<Text, byte []> row = new TreeMap<Text, byte []>();
+ for(Map.Entry<Text, byte []> e: results.entrySet() ) {
+ row.put(e.getKey(), e.getValue());
+ }
+ isExpectedRow(i, row);
+ // Clear out set. Otherwise row results accumulate.
+ results.clear();
+ }
+ }
+}
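testSnapshotting() depends on the memcache's two-buffer lifecycle; a sketch
with the same calls (row, column, and value are hypothetical, and the
snapshot field is visible because the test shares the package):

    HStore.Memcache mc = new HStore.Memcache();
    mc.add(new HStoreKey(row, column, System.currentTimeMillis()), value);
    mc.snapshot();     // moves active edits into the snapshot buffer
    mc.getSnapshot();  // hands the buffer to the flusher and clears it
    // mc.snapshot.size() == 0 again, which is what the test asserts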
Added: hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestHRegion.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestHRegion.java?rev=630550&view=auto
==============================================================================
--- hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestHRegion.java (added)
+++ hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestHRegion.java Sat Feb 23 16:19:34 2008
@@ -0,0 +1,821 @@
+/**
+ * Copyright 2007 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.List;
+import java.util.TreeMap;
+
+import org.apache.hadoop.dfs.MiniDFSCluster;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.hbase.HBaseTestCase;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.log4j.Logger;
+import org.apache.hadoop.hbase.StaticTestEnvironment;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HScannerInterface;
+
+/**
+ * Basic stand-alone testing of HRegion.
+ *
+ * A lot of the meta information for an HRegion now lives inside other
+ * HRegions or in the HBaseMaster, so only basic testing is possible.
+ */
+public class TestHRegion extends HBaseTestCase
+implements RegionUnavailableListener {
+ static final Logger LOG =
+ Logger.getLogger(TestHRegion.class.getName());
+
+ /**
+ * Since all the "tests" depend on the results of the previous test, they are
+   * not JUnit tests that can stand alone. Consequently we have a single JUnit
+ * test that runs the "sub-tests" as private methods.
+ * @throws IOException
+ */
+ public void testHRegion() throws IOException {
+ try {
+ setup();
+ locks();
+ badPuts();
+ basic();
+ scan();
+ batchWrite();
+ splitAndMerge();
+ read();
+ cleanup();
+ } finally {
+ if (r != null) {
+ r.close();
+ }
+ if (log != null) {
+ log.closeAndDelete();
+ }
+ StaticTestEnvironment.shutdownDfs(cluster);
+ }
+ }
+
+
+ private static final int FIRST_ROW = 1;
+ private static final int N_ROWS = 1000000;
+ private static final int NUM_VALS = 1000;
+ private static final Text CONTENTS_BASIC = new Text("contents:basic");
+ private static final String CONTENTSTR = "contentstr";
+ private static final String ANCHORNUM = "anchor:anchornum-";
+ private static final String ANCHORSTR = "anchorstr";
+ private static final Text CONTENTS_BODY = new Text("contents:body");
+ private static final Text CONTENTS_FIRSTCOL = new Text("contents:firstcol");
+ private static final Text ANCHOR_SECONDCOL = new Text("anchor:secondcol");
+
+ private MiniDFSCluster cluster = null;
+ private HLog log = null;
+ private HTableDescriptor desc = null;
+ HRegion r = null;
+ HRegionIncommon region = null;
+
+ private static int numInserted = 0;
+
+ // Create directories, start mini cluster, etc.
+
+ private void setup() throws IOException {
+
+ cluster = new MiniDFSCluster(conf, 2, true, (String[])null);
+ // Set the hbase.rootdir to be the home directory in mini dfs.
+ this.conf.set(HConstants.HBASE_DIR,
+ this.cluster.getFileSystem().getHomeDirectory().toString());
+
+ desc = new HTableDescriptor("test");
+ desc.addFamily(new HColumnDescriptor("contents:"));
+ desc.addFamily(new HColumnDescriptor("anchor:"));
+ r = createNewHRegion(desc, null, null);
+ log = r.getLog();
+ region = new HRegionIncommon(r);
+ }
+
+ // Test basic functionality. Writes to contents:basic and anchor:anchornum-*
+
+ private void basic() throws IOException {
+ long startTime = System.currentTimeMillis();
+
+ // Write out a bunch of values
+
+ for (int k = FIRST_ROW; k <= NUM_VALS; k++) {
+ long writeid = region.startUpdate(new Text("row_" + k));
+ region.put(writeid, CONTENTS_BASIC,
+ (CONTENTSTR + k).getBytes(HConstants.UTF8_ENCODING));
+ region.put(writeid, new Text(ANCHORNUM + k),
+ (ANCHORSTR + k).getBytes(HConstants.UTF8_ENCODING));
+ region.commit(writeid, System.currentTimeMillis());
+ }
+ System.out.println("Write " + NUM_VALS + " rows. Elapsed time: "
+ + ((System.currentTimeMillis() - startTime) / 1000.0));
+
+ // Flush cache
+
+ startTime = System.currentTimeMillis();
+
+ region.flushcache();
+
+ System.out.println("Cache flush elapsed time: "
+ + ((System.currentTimeMillis() - startTime) / 1000.0));
+
+ // Read them back in
+
+ startTime = System.currentTimeMillis();
+
+ Text collabel = null;
+ for (int k = FIRST_ROW; k <= NUM_VALS; k++) {
+ Text rowlabel = new Text("row_" + k);
+
+ byte [] bodydata = region.get(rowlabel, CONTENTS_BASIC);
+ assertNotNull(bodydata);
+ String bodystr = new String(bodydata, HConstants.UTF8_ENCODING).trim();
+ String teststr = CONTENTSTR + k;
+ assertEquals("Incorrect value for key: (" + rowlabel + "," + CONTENTS_BASIC
+ + "), expected: '" + teststr + "' got: '" + bodystr + "'",
+ bodystr, teststr);
+ collabel = new Text(ANCHORNUM + k);
+ bodydata = region.get(rowlabel, collabel);
+ bodystr = new String(bodydata, HConstants.UTF8_ENCODING).trim();
+ teststr = ANCHORSTR + k;
+ assertEquals("Incorrect value for key: (" + rowlabel + "," + collabel
+ + "), expected: '" + teststr + "' got: '" + bodystr + "'",
+ bodystr, teststr);
+ }
+
+ System.out.println("Read " + NUM_VALS + " rows. Elapsed time: "
+ + ((System.currentTimeMillis() - startTime) / 1000.0));
+ }
+
+ private void badPuts() {
+
+ // Try put with bad lockid.
+ boolean exceptionThrown = false;
+ try {
+ region.put(-1, CONTENTS_BASIC,
+ "bad input".getBytes(HConstants.UTF8_ENCODING));
+ } catch (Exception e) {
+ exceptionThrown = true;
+ }
+ assertTrue("Bad lock id", exceptionThrown);
+
+ // Try column name not registered in the table.
+ exceptionThrown = false;
+ long lockid = -1;
+ try {
+ lockid = region.startUpdate(new Text("Some old key"));
+ String unregisteredColName = "FamilyGroup:FamilyLabel";
+ region.put(lockid, new Text(unregisteredColName),
+ unregisteredColName.getBytes(HConstants.UTF8_ENCODING));
+ region.commit(lockid);
+ } catch (IOException e) {
+ exceptionThrown = true;
+ } finally {
+ if (lockid != -1) {
+ region.abort(lockid);
+ }
+ }
+ assertTrue("Bad family", exceptionThrown);
+ }
+
+ /**
+ * Test getting and releasing locks.
+ */
+ private void locks() {
+ final int threadCount = 10;
+ final int lockCount = 10;
+
+ List<Thread>threads = new ArrayList<Thread>(threadCount);
+ for (int i = 0; i < threadCount; i++) {
+ threads.add(new Thread(Integer.toString(i)) {
+ @Override
+ public void run() {
+ long [] lockids = new long[lockCount];
+ // Get locks.
+ for (int i = 0; i < lockCount; i++) {
+ try {
+ Text rowid = new Text(Integer.toString(i));
+ lockids[i] = r.obtainRowLock(rowid);
+ rowid.equals(r.getRowFromLock(lockids[i]));
+ LOG.debug(getName() + " locked " + rowid.toString());
+ } catch (IOException e) {
+ e.printStackTrace();
+ }
+ }
+ LOG.debug(getName() + " set " +
+ Integer.toString(lockCount) + " locks");
+
+ // Abort outstanding locks.
+ for (int i = lockCount - 1; i >= 0; i--) {
+ r.releaseRowLock(r.getRowFromLock(lockids[i]));
+ LOG.debug(getName() + " unlocked " + i);
+ }
+ LOG.debug(getName() + " released " +
+ Integer.toString(lockCount) + " locks");
+ }
+ });
+ }
+
+ // Startup all our threads.
+ for (Thread t : threads) {
+ t.start();
+ }
+
+ // Now wait around till all are done.
+ for (Thread t: threads) {
+ while (t.isAlive()) {
+ try {
+ Thread.sleep(1);
+ } catch (InterruptedException e) {
+ // Go around again.
+ }
+ }
+ }
+ }
+
+ // Test scanners. Writes contents:firstcol and anchor:secondcol
+
+ private void scan() throws IOException {
+ Text cols[] = new Text[] {
+ CONTENTS_FIRSTCOL,
+ ANCHOR_SECONDCOL
+ };
+
+ // Test the Scanner!!!
+ String[] vals1 = new String[1000];
+ for(int k = 0; k < vals1.length; k++) {
+ vals1[k] = Integer.toString(k);
+ }
+
+ // 1. Insert a bunch of values
+
+ long startTime = System.currentTimeMillis();
+
+ for(int k = 0; k < vals1.length / 2; k++) {
+ String kLabel = String.format("%1$03d", k);
+
+ long lockid = region.startUpdate(new Text("row_vals1_" + kLabel));
+ region.put(lockid, cols[0], vals1[k].getBytes(HConstants.UTF8_ENCODING));
+ region.put(lockid, cols[1], vals1[k].getBytes(HConstants.UTF8_ENCODING));
+ region.commit(lockid, System.currentTimeMillis());
+ numInserted += 2;
+ }
+
+ System.out.println("Write " + (vals1.length / 2) + " elapsed time: "
+ + ((System.currentTimeMillis() - startTime) / 1000.0));
+
+ // 2. Scan from cache
+
+ startTime = System.currentTimeMillis();
+
+ HScannerInterface s =
+ r.getScanner(cols, new Text(), System.currentTimeMillis(), null);
+ int numFetched = 0;
+ try {
+ HStoreKey curKey = new HStoreKey();
+ TreeMap<Text, byte []> curVals = new TreeMap<Text, byte []>();
+ int k = 0;
+ while(s.next(curKey, curVals)) {
+ for(Iterator<Text> it = curVals.keySet().iterator(); it.hasNext(); ) {
+ Text col = it.next();
+ byte [] val = curVals.get(col);
+ int curval =
+ Integer.parseInt(new String(val, HConstants.UTF8_ENCODING).trim());
+ for(int j = 0; j < cols.length; j++) {
+ if(col.compareTo(cols[j]) == 0) {
+ assertEquals("Error at:" + curKey.getRow() + "/"
+ + curKey.getTimestamp()
+ + ", Value for " + col + " should be: " + k
+ + ", but was fetched as: " + curval, k, curval);
+ numFetched++;
+ }
+ }
+ }
+ curVals.clear();
+ k++;
+ }
+ } finally {
+ s.close();
+ }
+ assertEquals("Inserted " + numInserted + " values, but fetched " + numFetched, numInserted, numFetched);
+
+ System.out.println("Scanned " + (vals1.length / 2)
+ + " rows from cache. Elapsed time: "
+ + ((System.currentTimeMillis() - startTime) / 1000.0));
+
+ // 3. Flush to disk
+
+ startTime = System.currentTimeMillis();
+
+ region.flushcache();
+
+ System.out.println("Cache flush elapsed time: "
+ + ((System.currentTimeMillis() - startTime) / 1000.0));
+
+ // 4. Scan from disk
+
+ startTime = System.currentTimeMillis();
+
+ s = r.getScanner(cols, new Text(), System.currentTimeMillis(), null);
+ numFetched = 0;
+ try {
+ HStoreKey curKey = new HStoreKey();
+ TreeMap<Text, byte []> curVals = new TreeMap<Text, byte []>();
+ int k = 0;
+ while(s.next(curKey, curVals)) {
+ for(Iterator<Text> it = curVals.keySet().iterator(); it.hasNext(); ) {
+ Text col = it.next();
+ byte [] val = curVals.get(col);
+ int curval =
+ Integer.parseInt(new String(val, HConstants.UTF8_ENCODING).trim());
+ for(int j = 0; j < cols.length; j++) {
+ if(col.compareTo(cols[j]) == 0) {
+ assertEquals("Error at:" + curKey.getRow() + "/"
+ + curKey.getTimestamp()
+ + ", Value for " + col + " should be: " + k
+ + ", but was fetched as: " + curval, k, curval);
+ numFetched++;
+ }
+ }
+ }
+ curVals.clear();
+ k++;
+ }
+ } finally {
+ s.close();
+ }
+ assertEquals("Inserted " + numInserted + " values, but fetched " + numFetched, numInserted, numFetched);
+
+ System.out.println("Scanned " + (vals1.length / 2)
+ + " rows from disk. Elapsed time: "
+ + ((System.currentTimeMillis() - startTime) / 1000.0));
+
+ // 5. Insert more values
+
+ startTime = System.currentTimeMillis();
+
+ for(int k = vals1.length/2; k < vals1.length; k++) {
+ String kLabel = String.format("%1$03d", k);
+
+ long lockid = region.startUpdate(new Text("row_vals1_" + kLabel));
+ region.put(lockid, cols[0], vals1[k].getBytes(HConstants.UTF8_ENCODING));
+ region.put(lockid, cols[1], vals1[k].getBytes(HConstants.UTF8_ENCODING));
+ region.commit(lockid, System.currentTimeMillis());
+ numInserted += 2;
+ }
+
+ System.out.println("Write " + (vals1.length / 2) + " rows. Elapsed time: "
+ + ((System.currentTimeMillis() - startTime) / 1000.0));
+
+ // 6. Scan from cache and disk
+
+ startTime = System.currentTimeMillis();
+
+ s = r.getScanner(cols, new Text(), System.currentTimeMillis(), null);
+ numFetched = 0;
+ try {
+ HStoreKey curKey = new HStoreKey();
+ TreeMap<Text, byte []> curVals = new TreeMap<Text, byte []>();
+ int k = 0;
+ while(s.next(curKey, curVals)) {
+ for(Iterator<Text> it = curVals.keySet().iterator(); it.hasNext(); ) {
+ Text col = it.next();
+ byte [] val = curVals.get(col);
+ int curval =
+ Integer.parseInt(new String(val, HConstants.UTF8_ENCODING).trim());
+ for(int j = 0; j < cols.length; j++) {
+ if(col.compareTo(cols[j]) == 0) {
+ assertEquals("Error at:" + curKey.getRow() + "/"
+ + curKey.getTimestamp()
+ + ", Value for " + col + " should be: " + k
+ + ", but was fetched as: " + curval, k, curval);
+ numFetched++;
+ }
+ }
+ }
+ curVals.clear();
+ k++;
+ }
+ } finally {
+ s.close();
+ }
+ assertEquals("Inserted " + numInserted + " values, but fetched " + numFetched, numInserted, numFetched);
+
+ System.out.println("Scanned " + vals1.length
+ + " rows from cache and disk. Elapsed time: "
+ + ((System.currentTimeMillis() - startTime) / 1000.0));
+
+ // 7. Flush to disk
+
+ startTime = System.currentTimeMillis();
+
+ region.flushcache();
+
+ System.out.println("Cache flush elapsed time: "
+ + ((System.currentTimeMillis() - startTime) / 1000.0));
+
+ // 8. Scan from disk
+
+ startTime = System.currentTimeMillis();
+
+ s = r.getScanner(cols, new Text(), System.currentTimeMillis(), null);
+ numFetched = 0;
+ try {
+ HStoreKey curKey = new HStoreKey();
+ TreeMap<Text, byte []> curVals = new TreeMap<Text, byte []>();
+ int k = 0;
+ while(s.next(curKey, curVals)) {
+ for(Iterator<Text> it = curVals.keySet().iterator(); it.hasNext(); ) {
+ Text col = it.next();
+ byte [] val = curVals.get(col);
+ int curval =
+ Integer.parseInt(new String(val, HConstants.UTF8_ENCODING).trim());
+ for (int j = 0; j < cols.length; j++) {
+ if (col.compareTo(cols[j]) == 0) {
+ assertEquals("Value for " + col + " should be: " + k
+ + ", but was fetched as: " + curval, curval, k);
+ numFetched++;
+ }
+ }
+ }
+ curVals.clear();
+ k++;
+ }
+ } finally {
+ s.close();
+ }
+ assertEquals("Inserted " + numInserted + " values, but fetched " + numFetched, numInserted, numFetched);
+
+ System.out.println("Scanned " + vals1.length
+ + " rows from disk. Elapsed time: "
+ + ((System.currentTimeMillis() - startTime) / 1000.0));
+
+ // 9. Scan with a starting point
+
+ startTime = System.currentTimeMillis();
+
+ s = r.getScanner(cols, new Text("row_vals1_500"),
+ System.currentTimeMillis(), null);
+
+ numFetched = 0;
+ try {
+ HStoreKey curKey = new HStoreKey();
+ TreeMap<Text, byte []> curVals = new TreeMap<Text, byte []>();
+ int k = 500;
+ while(s.next(curKey, curVals)) {
+ for(Iterator<Text> it = curVals.keySet().iterator(); it.hasNext(); ) {
+ Text col = it.next();
+ byte [] val = curVals.get(col);
+ int curval =
+ Integer.parseInt(new String(val, HConstants.UTF8_ENCODING).trim());
+ for (int j = 0; j < cols.length; j++) {
+ if (col.compareTo(cols[j]) == 0) {
+ assertEquals("Value for " + col + " should be: " + k
+ + ", but was fetched as: " + curval, curval, k);
+ numFetched++;
+ }
+ }
+ }
+ curVals.clear();
+ k++;
+ }
+ } finally {
+ s.close();
+ }
+ assertEquals("Should have fetched " + (numInserted / 2) + " values, but fetched " + numFetched, (numInserted / 2), numFetched);
+
+ System.out.println("Scanned " + (numFetched / 2)
+ + " rows from disk with specified start point. Elapsed time: "
+ + ((System.currentTimeMillis() - startTime) / 1000.0));
+ }
+
+ // Do a large number of writes. Disabled if not debugging because it takes a
+ // long time to run.
+ // Creates contents:body
+
+ private void batchWrite() throws IOException {
+ if(! StaticTestEnvironment.debugging) {
+ return;
+ }
+
+ long totalFlush = 0;
+ long totalCompact = 0;
+ long totalLog = 0;
+ long startTime = System.currentTimeMillis();
+
+ // 1M writes
+
+ int valsize = 1000;
+ for (int k = FIRST_ROW; k <= N_ROWS; k++) {
+      // Build an arbitrary ~1000-byte string from the current time
+ String randstr1 = "" + System.currentTimeMillis();
+ StringBuffer buf1 = new StringBuffer("val_" + k + "__");
+ while (buf1.length() < valsize) {
+ buf1.append(randstr1);
+ }
+
+ // Write to the HRegion
+ long writeid = region.startUpdate(new Text("row_" + k));
+ region.put(writeid, CONTENTS_BODY,
+ buf1.toString().getBytes(HConstants.UTF8_ENCODING));
+ region.commit(writeid, System.currentTimeMillis());
+ if (k > 0 && k % (N_ROWS / 100) == 0) {
+ System.out.println("Flushing write #" + k);
+
+ long flushStart = System.currentTimeMillis();
+ region.flushcache();
+ long flushEnd = System.currentTimeMillis();
+ totalFlush += (flushEnd - flushStart);
+
+ if (k % (N_ROWS / 10) == 0) {
+ System.out.print("Rolling log...");
+ long logStart = System.currentTimeMillis();
+ log.rollWriter();
+ long logEnd = System.currentTimeMillis();
+ totalLog += (logEnd - logStart);
+ System.out.println(" elapsed time: " + ((logEnd - logStart) / 1000.0));
+ }
+ }
+ }
+ long startCompact = System.currentTimeMillis();
+ if(r.compactIfNeeded()) {
+ totalCompact = System.currentTimeMillis() - startCompact;
+ System.out.println("Region compacted - elapsedTime: " + (totalCompact / 1000.0));
+
+ } else {
+ System.out.println("No compaction required.");
+ }
+ long endTime = System.currentTimeMillis();
+
+ long totalElapsed = (endTime - startTime);
+ System.out.println();
+ System.out.println("Batch-write complete.");
+ System.out.println("Wrote " + N_ROWS + " rows, each of ~" + valsize + " bytes");
+ System.out.println("Total flush-time: " + (totalFlush / 1000.0));
+ System.out.println("Total compact-time: " + (totalCompact / 1000.0));
+ System.out.println("Total log-time: " + (totalLog / 1000.0));
+ System.out.println("Total time elapsed: " + (totalElapsed / 1000.0));
+ System.out.println("Total time, rows/second: " + (N_ROWS / (totalElapsed / 1000.0)));
+ System.out.println("Adjusted time (not including flush, compact, or log): " + ((totalElapsed - totalFlush - totalCompact - totalLog) / 1000.0));
+ System.out.println("Adjusted time, rows/second: " + (N_ROWS / ((totalElapsed - totalFlush - totalCompact - totalLog) / 1000.0)));
+ System.out.println();
+
+ }
+
+  // NOTE: This test depends on batchWrite succeeding
+ private void splitAndMerge() throws IOException {
+ Path oldRegionPath = r.getRegionDir();
+ long startTime = System.currentTimeMillis();
+ HRegion subregions[] = r.splitRegion(this);
+ if (subregions != null) {
+ System.out.println("Split region elapsed time: "
+ + ((System.currentTimeMillis() - startTime) / 1000.0));
+
+ assertEquals("Number of subregions", subregions.length, 2);
+
+ // Now merge it back together
+
+ Path oldRegion1 = subregions[0].getRegionDir();
+ Path oldRegion2 = subregions[1].getRegionDir();
+ startTime = System.currentTimeMillis();
+ r = HRegion.closeAndMerge(subregions[0], subregions[1]);
+ region = new HRegionIncommon(r);
+ System.out.println("Merge regions elapsed time: "
+ + ((System.currentTimeMillis() - startTime) / 1000.0));
+ fs.delete(oldRegion1);
+ fs.delete(oldRegion2);
+ fs.delete(oldRegionPath);
+ }
+ }
+
+ /**
+ * {@inheritDoc}
+ */
+ public void closing(@SuppressWarnings("unused") final Text regionName) {
+ // We don't use this here. It is only for the HRegionServer
+ }
+
+ /**
+ * {@inheritDoc}
+ */
+ public void closed(@SuppressWarnings("unused") final Text regionName) {
+ // We don't use this here. It is only for the HRegionServer
+ }
+
+ // This test verifies that everything is still there after splitting and merging
+
+ private void read() throws IOException {
+
+    // First verify the data written by basic()
+
+ Text[] cols = new Text[] {
+ new Text(ANCHORNUM + "[0-9]+"),
+ new Text(CONTENTS_BASIC)
+ };
+
+ long startTime = System.currentTimeMillis();
+
+ HScannerInterface s =
+ r.getScanner(cols, new Text(), System.currentTimeMillis(), null);
+
+ try {
+
+ int contentsFetched = 0;
+ int anchorFetched = 0;
+ HStoreKey curKey = new HStoreKey();
+ TreeMap<Text, byte []> curVals = new TreeMap<Text, byte []>();
+ int k = 0;
+ while(s.next(curKey, curVals)) {
+ for(Iterator<Text> it = curVals.keySet().iterator(); it.hasNext(); ) {
+ Text col = it.next();
+ byte [] val = curVals.get(col);
+ String curval = new String(val, HConstants.UTF8_ENCODING).trim();
+
+ if(col.compareTo(CONTENTS_BASIC) == 0) {
+ assertTrue("Error at:" + curKey.getRow() + "/" + curKey.getTimestamp()
+ + ", Value for " + col + " should start with: " + CONTENTSTR
+ + ", but was fetched as: " + curval,
+ curval.startsWith(CONTENTSTR));
+ contentsFetched++;
+
+ } else if(col.toString().startsWith(ANCHORNUM)) {
+ assertTrue("Error at:" + curKey.getRow() + "/" + curKey.getTimestamp()
+ + ", Value for " + col + " should start with: " + ANCHORSTR
+ + ", but was fetched as: " + curval,
+ curval.startsWith(ANCHORSTR));
+ anchorFetched++;
+
+ } else {
+ System.out.println("UNEXPECTED COLUMN " + col);
+ }
+ }
+ curVals.clear();
+ k++;
+ }
+ assertEquals("Expected " + NUM_VALS + " " + CONTENTS_BASIC + " values, but fetched " + contentsFetched, NUM_VALS, contentsFetched);
+ assertEquals("Expected " + NUM_VALS + " " + ANCHORNUM + " values, but fetched " + anchorFetched, NUM_VALS, anchorFetched);
+
+ System.out.println("Scanned " + NUM_VALS
+ + " rows from disk. Elapsed time: "
+ + ((System.currentTimeMillis() - startTime) / 1000.0));
+
+ } finally {
+ s.close();
+ }
+
+    // Verify scan() data
+
+ cols = new Text[] {
+ CONTENTS_FIRSTCOL,
+ ANCHOR_SECONDCOL
+ };
+
+ startTime = System.currentTimeMillis();
+
+ s = r.getScanner(cols, new Text(), System.currentTimeMillis(), null);
+ try {
+ int numFetched = 0;
+ HStoreKey curKey = new HStoreKey();
+ TreeMap<Text, byte []> curVals = new TreeMap<Text, byte []>();
+ int k = 0;
+ while(s.next(curKey, curVals)) {
+ for(Iterator<Text> it = curVals.keySet().iterator(); it.hasNext(); ) {
+ Text col = it.next();
+ byte [] val = curVals.get(col);
+ int curval =
+ Integer.parseInt(new String(val, HConstants.UTF8_ENCODING).trim());
+
+ for (int j = 0; j < cols.length; j++) {
+ if (col.compareTo(cols[j]) == 0) {
+ assertEquals("Value for " + col + " should be: " + k
+ + ", but was fetched as: " + curval, curval, k);
+ numFetched++;
+ }
+ }
+ }
+ curVals.clear();
+ k++;
+ }
+ assertEquals("Inserted " + numInserted + " values, but fetched " + numFetched, numInserted, numFetched);
+
+ System.out.println("Scanned " + (numFetched / 2)
+ + " rows from disk. Elapsed time: "
+ + ((System.currentTimeMillis() - startTime) / 1000.0));
+
+ } finally {
+ s.close();
+ }
+
+    // Verify batchWrite() data
+
+ if(StaticTestEnvironment.debugging) {
+ startTime = System.currentTimeMillis();
+ s = r.getScanner(new Text[] { CONTENTS_BODY }, new Text(),
+ System.currentTimeMillis(), null);
+
+ try {
+ int numFetched = 0;
+ HStoreKey curKey = new HStoreKey();
+ TreeMap<Text, byte []> curVals = new TreeMap<Text, byte []>();
+ int k = 0;
+ while(s.next(curKey, curVals)) {
+ for(Iterator<Text> it = curVals.keySet().iterator(); it.hasNext(); ) {
+ Text col = it.next();
+ byte [] val = curVals.get(col);
+ assertTrue(col.compareTo(CONTENTS_BODY) == 0);
+ assertNotNull(val);
+ numFetched++;
+ }
+ curVals.clear();
+ }
+ assertEquals("Inserted " + N_ROWS + " values, but fetched " + numFetched, N_ROWS, numFetched);
+
+ System.out.println("Scanned " + N_ROWS
+ + " rows from disk. Elapsed time: "
+ + ((System.currentTimeMillis() - startTime) / 1000.0));
+
+ } finally {
+ s.close();
+ }
+ }
+
+ // Test a scanner which only specifies the column family name
+
+ cols = new Text[] {
+ new Text("anchor:")
+ };
+
+ startTime = System.currentTimeMillis();
+
+ s = r.getScanner(cols, new Text(), System.currentTimeMillis(), null);
+
+ try {
+ int fetched = 0;
+ HStoreKey curKey = new HStoreKey();
+ TreeMap<Text, byte []> curVals = new TreeMap<Text, byte []>();
+ while(s.next(curKey, curVals)) {
+ for(Iterator<Text> it = curVals.keySet().iterator(); it.hasNext(); ) {
+ it.next();
+ fetched++;
+ }
+ curVals.clear();
+ }
+ assertEquals("Inserted " + (NUM_VALS + numInserted/2) + " values, but fetched " + fetched, (NUM_VALS + numInserted/2), fetched);
+
+ System.out.println("Scanned " + fetched
+ + " rows from disk. Elapsed time: "
+ + ((System.currentTimeMillis() - startTime) / 1000.0));
+
+ } finally {
+ s.close();
+ }
+ }
+
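+ /*
+ * Recursively deletes the passed file or directory.
+ */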
+ private static void deleteFile(File f) {
+ if(f.isDirectory()) {
+ File[] children = f.listFiles();
+ for(int i = 0; i < children.length; i++) {
+ deleteFile(children[i]);
+ }
+ }
+ f.delete();
+ }
+
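+ /*
+ * Closes the region and its log, then removes the test data from DFS.
+ */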
+ private void cleanup() {
+ try {
+ r.close();
+ r = null;
+ log.closeAndDelete();
+ log = null;
+ } catch (IOException e) {
+ e.printStackTrace();
+ }
+
+ // Delete all the DFS files
+
+ deleteFile(new File(System.getProperty("test.build.data"), "dfs"));
+ }
+}
Added: hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestHStoreFile.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestHStoreFile.java?rev=630550&view=auto
==============================================================================
--- hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestHStoreFile.java (added)
+++ hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestHStoreFile.java Sat Feb 23 16:19:34 2008
@@ -0,0 +1,392 @@
+/**
+ * Copyright 2007 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver;
+
+import java.io.IOException;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.dfs.MiniDFSCluster;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
+import org.apache.hadoop.io.MapFile;
+import org.apache.hadoop.io.SequenceFile;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.io.WritableComparable;
+import org.apache.hadoop.hbase.HBaseTestCase;
+
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.StaticTestEnvironment;
+/**
+ * Test HStoreFile
+ */
+public class TestHStoreFile extends HBaseTestCase {
+ static final Log LOG = LogFactory.getLog(TestHStoreFile.class);
+ private static final String DIR = "/";
+ private MiniDFSCluster cluster;
+ private Path dir = null;
+
+ /** {@inheritDoc} */
+ @Override
+ public void setUp() throws Exception {
+ try {
+ this.cluster = new MiniDFSCluster(this.conf, 2, true, (String[])null);
+ // Set the hbase.rootdir to be the home directory in mini dfs.
+ this.conf.set(HConstants.HBASE_DIR,
+ this.cluster.getFileSystem().getHomeDirectory().toString());
+ this.dir = new Path(DIR, getName());
+ } catch (IOException e) {
+ StaticTestEnvironment.shutdownDfs(cluster);
+ throw e;
+ }
+ super.setUp();
+ }
+
+ /** {@inheritDoc} */
+ @Override
+ public void tearDown() throws Exception {
+ super.tearDown();
+ StaticTestEnvironment.shutdownDfs(cluster);
+ // ReflectionUtils.printThreadInfo(new PrintWriter(System.out),
+ // "Temporary end-of-test thread dump debugging HADOOP-2040: " + getName());
+ }
+
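+ /*
+ * Writes a MapFile of HStoreKey/ImmutableBytesWritable entries for every
+ * two-character row from 'aa' through 'zz' and returns its path.
+ */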
+ private Path writeMapFile(final String name)
+ throws IOException {
+ Path path = new Path(DIR, name);
+ MapFile.Writer writer = new MapFile.Writer(this.conf, fs, path.toString(),
+ HStoreKey.class, ImmutableBytesWritable.class);
+ writeStoreFile(writer);
+ return path;
+ }
+
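+ /*
+ * Writes a MapFile with one entry per single-character row 'a' through
+ * 'z' -- far fewer entries than the default MapFile index interval.
+ */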
+ private Path writeSmallMapFile(final String name)
+ throws IOException {
+ Path path = new Path(DIR, name);
+ MapFile.Writer writer = new MapFile.Writer(this.conf, fs, path.toString(),
+ HStoreKey.class, ImmutableBytesWritable.class);
+ try {
+ for (char d = FIRST_CHAR; d <= LAST_CHAR; d++) {
+ byte[] b = new byte[] {(byte)d};
+ Text t = new Text(new String(b, HConstants.UTF8_ENCODING));
+ writer.append(new HStoreKey(t, t, System.currentTimeMillis()),
+ new ImmutableBytesWritable(t.getBytes()));
+ }
+ } finally {
+ writer.close();
+ }
+ return path;
+ }
+
+ /*
+ * Writes HStoreKey and ImmutableBytes data to passed writer and
+ * then closes it.
+ * @param writer
+ * @throws IOException
+ */
+ private void writeStoreFile(final MapFile.Writer writer)
+ throws IOException {
+ try {
+ for (char d = FIRST_CHAR; d <= LAST_CHAR; d++) {
+ for (char e = FIRST_CHAR; e <= LAST_CHAR; e++) {
+ byte[] b = new byte[] { (byte) d, (byte) e };
+ Text t = new Text(new String(b, HConstants.UTF8_ENCODING));
+ writer.append(new HStoreKey(t, t, System.currentTimeMillis()),
+ new ImmutableBytesWritable(t.getBytes()));
+ }
+ }
+ } finally {
+ writer.close();
+ }
+ }
+
+ /**
+ * Test that our mechanism of writing store files in one region to reference
+ * store files in other regions works.
+ * @throws IOException
+ */
+ public void testReference()
+ throws IOException {
+ // Make a store file and write data to it.
+ HStoreFile hsf = new HStoreFile(this.conf, this.fs, this.dir, getName(),
+ new Text("colfamily"), 1234567890L, null);
+ MapFile.Writer writer =
+ hsf.getWriter(this.fs, SequenceFile.CompressionType.NONE, null);
+ writeStoreFile(writer);
+ MapFile.Reader reader = hsf.getReader(this.fs, null);
+ // Split on a row, not in middle of row. Midkey returned by reader
+ // may be in middle of row. Create new one with empty column and
+ // timestamp.
+ HStoreKey midkey = new HStoreKey(((HStoreKey)reader.midKey()).getRow());
+ HStoreKey hsk = new HStoreKey();
+ reader.finalKey(hsk);
+ Text finalKey = hsk.getRow();
+ // Make a reference for the top half of the just written file.
+ HStoreFile.Reference reference =
+ new HStoreFile.Reference(hsf.getEncodedRegionName(), hsf.getFileId(),
+ midkey, HStoreFile.Range.top);
+ HStoreFile refHsf = new HStoreFile(this.conf, this.fs,
+ new Path(DIR, getName()), getName() + "_reference", hsf.getColFamily(),
+ 456, reference);
+ // Assert that reference files are written and that we can write and
+ // read the info reference file at least.
+ refHsf.writeReferenceFiles(this.fs);
+ assertTrue(this.fs.exists(refHsf.getMapFilePath()));
+ assertTrue(this.fs.exists(refHsf.getInfoFilePath()));
+ HStoreFile.Reference otherReference =
+ HStore.readSplitInfo(refHsf.getInfoFilePath(), this.fs);
+ assertEquals(reference.getEncodedRegionName(),
+ otherReference.getEncodedRegionName());
+ assertEquals(reference.getFileId(),
+ otherReference.getFileId());
+ assertEquals(reference.getMidkey().toString(),
+ otherReference.getMidkey().toString());
+ // Now confirm that I can read from the reference and that it only gets
+ // keys from top half of the file.
+ MapFile.Reader halfReader = refHsf.getReader(this.fs, null);
+ HStoreKey key = new HStoreKey();
+ ImmutableBytesWritable value = new ImmutableBytesWritable();
+ boolean first = true;
+ while(halfReader.next(key, value)) {
+ if (first) {
+ assertEquals(key.getRow().toString(), midkey.getRow().toString());
+ first = false;
+ }
+ }
+ assertEquals(key.getRow().toString(), finalKey.toString());
+ }
+
+ /**
+ * Write a file and then assert that we can read from top and bottom halves
+ * using two HalfMapFiles.
+ * @throws Exception
+ */
+ public void testBasicHalfMapFile() throws Exception {
+ Path p = writeMapFile(getName());
+ WritableComparable midkey = getMidkey(p);
+ checkHalfMapFile(p, midkey);
+ }
+
+ /**
+ * Check HalfMapFile works even if file we're to go against is smaller than
+ * the default MapFile interval of 128: i.e. index gets entry every 128
+ * keys.
+ * @throws Exception
+ */
+ public void testSmallHalfMapFile() throws Exception {
+ Path p = writeSmallMapFile(getName());
+ // I know keys are a-z. Let the midkey we want to use be 'd'. See if
+ // HalfMapFiles work even if size of file is < than default MapFile
+ // interval.
+ checkHalfMapFile(p, new HStoreKey(new Text("d")));
+ }
+
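+ /*
+ * Opens the MapFile at the passed path and returns the midkey reported
+ * by its reader, logging the first, mid, and last keys on the way.
+ */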
+ private WritableComparable getMidkey(final Path p) throws IOException {
+ MapFile.Reader reader =
+ new MapFile.Reader(this.fs, p.toString(), this.conf);
+ HStoreKey key = new HStoreKey();
+ ImmutableBytesWritable value = new ImmutableBytesWritable();
+ reader.next(key, value);
+ String firstKey = key.toString();
+ WritableComparable midkey = reader.midKey();
+ reader.finalKey(key);
+ LOG.info("First key " + firstKey + ", midkey " + midkey.toString()
+ + ", last key " + key.toString());
+ reader.close();
+ return midkey;
+ }
+
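+ /*
+ * Reads the passed MapFile through bottom and top HalfMapFileReaders
+ * split at midkey, asserting each key falls on the correct side of the
+ * split, then repeats the checks with midkeys that sort before the
+ * first key and after the last key in the file.
+ */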
+ private void checkHalfMapFile(final Path p, WritableComparable midkey)
+ throws IOException {
+ MapFile.Reader top = null;
+ MapFile.Reader bottom = null;
+ HStoreKey key = new HStoreKey();
+ ImmutableBytesWritable value = new ImmutableBytesWritable();
+ String previous = null;
+ try {
+ // Now make two HalfMapFiles and assert they can read the full backing
+ // file, one from the top and the other from the bottom.
+ // Test bottom half first.
+ bottom = new HStoreFile.HalfMapFileReader(this.fs, p.toString(),
+ this.conf, HStoreFile.Range.bottom, midkey);
+ boolean first = true;
+ while (bottom.next(key, value)) {
+ previous = key.toString();
+ if (first) {
+ first = false;
+ LOG.info("First in bottom: " + previous);
+ }
+ assertTrue(key.compareTo(midkey) < 0);
+ }
+ if (previous != null) {
+ LOG.info("Last in bottom: " + previous.toString());
+ }
+ // Now test reading from the top.
+ top = new HStoreFile.HalfMapFileReader(this.fs, p.toString(), this.conf,
+ HStoreFile.Range.top, midkey);
+ first = true;
+ while (top.next(key, value)) {
+ assertTrue(key.compareTo(midkey) >= 0);
+ if (first) {
+ first = false;
+ assertEquals(((HStoreKey)midkey).getRow().toString(),
+ key.getRow().toString());
+ LOG.info("First in top: " + key.toString());
+ }
+ }
+ LOG.info("Last in top: " + key.toString());
+ top.getClosest(midkey, value);
+ // Assert value is same as key.
+ assertEquals(new String(value.get(), HConstants.UTF8_ENCODING),
+ ((HStoreKey) midkey).getRow().toString());
+
+ // Next test using a midkey that does not exist in the file.
+ // First, do a key that is < than first key. Ensure splits behave
+ // properly.
+ WritableComparable badkey = new HStoreKey(new Text(" "));
+ bottom = new HStoreFile.HalfMapFileReader(this.fs, p.toString(),
+ this.conf, HStoreFile.Range.bottom, badkey);
+ // When badkey is < than the bottom, should return no values.
+ assertFalse(bottom.next(key, value));
+ // Now read from the top.
+ top = new HStoreFile.HalfMapFileReader(this.fs, p.toString(), this.conf,
+ HStoreFile.Range.top, badkey);
+ first = true;
+ while (top.next(key, value)) {
+ assertTrue(key.compareTo(badkey) >= 0);
+ if (first) {
+ first = false;
+ LOG.info("First top when key < bottom: " + key.toString());
+ String tmp = key.getRow().toString();
+ for (int i = 0; i < tmp.length(); i++) {
+ assertTrue(tmp.charAt(i) == 'a');
+ }
+ }
+ }
+ LOG.info("Last top when key < bottom: " + key.toString());
+ String tmp = key.getRow().toString();
+ for (int i = 0; i < tmp.length(); i++) {
+ assertTrue(tmp.charAt(i) == 'z');
+ }
+
+ // Test when badkey is > than last key in file ('||' > 'zz').
+ badkey = new HStoreKey(new Text("|||"));
+ bottom = new HStoreFile.HalfMapFileReader(this.fs, p.toString(),
+ this.conf, HStoreFile.Range.bottom, badkey);
+ first = true;
+ while (bottom.next(key, value)) {
+ if (first) {
+ first = false;
+ LOG.info("First bottom when key > top: " + key.toString());
+ tmp = key.getRow().toString();
+ for (int i = 0; i < tmp.length(); i++) {
+ assertTrue(tmp.charAt(i) == 'a');
+ }
+ }
+ }
+ LOG.info("Last bottom when key > top: " + key.toString());
+ tmp = key.getRow().toString();
+ for (int i = 0; i < tmp.length(); i++) {
+ assertTrue(tmp.charAt(i) == 'z');
+ }
+ // Now look at top. Should not return any values.
+ top = new HStoreFile.HalfMapFileReader(this.fs, p.toString(), this.conf,
+ HStoreFile.Range.top, badkey);
+ assertFalse(top.next(key, value));
+
+ } finally {
+ if (top != null) {
+ top.close();
+ }
+ if (bottom != null) {
+ bottom.close();
+ }
+ fs.delete(p);
+ }
+ }
+
+ /**
+ * Assert HalfMapFile does the right thing when the midkey does not exist
+ * in the backing file (it is larger or smaller than any of the backing
+ * mapfile's keys).
+ *
+ * @throws Exception
+ */
+ public void testOutOfRangeMidkeyHalfMapFile() throws Exception {
+ MapFile.Reader top = null;
+ MapFile.Reader bottom = null;
+ HStoreKey key = new HStoreKey();
+ ImmutableBytesWritable value = new ImmutableBytesWritable();
+ Path p = writeMapFile(getName());
+ try {
+ try {
+ // Test using a midkey that does not exist in the file.
+ // First, do a key that is < than first key. Ensure splits behave
+ // properly.
+ HStoreKey midkey = new HStoreKey(new Text(" "));
+ bottom = new HStoreFile.HalfMapFileReader(this.fs, p.toString(),
+ this.conf, HStoreFile.Range.bottom, midkey);
+ // When midkey is < than the bottom, should return no values.
+ assertFalse(bottom.next(key, value));
+ // Now read from the top.
+ top = new HStoreFile.HalfMapFileReader(this.fs, p.toString(),
+ this.conf, HStoreFile.Range.top, midkey);
+ boolean first = true;
+ while (top.next(key, value)) {
+ assertTrue(key.compareTo(midkey) >= 0);
+ if (first) {
+ first = false;
+ LOG.info("First top when key < bottom: " + key.toString());
+ assertEquals("aa", key.getRow().toString());
+ }
+ }
+ LOG.info("Last top when key < bottom: " + key.toString());
+ assertEquals("zz", key.getRow().toString());
+
+ // Test when midkey is > than last key in file ('||' > 'zz').
+ midkey = new HStoreKey(new Text("|||"));
+ bottom = new HStoreFile.HalfMapFileReader(this.fs, p.toString(),
+ this.conf, HStoreFile.Range.bottom, midkey);
+ first = true;
+ while (bottom.next(key, value)) {
+ if (first) {
+ first = false;
+ LOG.info("First bottom when key > top: " + key.toString());
+ assertEquals("aa", key.getRow().toString());
+ }
+ }
+ LOG.info("Last bottom when key > top: " + key.toString());
+ assertEquals("zz", key.getRow().toString());
+ // Now look at top. Should not return any values.
+ top = new HStoreFile.HalfMapFileReader(this.fs, p.toString(),
+ this.conf, HStoreFile.Range.top, midkey);
+ assertFalse(top.next(key, value));
+ } finally {
+ if (top != null) {
+ top.close();
+ }
+ if (bottom != null) {
+ bottom.close();
+ }
+ }
+ } finally {
+ this.fs.delete(p);
+ }
+ }
+}
Added: hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestLogRolling.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestLogRolling.java?rev=630550&view=auto
==============================================================================
--- hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestLogRolling.java (added)
+++ hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestLogRolling.java Sat Feb 23 16:19:34 2008
@@ -0,0 +1,203 @@
+/**
+ * Copyright 2007 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.dfs.MiniDFSCluster;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.HBaseTestCase;
+import org.apache.hadoop.hbase.MiniHBaseCluster;
+
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.StaticTestEnvironment;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+
+/**
+ * Test log deletion as logs are rolled.
+ */
+public class TestLogRolling extends HBaseTestCase {
+ private static final Log LOG = LogFactory.getLog(TestLogRolling.class);
+ private MiniDFSCluster dfs;
+ private MiniHBaseCluster cluster;
+ private HRegionServer server;
+ private HLog log;
+ private String tableName;
+ private byte[] value;
+
+ /**
+ * constructor
+ * @throws Exception
+ */
+ public TestLogRolling() throws Exception {
+ super();
+ try {
+ this.dfs = null;
+ this.cluster = null;
+ this.server = null;
+ this.log = null;
+ this.tableName = null;
+ this.value = null;
+
+ // Force a region split once a store file grows past 768KB
+ conf.setLong("hbase.hregion.max.filesize", 768L * 1024L);
+
+ // We roll the log after every 256 writes
+ conf.setInt("hbase.regionserver.maxlogentries", 256);
+
+ // Flush less frequently updated regions after every two optional
+ // flush intervals
+ conf.setInt("hbase.hregion.memcache.optionalflushcount", 2);
+
+ // We flush the cache after every 8192 bytes
+ conf.setInt("hbase.hregion.memcache.flush.size", 8192);
+
+ // Make lease timeout longer, lease checks less frequent
+ conf.setInt("hbase.master.lease.period", 10 * 1000);
+ conf.setInt("hbase.master.lease.thread.wakefrequency", 5 * 1000);
+
+ // Increase the amount of time between client retries
+ conf.setLong("hbase.client.pause", 15 * 1000);
+
+ // Reduce thread wake frequency so that other threads can get
+ // a chance to run.
+ conf.setInt(HConstants.THREAD_WAKE_FREQUENCY, 2 * 1000);
+
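+ // Build a value at least 1000 bytes long by repeating the class name;
+ // each write then adds appreciable bulk to the log and the memcache.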
+ String className = this.getClass().getName();
+ StringBuilder v = new StringBuilder(className);
+ while (v.length() < 1000) {
+ v.append(className);
+ }
+ value = v.toString().getBytes(HConstants.UTF8_ENCODING);
+
+ } catch (Exception e) {
+ LOG.fatal("error in constructor", e);
+ throw e;
+ }
+ }
+
+ /** {@inheritDoc} */
+ @Override
+ public void setUp() throws Exception {
+ try {
+ super.setUp();
+ dfs = new MiniDFSCluster(conf, 2, true, (String[]) null);
+ // Set the hbase.rootdir to be the home directory in mini dfs.
+ this.conf.set(HConstants.HBASE_DIR,
+ this.dfs.getFileSystem().getHomeDirectory().toString());
+ } catch (Exception e) {
+ StaticTestEnvironment.shutdownDfs(dfs);
+ LOG.fatal("error during setUp: ", e);
+ throw e;
+ }
+ }
+
+ /** {@inheritDoc} */
+ @Override
+ public void tearDown() throws Exception {
+ try {
+ super.tearDown();
+ if (cluster != null) { // shutdown mini HBase cluster
+ cluster.shutdown();
+ }
+ StaticTestEnvironment.shutdownDfs(dfs);
+ } catch (Exception e) {
+ LOG.fatal("error in tearDown", e);
+ throw e;
+ }
+ }
+
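+ /*
+ * Starts a one-region-server cluster, creates the test table, and writes
+ * 2048 rows -- enough, with the settings above, to roll the log several
+ * times.
+ */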
+ private void startAndWriteData() throws Exception {
+ cluster = new MiniHBaseCluster(conf, 1, dfs, true);
+ try {
+ Thread.sleep(10 * 1000); // Wait for region server to start
+ } catch (InterruptedException e) {
+ // continue
+ }
+
+ this.server = cluster.getRegionThreads().get(0).getRegionServer();
+ this.log = server.getLog();
+
+ // When the META table can be opened, the region servers are running
+ HTable meta = new HTable(conf, HConstants.META_TABLE_NAME);
+
+ // Create the test table and open it
+ HTableDescriptor desc = new HTableDescriptor(tableName);
+ desc.addFamily(new HColumnDescriptor(HConstants.COLUMN_FAMILY.toString()));
+ HBaseAdmin admin = new HBaseAdmin(conf);
+ admin.createTable(desc);
+ HTable table = new HTable(conf, new Text(tableName));
+
+ for (int i = 1; i <= 2048; i++) { // 2048 writes should cause 8 log rolls
+ long lockid =
+ table.startUpdate(new Text("row" + String.format("%1$04d", i)));
+ table.put(lockid, HConstants.COLUMN_FAMILY, value);
+ table.commit(lockid);
+
+ if (i % 256 == 0) {
+ // After every 256 writes sleep to let the log roller run
+
+ try {
+ Thread.sleep(2000);
+ } catch (InterruptedException e) {
+ // continue
+ }
+ }
+ }
+ }
+
+ /**
+ * Tests that logs are deleted
+ *
+ * @throws Exception
+ */
+ public void testLogRolling() throws Exception {
+ tableName = getName();
+ try {
+ startAndWriteData();
+ LOG.info("after writing there are " + log.getNumLogFiles() + " log files");
+
+ // flush all regions
+
+ List<HRegion> regions =
+ new ArrayList<HRegion>(server.getOnlineRegions().values());
+ for (HRegion r: regions) {
+ r.flushcache();
+ }
+
+ // Now roll the log
+ log.rollWriter();
+
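+ // With every region flushed, the old logs are no longer needed, so
+ // rolling should leave at most the current log and one other.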
+ int count = log.getNumLogFiles();
+ LOG.info("after flushing all regions and rolling logs there are " +
+ log.getNumLogFiles() + " log files");
+ assertTrue(count <= 2);
+ } catch (Exception e) {
+ LOG.fatal("unexpected exception", e);
+ throw e;
+ }
+ }
+
+}