Posted to commits@hbase.apache.org by br...@apache.org on 2008/02/24 01:19:44 UTC
svn commit: r630550 [7/7] - in /hadoop/hbase/trunk: bin/ conf/
src/java/org/apache/hadoop/hbase/ src/java/org/apache/hadoop/hbase/client/
src/java/org/apache/hadoop/hbase/filter/
src/java/org/apache/hadoop/hbase/generated/regionserver/ src/java/org/apa...
Added: hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestRegionServerExit.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestRegionServerExit.java?rev=630550&view=auto
==============================================================================
--- hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestRegionServerExit.java (added)
+++ hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestRegionServerExit.java Sat Feb 23 16:19:34 2008
@@ -0,0 +1,210 @@
+/**
+ * Copyright 2007 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver;
+
+import java.io.IOException;
+import java.util.Collection;
+import java.util.List;
+import java.util.TreeMap;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.io.Text;
+
+import org.apache.hadoop.hbase.io.BatchUpdate;
+import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
+
+import org.apache.hadoop.hbase.HBaseClusterTestCase;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.LocalHBaseCluster;
+import org.apache.hadoop.hbase.HScannerInterface;
+
+/**
+ * Tests region server failover, both when a region server exits cleanly
+ * and when it aborts.
+ */
+public class TestRegionServerExit extends HBaseClusterTestCase {
+ final Log LOG = LogFactory.getLog(this.getClass().getName());
+ HTable table;
+
+ /** constructor */
+ public TestRegionServerExit() {
+ super(2);
+ conf.setInt("ipc.client.connect.max.retries", 5); // reduce ipc retries
+ conf.setInt("ipc.client.timeout", 10000); // and ipc timeout
+ conf.setInt("hbase.client.pause", 10000); // increase client timeout
+ conf.setInt("hbase.client.retries.number", 10); // increase HBase retries
+ }
+
+ /**
+ * Test abort of region server.
+ * @throws IOException
+ */
+ public void testAbort() throws IOException {
+ // When the META table can be opened, the region servers are running
+ new HTable(conf, HConstants.META_TABLE_NAME);
+ // Create table and add a row.
+ final String tableName = getName();
+ Text row = createTableAndAddRow(tableName);
+ // Start up a new region server to take over serving of root and meta
+ // after we shut down the current meta/root host.
+ this.cluster.startRegionServer();
+    // Now abort the meta region server and wait for the meta region to be
+    // reassigned.
+ stopOrAbortMetaRegionServer(true);
+ // Verify that everything is back up.
+ Thread t = startVerificationThread(tableName, row);
+ t.start();
+ threadDumpingJoin(t);
+ }
+
+ /**
+   * Test clean exit of region server.
+ * @throws IOException
+ */
+ public void testCleanExit() throws IOException {
+ // When the META table can be opened, the region servers are running
+ new HTable(this.conf, HConstants.META_TABLE_NAME);
+ // Create table and add a row.
+ final String tableName = getName();
+ Text row = createTableAndAddRow(tableName);
+ // Start up a new region server to take over serving of root and meta
+ // after we shut down the current meta/root host.
+ this.cluster.startRegionServer();
+    // Now cleanly shut down the meta region server and wait for the meta
+    // region to be reassigned.
+ stopOrAbortMetaRegionServer(false);
+ // Verify that everything is back up.
+ Thread t = startVerificationThread(tableName, row);
+ t.start();
+ threadDumpingJoin(t);
+ }
+
+ private Text createTableAndAddRow(final String tableName) throws IOException {
+ HTableDescriptor desc = new HTableDescriptor(tableName);
+ desc.addFamily(new HColumnDescriptor(HConstants.COLUMN_FAMILY.toString()));
+ HBaseAdmin admin = new HBaseAdmin(conf);
+ admin.createTable(desc);
+ // put some values in the table
+ this.table = new HTable(conf, new Text(tableName));
+ final Text row = new Text("row1");
+ BatchUpdate b = new BatchUpdate(row);
+ b.put(HConstants.COLUMN_FAMILY,
+ tableName.getBytes(HConstants.UTF8_ENCODING));
+ table.commit(b);
+ return row;
+ }
+
+ /*
+ * Stop the region server serving the meta region and wait for the meta region
+ * to get reassigned. This is always the most problematic case.
+ *
+   * @param abort true if the region server should be aborted; if false it
+   * is shut down cleanly.
+ */
+ private void stopOrAbortMetaRegionServer(boolean abort) {
+ List<LocalHBaseCluster.RegionServerThread> regionThreads =
+ cluster.getRegionThreads();
+
+ int server = -1;
+ for (int i = 0; i < regionThreads.size() && server == -1; i++) {
+ HRegionServer s = regionThreads.get(i).getRegionServer();
+ Collection<HRegion> regions = s.getOnlineRegions().values();
+ for (HRegion r : regions) {
+ if (r.getTableDesc().getName().equals(HConstants.META_TABLE_NAME)) {
+ server = i;
+ }
+ }
+ }
+ if (server == -1) {
+ LOG.fatal("could not find region server serving meta region");
+ fail();
+ }
+ if (abort) {
+ this.cluster.abortRegionServer(server);
+
+ } else {
+ this.cluster.stopRegionServer(server);
+ }
+ LOG.info(this.cluster.waitOnRegionServer(server) + " has been " +
+ (abort ? "aborted" : "shut down"));
+ }
+
+ /*
+ * Run verification in a thread so I can concurrently run a thread-dumper
+ * while we're waiting (because in this test sometimes the meta scanner
+   * looks to be stuck).
+ * @param tableName Name of table to find.
+ * @param row Row we expect to find.
+   * @return Verification thread. Caller needs to call start on it.
+ */
+ private Thread startVerificationThread(final String tableName,
+ final Text row) {
+ Runnable runnable = new Runnable() {
+ public void run() {
+ try {
+ // Now try to open a scanner on the meta table. Should stall until
+ // meta server comes back up.
+ HTable t = new HTable(conf, HConstants.META_TABLE_NAME);
+ HScannerInterface s =
+ t.obtainScanner(HConstants.COLUMN_FAMILY_ARRAY, new Text());
+ s.close();
+
+ } catch (IOException e) {
+ LOG.fatal("could not re-open meta table because", e);
+ fail();
+ }
+ HScannerInterface scanner = null;
+ try {
+ // Verify that the client can find the data after the region has moved
+ // to a different server
+ scanner =
+ table.obtainScanner(HConstants.COLUMN_FAMILY_ARRAY, new Text());
+ LOG.info("Obtained scanner " + scanner);
+ HStoreKey key = new HStoreKey();
+ TreeMap<Text, byte[]> results = new TreeMap<Text, byte[]>();
+ while (scanner.next(key, results)) {
+ assertTrue(key.getRow().equals(row));
+ assertEquals(1, results.size());
+ byte[] bytes = results.get(HConstants.COLUMN_FAMILY);
+ assertNotNull(bytes);
+ assertTrue(tableName.equals(new String(bytes,
+ HConstants.UTF8_ENCODING)));
+ }
+ LOG.info("Success!");
+ } catch (Exception e) {
+ e.printStackTrace();
+ fail();
+ } finally {
+ if (scanner != null) {
+ LOG.info("Closing scanner " + scanner);
+ try {
+ scanner.close();
+ } catch (IOException e) {
+ e.printStackTrace();
+ }
+ }
+ }
+ }
+ };
+ return new Thread(runnable);
+ }
+}
\ No newline at end of file
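
For context, a minimal sketch of the client round trip this test's
verification thread depends on, using only the 0.1-era client calls that
appear in this commit (HTable, BatchUpdate, obtainScanner); the table name
"demo" and column family "info:" are hypothetical, and the snippet assumes a
running cluster and a surrounding method that declares IOException:

    // Sketch, not part of the commit: write one row, then scan it back.
    HTable t = new HTable(conf, new Text("demo"));
    BatchUpdate b = new BatchUpdate(new Text("row1"));
    b.put(new Text("info:"), "value".getBytes(HConstants.UTF8_ENCODING));
    t.commit(b);
    HScannerInterface s =
      t.obtainScanner(new Text[] {new Text("info:")}, new Text());
    try {
      HStoreKey key = new HStoreKey();
      TreeMap<Text, byte[]> results = new TreeMap<Text, byte[]>();
      while (s.next(key, results)) {
        // key.getRow() names the current row; results maps column to value.
        results.clear();
      }
    } finally {
      s.close();
    }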
Added: hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestScanner.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestScanner.java?rev=630550&view=auto
==============================================================================
--- hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestScanner.java (added)
+++ hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestScanner.java Sat Feb 23 16:19:34 2008
@@ -0,0 +1,263 @@
+/**
+ * Copyright 2007 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver;
+
+import java.io.ByteArrayOutputStream;
+import java.io.DataOutputStream;
+import java.io.IOException;
+import java.util.TreeMap;
+
+import org.apache.hadoop.dfs.MiniDFSCluster;
+import org.apache.hadoop.hbase.util.Writables;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.hbase.HBaseTestCase;
+import org.apache.hadoop.hbase.HRegionInfo;
+
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HScannerInterface;
+import org.apache.hadoop.hbase.HServerAddress;
+import org.apache.hadoop.hbase.StaticTestEnvironment;
+
+/**
+ * Test of a long-lived scanner validating as we go.
+ */
+public class TestScanner extends HBaseTestCase {
+ private static final Text FIRST_ROW = new Text();
+ private static final Text[] COLS = {
+ HConstants.COLUMN_FAMILY
+ };
+ private static final Text[] EXPLICIT_COLS = {
+ HConstants.COL_REGIONINFO,
+ HConstants.COL_SERVER,
+ HConstants.COL_STARTCODE
+ };
+
+ private static final Text ROW_KEY =
+ new Text(HRegionInfo.rootRegionInfo.getRegionName());
+ private static final HRegionInfo REGION_INFO = HRegionInfo.rootRegionInfo;
+
+ private static final long START_CODE = Long.MAX_VALUE;
+
+ private MiniDFSCluster cluster = null;
+ private HRegion r;
+ private HRegionIncommon region;
+
+ /** {@inheritDoc} */
+ @Override
+ public void setUp() throws Exception {
+ cluster = new MiniDFSCluster(conf, 2, true, (String[])null);
+ // Set the hbase.rootdir to be the home directory in mini dfs.
+ this.conf.set(HConstants.HBASE_DIR,
+ this.cluster.getFileSystem().getHomeDirectory().toString());
+ super.setUp();
+  }
+
+ /** Compare the HRegionInfo we read from HBase to what we stored */
+ private void validateRegionInfo(byte [] regionBytes) throws IOException {
+ HRegionInfo info =
+ (HRegionInfo) Writables.getWritable(regionBytes, new HRegionInfo());
+
+ assertEquals(REGION_INFO.getRegionId(), info.getRegionId());
+ assertEquals(0, info.getStartKey().getLength());
+ assertEquals(0, info.getEndKey().getLength());
+ assertEquals(0, info.getRegionName().compareTo(REGION_INFO.getRegionName()));
+ assertEquals(0, info.getTableDesc().compareTo(REGION_INFO.getTableDesc()));
+ }
+
+ /** Use a scanner to get the region info and then validate the results */
+ private void scan(boolean validateStartcode, String serverName)
+ throws IOException {
+
+ HScannerInterface scanner = null;
+ TreeMap<Text, byte []> results = new TreeMap<Text, byte []>();
+ HStoreKey key = new HStoreKey();
+
+ Text[][] scanColumns = {
+ COLS,
+ EXPLICIT_COLS
+ };
+
+ for(int i = 0; i < scanColumns.length; i++) {
+ try {
+ scanner = r.getScanner(scanColumns[i], FIRST_ROW,
+ System.currentTimeMillis(), null);
+
+ while(scanner.next(key, results)) {
+ assertTrue(results.containsKey(HConstants.COL_REGIONINFO));
+ byte [] val = results.get(HConstants.COL_REGIONINFO);
+ validateRegionInfo(val);
+ if(validateStartcode) {
+ assertTrue(results.containsKey(HConstants.COL_STARTCODE));
+ val = results.get(HConstants.COL_STARTCODE);
+ assertNotNull(val);
+ assertFalse(val.length == 0);
+ long startCode = Writables.bytesToLong(val);
+ assertEquals(START_CODE, startCode);
+ }
+
+ if(serverName != null) {
+ assertTrue(results.containsKey(HConstants.COL_SERVER));
+ val = results.get(HConstants.COL_SERVER);
+ assertNotNull(val);
+ assertFalse(val.length == 0);
+ String server = Writables.bytesToString(val);
+ assertEquals(0, server.compareTo(serverName));
+ }
+ results.clear();
+ }
+
+ } finally {
+ HScannerInterface s = scanner;
+ scanner = null;
+ if(s != null) {
+ s.close();
+ }
+ }
+ }
+ }
+
+ /** Use get to retrieve the HRegionInfo and validate it */
+ private void getRegionInfo() throws IOException {
+ byte [] bytes = region.get(ROW_KEY, HConstants.COL_REGIONINFO);
+ validateRegionInfo(bytes);
+ }
+
+ /** The test!
+ * @throws IOException
+ */
+ public void testScanner() throws IOException {
+ try {
+ r = createNewHRegion(REGION_INFO.getTableDesc(), null, null);
+ region = new HRegionIncommon(r);
+
+ // Write information to the meta table
+
+ long lockid = region.startUpdate(ROW_KEY);
+
+ ByteArrayOutputStream byteStream = new ByteArrayOutputStream();
+ DataOutputStream s = new DataOutputStream(byteStream);
+ HRegionInfo.rootRegionInfo.write(s);
+ region.put(lockid, HConstants.COL_REGIONINFO, byteStream.toByteArray());
+ region.commit(lockid, System.currentTimeMillis());
+
+ // What we just committed is in the memcache. Verify that we can get
+      // it back both by scanning and with get.
+
+ scan(false, null);
+ getRegionInfo();
+
+ // Close and re-open
+
+ r.close();
+ r = openClosedRegion(r);
+ region = new HRegionIncommon(r);
+
+ // Verify we can get the data back now that it is on disk.
+
+ scan(false, null);
+ getRegionInfo();
+
+ // Store some new information
+
+ HServerAddress address = new HServerAddress("foo.bar.com:1234");
+
+ lockid = region.startUpdate(ROW_KEY);
+
+ region.put(lockid, HConstants.COL_SERVER,
+ Writables.stringToBytes(address.toString()));
+
+ region.put(lockid, HConstants.COL_STARTCODE,
+ Writables.longToBytes(START_CODE));
+
+ region.commit(lockid, System.currentTimeMillis());
+
+ // Validate that we can still get the HRegionInfo, even though it is in
+ // an older row on disk and there is a newer row in the memcache
+
+ scan(true, address.toString());
+ getRegionInfo();
+
+ // flush cache
+
+ region.flushcache();
+
+ // Validate again
+
+ scan(true, address.toString());
+ getRegionInfo();
+
+ // Close and reopen
+
+ r.close();
+ r = openClosedRegion(r);
+ region = new HRegionIncommon(r);
+
+ // Validate again
+
+ scan(true, address.toString());
+ getRegionInfo();
+
+ // Now update the information again
+
+ address = new HServerAddress("bar.foo.com:4321");
+
+ lockid = region.startUpdate(ROW_KEY);
+
+ region.put(lockid, HConstants.COL_SERVER,
+ Writables.stringToBytes(address.toString()));
+
+ region.commit(lockid, System.currentTimeMillis());
+
+ // Validate again
+
+ scan(true, address.toString());
+ getRegionInfo();
+
+ // flush cache
+
+ region.flushcache();
+
+ // Validate again
+
+ scan(true, address.toString());
+ getRegionInfo();
+
+ // Close and reopen
+
+ r.close();
+ r = openClosedRegion(r);
+ region = new HRegionIncommon(r);
+
+ // Validate again
+
+ scan(true, address.toString());
+ getRegionInfo();
+
+ // clean up
+
+ r.close();
+ r.getLog().closeAndDelete();
+
+ } finally {
+ StaticTestEnvironment.shutdownDfs(cluster);
+ }
+ }
+}
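
Isolated as a sketch, the Writable round trip at the heart of
validateRegionInfo above; this is the same idiom the test uses to store the
root region's HRegionInfo and read it back:

    // Sketch, not part of the commit: serialize an HRegionInfo, then
    // rehydrate it with Writables.getWritable as validateRegionInfo does.
    ByteArrayOutputStream byteStream = new ByteArrayOutputStream();
    DataOutputStream out = new DataOutputStream(byteStream);
    HRegionInfo.rootRegionInfo.write(out);    // serialize
    byte [] bytes = byteStream.toByteArray();
    HRegionInfo copy =
      (HRegionInfo) Writables.getWritable(bytes, new HRegionInfo());
    // copy now carries the same region id, keys, and table descriptor.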
Added: hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestSplit.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestSplit.java?rev=630550&view=auto
==============================================================================
--- hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestSplit.java (added)
+++ hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestSplit.java Sat Feb 23 16:19:34 2008
@@ -0,0 +1,237 @@
+/**
+ * Copyright 2007 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver;
+
+import java.io.IOException;
+import java.util.TreeMap;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.dfs.MiniDFSCluster;
+import org.apache.hadoop.io.Text;
+import org.apache.log4j.Level;
+import org.apache.log4j.Logger;
+import org.apache.hadoop.hbase.MultiRegionTable;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.HScannerInterface;
+import org.apache.hadoop.hbase.StaticTestEnvironment;
+
+/**
+ * {@link TestHRegion} does a split, but this TestCase adds testing of fast
+ * split and manufactures odd-ball split scenarios.
+ */
+public class TestSplit extends MultiRegionTable {
+ @SuppressWarnings("hiding")
+ static final Log LOG = LogFactory.getLog(TestSplit.class.getName());
+
+ /** constructor */
+ public TestSplit() {
+ super();
+
+ // Always compact if there is more than one store file.
+ conf.setInt("hbase.hstore.compactionThreshold", 2);
+
+ // Make lease timeout longer, lease checks less frequent
+ conf.setInt("hbase.master.lease.period", 10 * 1000);
+ conf.setInt("hbase.master.lease.thread.wakefrequency", 5 * 1000);
+
+ // Increase the amount of time between client retries
+ conf.setLong("hbase.client.pause", 15 * 1000);
+
+ // This size should make it so we always split using the addContent
+    // below. After adding all data, the first region is about 1.3M.
+ conf.setLong("hbase.hregion.max.filesize", 1024 * 128);
+
+ Logger.getRootLogger().setLevel(Level.WARN);
+ Logger.getLogger(this.getClass().getPackage().getName()).
+ setLevel(Level.DEBUG);
+ }
+
+ /**
+ * Splits twice and verifies getting from each of the split regions.
+ * @throws Exception
+ */
+ public void testBasicSplit() throws Exception {
+ MiniDFSCluster cluster = null;
+ HRegion region = null;
+ try {
+ cluster = new MiniDFSCluster(conf, 2, true, (String[])null);
+ // Set the hbase.rootdir to be the home directory in mini dfs.
+ this.conf.set(HConstants.HBASE_DIR,
+ cluster.getFileSystem().getHomeDirectory().toString());
+ HTableDescriptor htd = createTableDescriptor(getName());
+ region = createNewHRegion(htd, null, null);
+ basicSplit(region);
+ } finally {
+ if (region != null) {
+ region.close();
+ region.getLog().closeAndDelete();
+ }
+ if (cluster != null) {
+ StaticTestEnvironment.shutdownDfs(cluster);
+ }
+ }
+ }
+
+ private void basicSplit(final HRegion region) throws Exception {
+ addContent(region, COLFAMILY_NAME3);
+ region.flushcache();
+ Text midkey = new Text();
+ assertTrue(region.needsSplit(midkey));
+ HRegion [] regions = split(region);
+ try {
+ // Need to open the regions.
+ // TODO: Add an 'open' to HRegion... don't do open by constructing
+ // instance.
+ for (int i = 0; i < regions.length; i++) {
+ regions[i] = openClosedRegion(regions[i]);
+ }
+ // Assert can get rows out of new regions. Should be able to get first
+ // row from first region and the midkey from second region.
+ assertGet(regions[0], COLFAMILY_NAME3, new Text(START_KEY));
+ assertGet(regions[1], COLFAMILY_NAME3, midkey);
+ // Test I can get scanner and that it starts at right place.
+ assertScan(regions[0], COLFAMILY_NAME3, new Text(START_KEY));
+ assertScan(regions[1], COLFAMILY_NAME3, midkey);
+      // Now prove we can't split regions that have references.
+      Text[] midkeys = new Text[regions.length];
+      for (int i = 0; i < regions.length; i++) {
+        midkeys[i] = new Text();
+        // Even though these regions hold plenty of data, they are not
+        // splittable while their biggest store file is a reference.
+        // References make a store unsplittable until something bigger
+        // comes along.
+        assertFalse(regions[i].needsSplit(midkeys[i]));
+        // Add enough data to this region to create a store file that is
+        // larger than one of the unsplittable references.
+ for (int j = 0; j < 2; j++) {
+ addContent(regions[i], COLFAMILY_NAME3);
+ }
+ addContent(regions[i], COLFAMILY_NAME2);
+ addContent(regions[i], COLFAMILY_NAME1);
+ regions[i].flushcache();
+ }
+
+      // Assert that even if one store file is larger than a reference, the
+      // region is still deemed unsplittable (a region can't split while
+      // references are present).
+ for (int i = 0; i < regions.length; i++) {
+ midkeys[i] = new Text();
+        // Still not splittable: the biggest store file is a reference, and
+        // references make a store unsplittable until something bigger
+        // comes along.
+ assertFalse(regions[i].needsSplit(midkeys[i]));
+ }
+
+      // To make the regions splittable, force compaction.
+ for (int i = 0; i < regions.length; i++) {
+ regions[i].compactStores();
+ }
+
+ TreeMap<String, HRegion> sortedMap = new TreeMap<String, HRegion>();
+      // Split these two daughter regions so I'll have 4 regions. They will
+      // split because of the data added above.
+ for (int i = 0; i < regions.length; i++) {
+ HRegion[] rs = split(regions[i]);
+ for (int j = 0; j < rs.length; j++) {
+ sortedMap.put(rs[j].getRegionName().toString(),
+ openClosedRegion(rs[j]));
+ }
+ }
+ LOG.info("Made 4 regions");
+      // The splits should have been even. Test that I can get an arbitrary
+      // row out of each.
+ int interval = (LAST_CHAR - FIRST_CHAR) / 3;
+ byte[] b = START_KEY.getBytes(HConstants.UTF8_ENCODING);
+ for (HRegion r : sortedMap.values()) {
+ assertGet(r, COLFAMILY_NAME3, new Text(new String(b,
+ HConstants.UTF8_ENCODING)));
+ b[0] += interval;
+ }
+ } finally {
+ for (int i = 0; i < regions.length; i++) {
+ try {
+ regions[i].close();
+ } catch (IOException e) {
+ // Ignore.
+ }
+ }
+ }
+ }
+
+ private void assertGet(final HRegion r, final String family, final Text k)
+ throws IOException {
+ // Now I have k, get values out and assert they are as expected.
+ byte [][] results = r.get(k, new Text(family),
+ Integer.MAX_VALUE);
+ for (int j = 0; j < results.length; j++) {
+ Text tmp = new Text(results[j]);
+ // Row should be equal to value every time.
+ assertEquals(k.toString(), tmp.toString());
+ }
+ }
+
+ /*
+ * Assert first value in the passed region is <code>firstValue</code>.
+ * @param r
+ * @param column
+ * @param firstValue
+ * @throws IOException
+ */
+ private void assertScan(final HRegion r, final String column,
+ final Text firstValue)
+ throws IOException {
+ Text [] cols = new Text[] {new Text(column)};
+ HScannerInterface s = r.getScanner(cols,
+ HConstants.EMPTY_START_ROW, System.currentTimeMillis(), null);
+ try {
+ HStoreKey curKey = new HStoreKey();
+ TreeMap<Text, byte []> curVals = new TreeMap<Text, byte []>();
+ boolean first = true;
+ OUTER_LOOP: while(s.next(curKey, curVals)) {
+ for(Text col: curVals.keySet()) {
+ byte [] val = curVals.get(col);
+ Text curval = new Text(val);
+ if (first) {
+ first = false;
+ assertTrue(curval.compareTo(firstValue) == 0);
+ } else {
+ // Not asserting anything. Might as well break.
+ break OUTER_LOOP;
+ }
+ }
+ }
+ } finally {
+ s.close();
+ }
+ }
+
+ private HRegion [] split(final HRegion r) throws IOException {
+ Text midKey = new Text();
+ assertTrue(r.needsSplit(midKey));
+    // Assert we can get the mid key from the passed region.
+    assertGet(r, COLFAMILY_NAME3, midKey);
+    HRegion [] regions = r.splitRegion(null);
+    assertEquals(2, regions.length);
+ return regions;
+ }
+}
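
Stripped of its assertions, the split idiom this test drives reduces to the
sketch below; openClosedRegion is the test-harness helper used above, and
splitRegion returns closed daughter regions that must be reopened before use:

    // Sketch, not part of the commit: split a region and reopen the
    // daughters for reading.
    Text midKey = new Text();
    if (region.needsSplit(midKey)) {    // fills in midKey when true
      HRegion [] daughters = region.splitRegion(null);
      for (int i = 0; i < daughters.length; i++) {
        daughters[i] = openClosedRegion(daughters[i]);
      }
      // Per the assertions above, daughters[0] serves the table's first
      // row and daughters[1] serves midKey.
    }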
Added: hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestTimestamp.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestTimestamp.java?rev=630550&view=auto
==============================================================================
--- hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestTimestamp.java (added)
+++ hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestTimestamp.java Sat Feb 23 16:19:34 2008
@@ -0,0 +1,355 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.regionserver;
+
+import java.io.IOException;
+import java.util.TreeMap;
+
+import org.apache.hadoop.dfs.MiniDFSCluster;
+
+import org.apache.hadoop.hbase.HColumnDescriptor.CompressionType;
+import org.apache.hadoop.hbase.util.Writables;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.HBaseTestCase;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.StaticTestEnvironment;
+import org.apache.hadoop.hbase.MiniHBaseCluster;
+import org.apache.hadoop.hbase.HScannerInterface;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+
+/**
+ * Tests putting, getting, and scanning with user-specifiable timestamps.
+ * Also tests the same in the presence of deletes. The test cores are
+ * written so they can be run against an HRegion and against an HTable:
+ * i.e. both local and remote.
+ */
+public class TestTimestamp extends HBaseTestCase {
+ private static final long T0 = 10L;
+ private static final long T1 = 100L;
+ private static final long T2 = 200L;
+
+ private static final String COLUMN_NAME = "contents:";
+
+ private static final Text COLUMN = new Text(COLUMN_NAME);
+ private static final Text ROW = new Text("row");
+
+ // When creating column descriptor, how many versions of a cell to allow.
+ private static final int VERSIONS = 3;
+
+ private MiniDFSCluster cluster;
+
+ /** constructor */
+ public TestTimestamp() {
+ super();
+ this.cluster = null;
+ }
+
+ /** {@inheritDoc} */
+ @Override
+ public void setUp() throws Exception {
+ this.cluster = new MiniDFSCluster(conf, 2, true, (String[])null);
+ // Set the hbase.rootdir to be the home directory in mini dfs.
+ this.conf.set(HConstants.HBASE_DIR,
+ this.cluster.getFileSystem().getHomeDirectory().toString());
+ super.setUp();
+ }
+
+ /** {@inheritDoc} */
+ @Override
+ public void tearDown() throws Exception {
+ if (this.cluster != null) {
+ StaticTestEnvironment.shutdownDfs(cluster);
+ }
+ }
+
+ /**
+ * Test that delete works according to description in <a
+ * href="https://issues.apache.org/jira/browse/HADOOP-1784">hadoop-1784</a>.
+ * @throws IOException
+ */
+ public void testDelete() throws IOException {
+ final HRegion r = createRegion();
+ try {
+ final HRegionIncommon region = new HRegionIncommon(r);
+ doTestDelete(region, region);
+ } finally {
+ r.close();
+ r.getLog().closeAndDelete();
+ }
+ }
+
+ /**
+ * Test scanning against different timestamps.
+ * @throws IOException
+ */
+ public void testTimestampScanning() throws IOException {
+ final HRegion r = createRegion();
+ try {
+ final HRegionIncommon region = new HRegionIncommon(r);
+ doTestTimestampScanning(region, region);
+ } finally {
+ r.close();
+ r.getLog().closeAndDelete();
+ }
+ }
+
+ /**
+ * Basic test of timestamps.
+ * Do the above tests from client side.
+ * @throws IOException
+ */
+ public void testTimestamps() throws IOException {
+ final MiniHBaseCluster cluster =
+ new MiniHBaseCluster(this.conf, 1, this.cluster, true);
+ try {
+ HTable t = createTable();
+ Incommon incommon = new HTableIncommon(t);
+ doTestDelete(incommon, new FlushCache() {
+ public void flushcache() throws IOException {
+ cluster.flushcache();
+ }
+ });
+
+      // Perhaps drop and re-add the table between tests so the former does
+      // not pollute the latter? Or put them into separate tests.
+ doTestTimestampScanning(incommon, new FlushCache() {
+ public void flushcache() throws IOException {
+ cluster.flushcache();
+ }
+ });
+    } finally {
+      cluster.shutdown();
+    }
+ }
+
+ /*
+ * Run test that delete works according to description in <a
+ * href="https://issues.apache.org/jira/browse/HADOOP-1784">hadoop-1784</a>.
+ * @param incommon
+ * @param flusher
+ * @throws IOException
+ */
+ private void doTestDelete(final Incommon incommon, FlushCache flusher)
+ throws IOException {
+    // Add values at various timestamps (values are timestamps stored as bytes).
+ put(incommon, T0);
+ put(incommon, T1);
+ put(incommon, T2);
+ put(incommon);
+ // Verify that returned versions match passed timestamps.
+ assertVersions(incommon, new long [] {HConstants.LATEST_TIMESTAMP, T2, T1});
+ // If I delete w/o specifying a timestamp, this means I'm deleting the
+ // latest.
+ delete(incommon);
+    // Verify that I get back T2 through T0 -- that the latest version has
+ // been deleted.
+ assertVersions(incommon, new long [] {T2, T1, T0});
+
+ // Flush everything out to disk and then retry
+ flusher.flushcache();
+ assertVersions(incommon, new long [] {T2, T1, T0});
+
+    // Now add back a latest so I can test removing a version other than
+    // the latest.
+ put(incommon);
+ assertVersions(incommon, new long [] {HConstants.LATEST_TIMESTAMP, T2, T1});
+ delete(incommon, T2);
+ assertVersions(incommon, new long [] {HConstants.LATEST_TIMESTAMP, T1, T0});
+ // Flush everything out to disk and then retry
+ flusher.flushcache();
+ assertVersions(incommon, new long [] {HConstants.LATEST_TIMESTAMP, T1, T0});
+
+    // Now try deleting all from T2 back inclusive. (We first need to add T2
+    // back into the mix, and to make things a little interesting, delete
+    // and then re-add T1.)
+ put(incommon, T2);
+ delete(incommon, T1);
+ put(incommon, T1);
+ incommon.deleteAll(ROW, COLUMN, T2);
+    // Should only be the current value in the set. Assert this is so.
+ assertOnlyLatest(incommon, HConstants.LATEST_TIMESTAMP);
+
+ // Flush everything out to disk and then redo above tests
+ flusher.flushcache();
+ assertOnlyLatest(incommon, HConstants.LATEST_TIMESTAMP);
+ }
+
+ private void assertOnlyLatest(final Incommon incommon,
+ final long currentTime)
+ throws IOException {
+ byte [][] bytesBytes = incommon.get(ROW, COLUMN, 3/*Ask for too much*/);
+ assertEquals(1, bytesBytes.length);
+ long time = Writables.bytesToLong(bytesBytes[0]);
+ assertEquals(time, currentTime);
+ assertNull(incommon.get(ROW, COLUMN, T1, 3 /*Too many*/));
+ assertTrue(assertScanContentTimestamp(incommon, T1) == 0);
+ }
+
+ /*
+ * Assert that returned versions match passed in timestamps and that results
+ * are returned in the right order. Assert that values when converted to
+ * longs match the corresponding passed timestamp.
+ * @param r
+ * @param tss
+ * @throws IOException
+ */
+ private void assertVersions(final Incommon incommon, final long [] tss)
+ throws IOException {
+ // Assert that 'latest' is what we expect.
+ byte [] bytes = incommon.get(ROW, COLUMN);
+ assertEquals(Writables.bytesToLong(bytes), tss[0]);
+ // Now assert that if we ask for multiple versions, that they come out in
+ // order.
+ byte [][] bytesBytes = incommon.get(ROW, COLUMN, tss.length);
+ assertEquals(tss.length, bytesBytes.length);
+ for (int i = 0; i < bytesBytes.length; i++) {
+ long ts = Writables.bytesToLong(bytesBytes[i]);
+ assertEquals(ts, tss[i]);
+ }
+    // Specify a timestamp and get multiple versions.
+ bytesBytes = incommon.get(ROW, COLUMN, tss[0], bytesBytes.length - 1);
+ for (int i = 1; i < bytesBytes.length; i++) {
+ long ts = Writables.bytesToLong(bytesBytes[i]);
+ assertEquals(ts, tss[i]);
+ }
+ // Test scanner returns expected version
+ assertScanContentTimestamp(incommon, tss[0]);
+ }
+
+ /*
+ * Run test scanning different timestamps.
+ * @param incommon
+ * @param flusher
+ * @throws IOException
+ */
+ private void doTestTimestampScanning(final Incommon incommon,
+ final FlushCache flusher)
+ throws IOException {
+ // Add a couple of values for three different timestamps.
+ put(incommon, T0);
+ put(incommon, T1);
+ put(incommon, HConstants.LATEST_TIMESTAMP);
+ // Get count of latest items.
+ int count = assertScanContentTimestamp(incommon,
+ HConstants.LATEST_TIMESTAMP);
+ // Assert I get same count when I scan at each timestamp.
+ assertEquals(count, assertScanContentTimestamp(incommon, T0));
+ assertEquals(count, assertScanContentTimestamp(incommon, T1));
+ // Flush everything out to disk and then retry
+ flusher.flushcache();
+ assertEquals(count, assertScanContentTimestamp(incommon, T0));
+ assertEquals(count, assertScanContentTimestamp(incommon, T1));
+ }
+
+ /*
+   * Assert that the scan returns only values with timestamps <= ts.
+ * @param r
+ * @param ts
+ * @return Count of items scanned.
+ * @throws IOException
+ */
+ private int assertScanContentTimestamp(final Incommon in, final long ts)
+ throws IOException {
+ HScannerInterface scanner =
+ in.getScanner(COLUMNS, HConstants.EMPTY_START_ROW, ts);
+ int count = 0;
+ try {
+ HStoreKey key = new HStoreKey();
+      TreeMap<Text, byte []> value = new TreeMap<Text, byte[]>();
+ while (scanner.next(key, value)) {
+ assertTrue(key.getTimestamp() <= ts);
+        // The content matches the key's timestamp or is
+        // HConstants.LATEST_TIMESTAMP. (The key does not match the content
+        // if we 'put' with LATEST_TIMESTAMP.)
+ long l = Writables.bytesToLong(value.get(COLUMN));
+ assertTrue(key.getTimestamp() == l ||
+ HConstants.LATEST_TIMESTAMP == l);
+ count++;
+ value.clear();
+ }
+ } finally {
+ scanner.close();
+ }
+ return count;
+ }
+
+ private void put(final Incommon loader, final long ts)
+ throws IOException {
+ put(loader, Writables.longToBytes(ts), ts);
+ }
+
+ private void put(final Incommon loader)
+ throws IOException {
+ long ts = HConstants.LATEST_TIMESTAMP;
+ put(loader, Writables.longToBytes(ts), ts);
+ }
+
+ /*
+ * Put values.
+ * @param loader
+ * @param bytes
+ * @param ts
+ * @throws IOException
+ */
+ private void put(final Incommon loader, final byte [] bytes,
+ final long ts)
+ throws IOException {
+ long lockid = loader.startUpdate(ROW);
+ loader.put(lockid, COLUMN, bytes);
+ if (ts == HConstants.LATEST_TIMESTAMP) {
+ loader.commit(lockid);
+ } else {
+ loader.commit(lockid, ts);
+ }
+ }
+
+ private void delete(final Incommon loader) throws IOException {
+ delete(loader, HConstants.LATEST_TIMESTAMP);
+ }
+
+ private void delete(final Incommon loader, final long ts) throws IOException {
+ long lockid = loader.startUpdate(ROW);
+ loader.delete(lockid, COLUMN);
+ if (ts == HConstants.LATEST_TIMESTAMP) {
+ loader.commit(lockid);
+ } else {
+ loader.commit(lockid, ts);
+ }
+ }
+
+ /*
+   * Create a table named after this test.
+ * @return An instance of an HTable connected to the created table.
+ * @throws IOException
+ */
+ private HTable createTable() throws IOException {
+ HTableDescriptor desc = new HTableDescriptor(getName());
+ desc.addFamily(new HColumnDescriptor(COLUMN_NAME));
+ HBaseAdmin admin = new HBaseAdmin(conf);
+ admin.createTable(desc);
+ return new HTable(conf, new Text(getName()));
+ }
+
+ private HRegion createRegion() throws IOException {
+ HTableDescriptor htd = createTableDescriptor(getName());
+ htd.addFamily(new HColumnDescriptor(COLUMN, VERSIONS,
+ CompressionType.NONE, false, false, Integer.MAX_VALUE, null));
+ return createNewHRegion(htd, null, null);
+ }
+}
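
Condensed from the put/delete helpers above, the timestamped write/read cycle
these tests exercise through the Incommon adapter looks like the sketch below;
ROW, COLUMN, T1 and T2 are the constants defined in the test:

    // Sketch, not part of the commit: store two versions of a cell at
    // explicit timestamps, then read both back, newest first.
    long lockid = incommon.startUpdate(ROW);
    incommon.put(lockid, COLUMN, Writables.longToBytes(T1));
    incommon.commit(lockid, T1);    // version stamped T1
    lockid = incommon.startUpdate(ROW);
    incommon.put(lockid, COLUMN, Writables.longToBytes(T2));
    incommon.commit(lockid, T2);    // version stamped T2
    byte [][] versions = incommon.get(ROW, COLUMN, 2);
    // versions[0] decodes to T2 and versions[1] to T1 (newest first).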
Modified: hadoop/hbase/trunk/src/webapps/regionserver/regionserver.jsp
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/webapps/regionserver/regionserver.jsp?rev=630550&r1=630549&r2=630550&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/webapps/regionserver/regionserver.jsp (original)
+++ hadoop/hbase/trunk/src/webapps/regionserver/regionserver.jsp Sat Feb 23 16:19:34 2008
@@ -2,8 +2,8 @@
import="java.util.*"
import="org.apache.hadoop.io.Text"
import="org.apache.hadoop.util.VersionInfo"
- import="org.apache.hadoop.hbase.HRegionServer"
- import="org.apache.hadoop.hbase.HRegion"
+ import="org.apache.hadoop.hbase.regionserver.HRegionServer"
+ import="org.apache.hadoop.hbase.regionserver.HRegion"
import="org.apache.hadoop.hbase.HConstants"
import="org.apache.hadoop.hbase.HServerInfo"
import="org.apache.hadoop.hbase.HRegionInfo" %><%