You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@hbase.apache.org by ns...@apache.org on 2011/10/11 19:45:12 UTC
svn commit: r1181975 - in
/hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/manual:
HBaseTest.java utils/DataGenerator.java utils/MultiThreadedWriter.java
Author: nspiegelberg
Date: Tue Oct 11 17:45:12 2011
New Revision: 1181975
URL: http://svn.apache.org/viewvc?rev=1181975&view=rev
Log:
HBase load testing as a map-reduce job
Summary:
Added a script to start a streaming map-reduce job where each map
task runs an instance of the load tester for a partition of the key-space.
The load tester now takes a parameter indicating the start key for write
operations.
Test Plan: ran the map-reduce load test on a dev cluster
Reviewers: kranganathan
Reviewed By: kranganathan
CC: hbase@lists, kranganathan
Differential Revision: 324061
Task ID: 697898
Modified:
hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/manual/HBaseTest.java
hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/manual/utils/DataGenerator.java
hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/manual/utils/MultiThreadedWriter.java
Modified: hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/manual/HBaseTest.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/manual/HBaseTest.java?rev=1181975&r1=1181974&r2=1181975&view=diff
==============================================================================
--- hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/manual/HBaseTest.java (original)
+++ hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/manual/HBaseTest.java Tue Oct 11 17:45:12 2011
@@ -74,6 +74,7 @@ public class HBaseTest
// usage string for loading data
static final String OPT_USAGE_LOAD = " <num keys>:<average cols per key>:<avg data size>[:<num threads = 20>]";
+ static final String OPT_USAGE_LOAD_START_KEY = " <start key>";
/**
* Reads the following params from the command line:
* <Number of keys to load>:<Average columns per key>:<Average data size per column>[:<num threads = 20>]
@@ -83,6 +84,10 @@ public class HBaseTest
String[] cols = cmd_.getOptionValue(OPT_LOAD).split(":");
long startKey = 0;
long endKey = Long.parseLong(cols[0]);
+ if (cmd_.hasOption(OPT_LOAD_START_KEY)) {
+ startKey = Long.parseLong(cmd_.getOptionValue(OPT_LOAD_START_KEY));
+ endKey += startKey;
+ }
long minColsPerKey = 1;
long maxColsPerKey = 2 * Long.parseLong(cols[1]);
int minColDataSize = Integer.parseInt(cols[2])/2;
@@ -269,6 +274,7 @@ public class HBaseTest
private static final String FOOTER = "";
private static final String OPT_ZKNODE = "zk";
private static final String OPT_LOAD = "load";
+ private static final String OPT_LOAD_START_KEY = "load_start_key";
private static final String OPT_READ = "read";
private static final String OPT_KILL = "kill";
private static final String OPT_APPEND = "append";
@@ -281,12 +287,14 @@ public class HBaseTest
+ " -" + OPT_ZKNODE + " <Zookeeper node>"
+ " _" + OPT_TABLE_NAME + " <Table name>"
+ " -" + OPT_LOAD + OPT_USAGE_LOAD
+ + " -" + OPT_LOAD_START_KEY + OPT_USAGE_LOAD_START_KEY
+ " -" + OPT_READ + OPT_USAGE_READ
+ " -" + OPT_KILL + OPT_USAGE_KILL;
// add options
options_.addOption(OPT_ZKNODE , true, "Zookeeper node in the HBase cluster");
options_.addOption(OPT_TABLE_NAME, true, "The name of the table to be read or write");
options_.addOption(OPT_LOAD , true, OPT_USAGE_LOAD);
+ options_.addOption(OPT_LOAD_START_KEY, true, OPT_USAGE_LOAD_START_KEY);
options_.addOption(OPT_READ , true, OPT_USAGE_READ);
options_.addOption(OPT_KILL , true, OPT_USAGE_KILL);
options_.addOption(OPT_APPEND , true, OPT_USAGE_APPEND);
Modified: hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/manual/utils/DataGenerator.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/manual/utils/DataGenerator.java?rev=1181975&r1=1181974&r2=1181975&view=diff
==============================================================================
--- hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/manual/utils/DataGenerator.java (original)
+++ hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/manual/utils/DataGenerator.java Tue Oct 11 17:45:12 2011
@@ -43,23 +43,23 @@ public class DataGenerator {
maxDataSize_ = maxDataSize;
}
- private static byte[] getDataForKey(String rowKey, int dataSize) {
+ private static byte[] getDataForKeyColumn(String rowKey, String column, int dataSize) {
// Need a different local random object since multiple threads might invoke
// this method at the same time.
- Random random = new Random(rowKey.hashCode());
+ Random random = new Random(rowKey.hashCode() + column.hashCode());
byte[] rbytes = new byte[dataSize];
random.nextBytes(rbytes);
return rbytes;
}
- public byte[] getDataInSize(long key) {
+ public byte[] getDataInSize(long key, String column) {
String rowKey = DataGenerator.md5PrefixedKey(key);
int dataSize = minDataSize_ + random_.nextInt(Math.abs(maxDataSize_ - minDataSize_));
- return getDataForKey(rowKey, dataSize);
+ return getDataForKeyColumn(rowKey, column, dataSize);
}
public static boolean verify(String rowKey, String actionId, byte[] data) {
- byte[] expectedData = getDataForKey(rowKey, data.length);
+ byte[] expectedData = getDataForKeyColumn(rowKey, actionId, data.length);
return (Bytes.equals(expectedData, data));
}
}
Modified: hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/manual/utils/MultiThreadedWriter.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/manual/utils/MultiThreadedWriter.java?rev=1181975&r1=1181974&r2=1181975&view=diff
==============================================================================
--- hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/manual/utils/MultiThreadedWriter.java (original)
+++ hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/manual/utils/MultiThreadedWriter.java Tue Oct 11 17:45:12 2011
@@ -155,7 +155,7 @@ public class MultiThreadedWriter extends
public void insert(long rowKey, long col) {
Put put = new Put(longToByteArrayKey(rowKey));
- put.add(columnFamily_, ("" + col).getBytes(), dataGenerator_.getDataInSize(rowKey));
+ put.add(columnFamily_, ("" + col).getBytes(), dataGenerator_.getDataInSize(rowKey, "" + col));
try {
long start = System.currentTimeMillis();
putIntoTables(put);
@@ -183,7 +183,7 @@ public class MultiThreadedWriter extends
byte[] columnQualifier;
byte[] value;
for(long i = startCol; i < endCol; ++i) {
- value = dataGenerator_.getDataInSize(rowKey);
+ value = dataGenerator_.getDataInSize(rowKey, "" + i);
columnQualifier = ("" + i).getBytes();
put.add(columnFamily_, columnQualifier, value);
}