You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@hbase.apache.org by sy...@apache.org on 2016/02/13 21:10:13 UTC
[02/22] hbase git commit: HBASE-11792 Organize Performance Evaluation
usage output
HBASE-11792 Organize Performance Evaluation usage output
Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/7cab2472
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/7cab2472
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/7cab2472
Branch: refs/heads/hbase-12439
Commit: 7cab24729d4585689af745df179d6ae92b2a6248
Parents: 7bb68b9
Author: Misty Stanley-Jones <ms...@cloudera.com>
Authored: Thu Dec 17 13:15:52 2015 -0800
Committer: Misty Stanley-Jones <ms...@cloudera.com>
Committed: Tue Feb 9 11:16:22 2016 -0800
----------------------------------------------------------------------
.../hbase/rest/PerformanceEvaluation.java | 10 ++--
.../hadoop/hbase/PerformanceEvaluation.java | 50 ++++++++++----------
2 files changed, 33 insertions(+), 27 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hbase/blob/7cab2472/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/PerformanceEvaluation.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/PerformanceEvaluation.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/PerformanceEvaluation.java
index dcd5b0a..e207735 100644
--- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/PerformanceEvaluation.java
+++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/PerformanceEvaluation.java
@@ -1366,10 +1366,12 @@ public class PerformanceEvaluation extends Configured implements Tool {
System.err.println(" [--compress=TYPE] [--blockEncoding=TYPE] " +
"[-D<property=value>]* <command> <nclients>");
System.err.println();
- System.err.println("Options:");
+ System.err.println("General Options:");
System.err.println(" nomapred Run multiple clients using threads " +
"(rather than use mapreduce)");
System.err.println(" rows Rows each client runs. Default: One million");
+ System.err.println();
+ System.err.println("Table Creation / Write Tests:");
System.err.println(" table Alternate table name. Default: 'TestTable'");
System.err.println(" compress Compression type to use (GZ, LZO, ...). Default: 'NONE'");
System.err.println(" flushCommits Used to determine if the test should flush the table. " +
@@ -1377,13 +1379,15 @@ public class PerformanceEvaluation extends Configured implements Tool {
System.err.println(" writeToWAL Set writeToWAL on puts. Default: True");
System.err.println(" presplit Create presplit table. Recommended for accurate perf " +
"analysis (see guide). Default: disabled");
- System.err.println(" inmemory Tries to keep the HFiles of the CF inmemory as far as " +
- "possible. Not guaranteed that reads are always served from inmemory. Default: false");
System.err.println(" usetags Writes tags along with KVs. Use with HFile V3. " +
"Default: false");
System.err.println(" numoftags Specify the number of tags that would be needed. " +
"This works only if usetags is true.");
System.err.println();
+ System.err.println("Read Tests:");
+ System.err.println(" inmemory Tries to keep the HFiles of the CF inmemory as far as " +
+ "possible. Not guaranteed that reads are always served from inmemory. Default: false");
+ System.err.println();
System.err.println(" Note: -D properties will be applied to the conf used. ");
System.err.println(" For example: ");
System.err.println(" -Dmapreduce.output.fileoutputformat.compress=true");
http://git-wip-us.apache.org/repos/asf/hbase/blob/7cab2472/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
index 651bc86..a31cc06 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
@@ -1827,56 +1827,58 @@ public class PerformanceEvaluation extends Configured implements Tool {
System.err.println("Usage: java " + className + " \\");
System.err.println(" <OPTIONS> [-D<property=value>]* <command> <nclients>");
System.err.println();
- System.err.println("Options:");
+ System.err.println("General Options:");
System.err.println(" nomapred Run multiple clients using threads " +
"(rather than use mapreduce)");
- System.err.println(" rows Rows each client runs. Default: One million");
- System.err.println(" size Total size in GiB. Mutually exclusive with --rows. " +
- "Default: 1.0.");
+ System.err.println(" oneCon all the threads share the same connection. Default: False");
System.err.println(" sampleRate Execute test on a sample of total " +
"rows. Only supported by randomRead. Default: 1.0");
+ System.err.println(" period Report every 'period' rows: " +
+ "Default: opts.perClientRunRows / 10");
+ System.err.println(" cycles How many times to cycle the test. Defaults: 1.");
System.err.println(" traceRate Enable HTrace spans. Initiate tracing every N rows. " +
"Default: 0");
+ System.err.println(" latency Set to report operation latencies. Default: False");
+ System.err.println(" measureAfter Start to measure the latency once 'measureAfter'" +
+ " rows have been treated. Default: 0");
+ System.err.println(" valueSize Pass value size to use: Default: 1024");
+ System.err.println(" valueRandom Set if we should vary value size between 0 and " +
+ "'valueSize'; set on read for stats on size: Default: Not set.");
+ System.err.println();
+ System.err.println("Table Creation / Write Tests:");
System.err.println(" table Alternate table name. Default: 'TestTable'");
- System.err.println(" multiGet If >0, when doing RandomRead, perform multiple gets " +
- "instead of single gets. Default: 0");
+ System.err.println(" rows Rows each client runs. Default: One million");
+ System.err.println(" size Total size in GiB. Mutually exclusive with --rows. " +
+ "Default: 1.0.");
System.err.println(" compress Compression type to use (GZ, LZO, ...). Default: 'NONE'");
System.err.println(" flushCommits Used to determine if the test should flush the table. " +
"Default: false");
+ System.err.println(" valueZipf Set if we should vary value size between 0 and " +
+ "'valueSize' in zipf form: Default: Not set.");
System.err.println(" writeToWAL Set writeToWAL on puts. Default: True");
System.err.println(" autoFlush Set autoFlush on htable. Default: False");
- System.err.println(" oneCon all the threads share the same connection. Default: False");
System.err.println(" presplit Create presplit table. Recommended for accurate perf " +
"analysis (see guide). Default: disabled");
- System.err.println(" inmemory Tries to keep the HFiles of the CF " +
- "inmemory as far as possible. Not guaranteed that reads are always served " +
- "from memory. Default: false");
System.err.println(" usetags Writes tags along with KVs. Use with HFile V3. " +
"Default: false");
System.err.println(" numoftags Specify the number of tags that would be needed. " +
"This works only if usetags is true.");
+ System.err.println(" splitPolicy Specify a custom RegionSplitPolicy for the table.");
+ System.err.println(" columns Columns to write per row. Default: 1");
+ System.err.println();
+ System.err.println("Read Tests:");
System.err.println(" filterAll Helps to filter out all the rows on the server side"
+ " thereby not returning anything back to the client. Helps to check the server side"
+ " performance. Uses FilterAllFilter internally. ");
- System.err.println(" latency Set to report operation latencies. Default: False");
- System.err.println(" measureAfter Start to measure the latency once 'measureAfter'" +
- " rows have been treated. Default: 0");
- System.err.println(" bloomFilter Bloom filter type, one of " + Arrays.toString(BloomType.values()));
- System.err.println(" valueSize Pass value size to use: Default: 1024");
- System.err.println(" valueRandom Set if we should vary value size between 0 and " +
- "'valueSize'; set on read for stats on size: Default: Not set.");
- System.err.println(" valueZipf Set if we should vary value size between 0 and " +
- "'valueSize' in zipf form: Default: Not set.");
- System.err.println(" period Report every 'period' rows: " +
- "Default: opts.perClientRunRows / 10");
System.err.println(" multiGet Batch gets together into groups of N. Only supported " +
"by randomRead. Default: disabled");
+ System.err.println(" inmemory Tries to keep the HFiles of the CF " +
+ "inmemory as far as possible. Not guaranteed that reads are always served " +
+ "from memory. Default: false");
+ System.err.println(" bloomFilter Bloom filter type, one of " + Arrays.toString(BloomType.values()));
System.err.println(" addColumns Adds columns to scans/gets explicitly. Default: true");
System.err.println(" replicas Enable region replica testing. Defaults: 1.");
- System.err.println(" cycles How many times to cycle the test. Defaults: 1.");
- System.err.println(" splitPolicy Specify a custom RegionSplitPolicy for the table.");
System.err.println(" randomSleep Do a random sleep before each get between 0 and entered value. Defaults: 0");
- System.err.println(" columns Columns to write per row. Default: 1");
System.err.println(" caching Scan caching to use. Default: 30");
System.err.println();
System.err.println(" Note: -D properties will be applied to the conf used. ");