Posted to commits@hbase.apache.org by st...@apache.org on 2009/10/05 23:10:02 UTC

svn commit: r822015 - in /hadoop/hbase/branches/0.20: CHANGES.txt src/java/org/apache/hadoop/hbase/util/CompressionTest.java

Author: stack
Date: Mon Oct  5 21:10:01 2009
New Revision: 822015

URL: http://svn.apache.org/viewvc?rev=822015&view=rev
Log:
HBASE-1875 Compression test utility

Added:
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/util/CompressionTest.java
Modified:
    hadoop/hbase/branches/0.20/CHANGES.txt

Modified: hadoop/hbase/branches/0.20/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20/CHANGES.txt?rev=822015&r1=822014&r2=822015&view=diff
==============================================================================
--- hadoop/hbase/branches/0.20/CHANGES.txt (original)
+++ hadoop/hbase/branches/0.20/CHANGES.txt Mon Oct  5 21:10:01 2009
@@ -63,6 +63,7 @@
                (listTables, tableExists), is very slow if the client is far
                away from the HBase cluster (Andrei Dragomir via Stack)
    HBASE-1879  ReadOnly transactions generate WAL activity (Clint Morgan via Stack)
+   HBASE-1875  Compression test utility (elsif via Stack)
 
 Release 0.20.0 - Tue Sep  8 12:48:41 PDT 2009
 

Added: hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/util/CompressionTest.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/util/CompressionTest.java?rev=822015&view=auto
==============================================================================
--- hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/util/CompressionTest.java (added)
+++ hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/util/CompressionTest.java Mon Oct  5 21:10:01 2009
@@ -0,0 +1,101 @@
+/**
+ * Copyright 2009 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.util;
+
+import java.net.URI;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.io.hfile.HFile;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
+
+/**
+ * Compression validation test.  Checks that the given compression codec is
+ * working by writing and reading back a small HFile.  Be sure to run it on
+ * every node in your cluster.
+ */
+public class CompressionTest {
+  protected static Path path = new Path(".hfile-comp-test");
+
+  public static void usage() {
+    System.out.println("Usage: CompressionTest HDFS_PATH none|gz|lzo");
+    System.exit(0);
+  }
+
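+  /** Opens a DistributedFileSystem for the given hdfs:// URI. */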
+  protected static DistributedFileSystem openConnection(String urlString)
+  throws java.net.URISyntaxException, java.io.IOException {
+    URI dfsUri = new URI(urlString);
+    Configuration dfsConf = new Configuration();
+    DistributedFileSystem dfs = new DistributedFileSystem();
+    dfs.initialize(dfsUri, dfsConf);
+    return dfs;
+  }
+
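+  /** Closes the passed connection.  Returns true if it closed cleanly or was already null. */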
+  protected static boolean closeConnection(DistributedFileSystem dfs) {
+    if (dfs != null) {
+      try {
+        dfs.close();
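+        // Null the local reference so the return check below reports a clean close.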
+        dfs = null;
+      } catch (Exception e) {
+        e.printStackTrace();
+      }
+    }
+    return dfs == null;
+  }
+
+  public static void main(String[] args) {
+    if (args.length != 2) usage();
+    try {
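+      // Connect to the filesystem and clear any leftover test file from a prior run.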
+      DistributedFileSystem dfs = openConnection(args[0]);
+      dfs.delete(path, false);
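+
+      // Write a single key/value pair through the requested codec.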
+      HFile.Writer writer = new HFile.Writer(dfs, path,
+        HFile.DEFAULT_BLOCKSIZE, args[1], null);
+      writer.append(Bytes.toBytes("testkey"), Bytes.toBytes("testval"));
+      writer.appendFileInfo(Bytes.toBytes("infokey"), Bytes.toBytes("infoval"));
+      writer.close();
+
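+      // Read the file back and check the first key survived the round trip.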
+      HFile.Reader reader = new HFile.Reader(dfs, path, null, false);
+      reader.loadFileInfo();
+      byte[] key = reader.getFirstKey();
+      boolean rc = Bytes.toString(key).equals("testkey");
+      reader.close();
+
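+      // Remove the test file and release the connection.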
+      dfs.delete(path, false);
+      closeConnection(dfs);
+
+      if (rc) {
+        System.out.println("SUCCESS");
+        System.exit(0);
+      }
+    } catch (Exception e) {
+      e.printStackTrace();
+    }
+    System.out.println("FAILED");
+    System.exit(1);
+  }
+}
\ No newline at end of file
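
For reference, a sketch of how the utility might be invoked on a node (the
bin/hbase launcher invocation and the namenode URI below are assumptions for
illustration, not part of this commit):

  $ bin/hbase org.apache.hadoop.hbase.util.CompressionTest hdfs://namenode:9000/ lzo
  $ echo $?    # 0 on success, 1 on failure

The first argument is the hdfs:// URI handed to openConnection(); the second
names the codec (none, gz, or lzo) passed through to HFile.Writer.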