Posted to commits@hbase.apache.org by ap...@apache.org on 2008/11/28 11:42:10 UTC

svn commit: r721422 - in /hadoop/hbase/branches/0.19_on_hadoop_0.18: ./ src/java/org/apache/hadoop/hbase/metrics/ src/java/org/apache/hadoop/hbase/metrics/file/ src/test/org/apache/hadoop/hbase/ src/test/org/apache/hadoop/hbase/client/

Author: apurtell
Date: Fri Nov 28 02:42:09 2008
New Revision: 721422

URL: http://svn.apache.org/viewvc?rev=721422&view=rev
Log:
sweep changes missed by last commit

Added:
    hadoop/hbase/branches/0.19_on_hadoop_0.18/src/java/org/apache/hadoop/hbase/metrics/
    hadoop/hbase/branches/0.19_on_hadoop_0.18/src/java/org/apache/hadoop/hbase/metrics/file/
    hadoop/hbase/branches/0.19_on_hadoop_0.18/src/java/org/apache/hadoop/hbase/metrics/file/TimeStampingFileContext.java
    hadoop/hbase/branches/0.19_on_hadoop_0.18/src/test/org/apache/hadoop/hbase/client/TestForceSplit.java
Removed:
    hadoop/hbase/branches/0.19_on_hadoop_0.18/src/test/org/apache/hadoop/hbase/TestToString.java
Modified:
    hadoop/hbase/branches/0.19_on_hadoop_0.18/CHANGES.txt

Modified: hadoop/hbase/branches/0.19_on_hadoop_0.18/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.19_on_hadoop_0.18/CHANGES.txt?rev=721422&r1=721421&r2=721422&view=diff
==============================================================================
--- hadoop/hbase/branches/0.19_on_hadoop_0.18/CHANGES.txt (original)
+++ hadoop/hbase/branches/0.19_on_hadoop_0.18/CHANGES.txt Fri Nov 28 02:42:09 2008
@@ -144,6 +144,7 @@
    HBASE-1026  Tests in mapred are failing
    HBASE-1020  Regionserver OOME handler should dump vital stats
    HBASE-1018  Regionservers should report detailed health to master
+   HBASE-1034  Remove useless TestToString unit test
 
   NEW FEATURES
    HBASE-875   Use MurmurHash instead of JenkinsHash [in bloomfilters]

Added: hadoop/hbase/branches/0.19_on_hadoop_0.18/src/java/org/apache/hadoop/hbase/metrics/file/TimeStampingFileContext.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.19_on_hadoop_0.18/src/java/org/apache/hadoop/hbase/metrics/file/TimeStampingFileContext.java?rev=721422&view=auto
==============================================================================
--- hadoop/hbase/branches/0.19_on_hadoop_0.18/src/java/org/apache/hadoop/hbase/metrics/file/TimeStampingFileContext.java (added)
+++ hadoop/hbase/branches/0.19_on_hadoop_0.18/src/java/org/apache/hadoop/hbase/metrics/file/TimeStampingFileContext.java Fri Nov 28 02:42:09 2008
@@ -0,0 +1,106 @@
+/**
+ * Copyright 2008 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.metrics.file;
+
+import java.io.BufferedOutputStream;
+import java.io.File;
+import java.io.FileWriter;
+import java.io.IOException;
+import java.io.PrintWriter;
+import java.text.SimpleDateFormat;
+import java.util.Date;
+
+import org.apache.hadoop.metrics.ContextFactory;
+import org.apache.hadoop.metrics.file.FileContext;
+import org.apache.hadoop.metrics.spi.OutputRecord;
+
+/**
+ * Adds a timestamp to {@link org.apache.hadoop.metrics.file.FileContext#emitRecord(String, String, OutputRecord)}.
+ */
+public class TimeStampingFileContext extends FileContext {
+  // Copies a chunk of FileContext here because writer and file are private
+  // in the superclass.
+  private File file = null;
+  private PrintWriter writer = null;
+  private final SimpleDateFormat sdf;
+  
+  public TimeStampingFileContext() {
+    super();
+    this.sdf = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss");
+  }
+
+  public void init(String contextName, ContextFactory factory) {
+    super.init(contextName, factory);
+    String fileName = getAttribute(FILE_NAME_PROPERTY);
+    if (fileName != null) {
+      file = new File(fileName);
+    }
+  }
+
+  public void startMonitoring() throws IOException {
+    if (file == null) {
+      writer = new PrintWriter(new BufferedOutputStream(System.out));
+    } else {
+      writer = new PrintWriter(new FileWriter(file, true));
+    }
+    super.startMonitoring();
+  }
+
+  public void stopMonitoring() {
+    super.stopMonitoring();
+    if (writer != null) {
+      writer.close();
+      writer = null;
+    }
+  }
+
+  private synchronized String iso8601() {
+    return this.sdf.format(new Date());
+  }
+
+  public void emitRecord(String contextName, String recordName,
+      OutputRecord outRec) {
+    writer.print(iso8601());
+    writer.print(" ");
+    writer.print(contextName);
+    writer.print(".");
+    writer.print(recordName);
+    String separator = ": ";
+    for (String tagName : outRec.getTagNames()) {
+      writer.print(separator);
+      separator = ", ";
+      writer.print(tagName);
+      writer.print("=");
+      writer.print(outRec.getTag(tagName));
+    }
+    for (String metricName : outRec.getMetricNames()) {
+      writer.print(separator);
+      separator = ", ";
+      writer.print(metricName);
+      writer.print("=");
+      writer.print(outRec.getMetric(metricName));
+    }
+    writer.println();
+  }
+
+  public void flush() {
+    writer.flush();
+  }
+}
\ No newline at end of file
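
The new context is enabled the same way as the stock FileContext, via
conf/hadoop-metrics.properties. A minimal sketch, assuming the superclass's
standard attribute keys ("fileName", "period") and an "hbase" context name;
the file path is illustrative only:

  hbase.class=org.apache.hadoop.hbase.metrics.file.TimeStampingFileContext
  hbase.period=10
  hbase.fileName=/tmp/metrics_hbase.log

With the context in place, emitRecord() prefixes each record with a
yyyy-MM-dd'T'HH:mm:ss timestamp, so output lines read like the following
(hypothetical tag and metric names):

  2008-11-28T02:42:09 hbase.regionserver: hostName=example.org, requests=0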

Added: hadoop/hbase/branches/0.19_on_hadoop_0.18/src/test/org/apache/hadoop/hbase/client/TestForceSplit.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.19_on_hadoop_0.18/src/test/org/apache/hadoop/hbase/client/TestForceSplit.java?rev=721422&view=auto
==============================================================================
--- hadoop/hbase/branches/0.19_on_hadoop_0.18/src/test/org/apache/hadoop/hbase/client/TestForceSplit.java (added)
+++ hadoop/hbase/branches/0.19_on_hadoop_0.18/src/test/org/apache/hadoop/hbase/client/TestForceSplit.java Fri Nov 28 02:42:09 2008
@@ -0,0 +1,119 @@
+/**
+ * Copyright 2007 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.client;
+
+import java.io.IOException;
+import java.util.Map;
+
+import org.apache.hadoop.hbase.HBaseClusterTestCase;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HServerAddress;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.io.BatchUpdate;
+import org.apache.hadoop.hbase.util.Bytes;
+
+/**
+ * Tests forced splitting of an HTable.
+ */
+public class TestForceSplit extends HBaseClusterTestCase {
+  private static final byte[] tableName = Bytes.toBytes("test");
+  private static final byte[] columnName = Bytes.toBytes("a:");
+  private static final byte[] key_mmi = Bytes.toBytes("mmi");
+  private static final byte[] key_ssm = Bytes.toBytes("ssm");
+
+  /**
+   * Forces table splits via the master and verifies the resulting regions.
+   * @throws Exception
+   */
+  public void testHTable() throws Exception {
+    // create the test table
+    HTableDescriptor htd = new HTableDescriptor(tableName);
+    htd.addFamily(new HColumnDescriptor(columnName));
+    HBaseAdmin admin = new HBaseAdmin(conf);
+    admin.createTable(htd);
+    HTable table = new HTable(conf, tableName);
+    byte[] k = new byte[3];
+    for (byte b1 = 'a'; b1 < 'z'; b1++) {
+      for (byte b2 = 'a'; b2 < 'z'; b2++) {
+        for (byte b3 = 'a'; b3 < 'z'; b3++) {
+          k[0] = b1;
+          k[1] = b2;
+          k[2] = b3;
+          BatchUpdate update = new BatchUpdate(k);
+          update.put(columnName, k);
+          table.commit(update);
+        }
+      }
+    }
+
+    // get the initial layout (should just be one region)
+    Map<HRegionInfo,HServerAddress> m = table.getRegionsInfo();
+    System.out.println("Initial regions (" + m.size() + "): " + m);
+    assertTrue(m.size() == 1);
+
+    // tell the master to split the table
+    admin.modifyTable(tableName, HConstants.MODIFY_TABLE_SPLIT);
+
+    // give some time for the split to happen
+    Thread.sleep(15 * 1000);
+
+    // check again
+    table = new HTable(conf, tableName);
+    m = table.getRegionsInfo();
+    System.out.println("Regions after split (" + m.size() + "): " + m);
+    // should have two regions now
+    assertTrue(m.size() == 2);
+    // and "mmi" should be the midpoint
+    for (HRegionInfo hri: m.keySet()) {
+      byte[] start = hri.getStartKey();
+      byte[] end = hri.getEndKey();
+      if (Bytes.equals(start, HConstants.EMPTY_BYTE_ARRAY))
+        assertTrue(Bytes.equals(end, key_mmi));
+      if (Bytes.equals(end, key_mmi))
+        assertTrue(Bytes.equals(start, HConstants.EMPTY_BYTE_ARRAY));
+    }
+
+    // tell the master to split the table again, this time the second half
+    admin.modifyTable(tableName, HConstants.MODIFY_TABLE_SPLIT, key_mmi);
+
+    // give some time for the split to happen
+    Thread.sleep(15 * 1000);
+
+    // check again
+    table = new HTable(conf, tableName);
+    m = table.getRegionsInfo();
+    System.out.println("Regions after split (" + m.size() + "): " + m);
+    // should have three regions now
+    assertTrue(m.size() == 3);
+    // and "mmi" and "ssm" should be the midpoints
+    for (HRegionInfo hri: m.keySet()) {
+      byte[] start = hri.getStartKey();
+      byte[] end = hri.getEndKey();
+      if (Bytes.equals(start, HConstants.EMPTY_BYTE_ARRAY))
+        assertTrue(Bytes.equals(end, key_mmi));
+      if (Bytes.equals(start, key_mmi))
+        assertTrue(Bytes.equals(end, key_ssm));
+      if (Bytes.equals(start, key_ssm))
+        assertTrue(Bytes.equals(end, HConstants.EMPTY_BYTE_ARRAY));
+    }
+  }
+}
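
For reference, the master-side split request exercised above can be issued
from any client. A minimal standalone sketch of the same calls the test
makes; the class name, table name, and row key are illustrative only:

  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.HConstants;
  import org.apache.hadoop.hbase.client.HBaseAdmin;
  import org.apache.hadoop.hbase.util.Bytes;

  public class ForceSplitExample {
    public static void main(String[] args) throws Exception {
      HBaseAdmin admin = new HBaseAdmin(new HBaseConfiguration());
      // tell the master to split the table
      admin.modifyTable(Bytes.toBytes("test"), HConstants.MODIFY_TABLE_SPLIT);
      // or split the region containing an explicit row key
      admin.modifyTable(Bytes.toBytes("test"), HConstants.MODIFY_TABLE_SPLIT,
          Bytes.toBytes("mmi"));
    }
  }

Splits are asynchronous; as the test does, poll table.getRegionsInfo() after
a pause to observe the new region layout.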