You are viewing a plain text version of this content; the canonical link for it is the revision URL given below.
Posted to commits@hbase.apache.org by st...@apache.org on 2009/04/28 10:27:27 UTC

svn commit: r769286 - in /hadoop/hbase/trunk: CHANGES.txt src/java/org/apache/hadoop/hbase/client/HTable.java

Author: stack
Date: Tue Apr 28 08:27:26 2009
New Revision: 769286

URL: http://svn.apache.org/viewvc?rev=769286&view=rev
Log:
HBASE-1350 New method in HTable.java to return start and end keys for regions in a table 

Modified:
    hadoop/hbase/trunk/CHANGES.txt
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/HTable.java

Modified: hadoop/hbase/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/CHANGES.txt?rev=769286&r1=769285&r2=769286&view=diff
==============================================================================
--- hadoop/hbase/trunk/CHANGES.txt (original)
+++ hadoop/hbase/trunk/CHANGES.txt Tue Apr 28 08:27:26 2009
@@ -170,6 +170,8 @@
                provide additional ByteBuffer primitives (Jon Gray via Stack)
    HBASE-1183  New MR splitting algorithm and other new features need a way to
                split a key range in N chunks (Jon Gray via Stack)
+   HBASE-1350  New method in HTable.java to return start and end keys for
+               regions in a table (Vimal Mathew via Stack)
 
 Release 0.19.0 - 01/21/2009
   INCOMPATIBLE CHANGES

Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/HTable.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/HTable.java?rev=769286&r1=769285&r2=769286&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/HTable.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/HTable.java Tue Apr 28 08:27:26 2009
@@ -47,6 +47,7 @@
 import org.apache.hadoop.hbase.io.RowResult;
 import org.apache.hadoop.hbase.io.HbaseMapWritable;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.hbase.util.Writables;
 
 /**
@@ -227,21 +228,45 @@
    * @throws IOException
    */
   public byte [][] getStartKeys() throws IOException {
-    final List<byte[]> keyList = new ArrayList<byte[]>();
+    return getStartEndKeys().getFirst();
+  }
+
+  /**
+   * Gets the ending row key for every region in the currently open table
+   * 
+   * @return Array of region ending row keys
+   * @throws IOException
+   */
+  public byte[][] getEndKeys() throws IOException {
+    return getStartEndKeys().getSecond();
+  }
+
+  /**
+   * Gets the starting and ending row keys for every region in the currently open table
+   * 
+   * @return Pair of arrays of region starting and ending row keys
+   * @throws IOException
+   */
+  @SuppressWarnings("unchecked")
+  public Pair<byte[][],byte[][]> getStartEndKeys() throws IOException {
+    final List<byte[]> startKeyList = new ArrayList<byte[]>();
+    final List<byte[]> endKeyList = new ArrayList<byte[]>();
     MetaScannerVisitor visitor = new MetaScannerVisitor() {
       public boolean processRow(RowResult rowResult) throws IOException {
         HRegionInfo info = Writables.getHRegionInfo(
             rowResult.get(HConstants.COL_REGIONINFO));
         if (Bytes.equals(info.getTableDesc().getName(), getTableName())) {
           if (!(info.isOffline() || info.isSplit())) {
-            keyList.add(info.getStartKey());
+            startKeyList.add(info.getStartKey());
+            endKeyList.add(info.getEndKey());
           }
         }
         return true;
       }
     };
     MetaScanner.metaScan(configuration, visitor, this.tableName);
-    return keyList.toArray(new byte[keyList.size()][]);
+    return new Pair(startKeyList.toArray(new byte[startKeyList.size()][]),
+                endKeyList.toArray(new byte[endKeyList.size()][]));
   }
 
   /**