Posted to common-commits@hadoop.apache.org by ji...@apache.org on 2007/08/03 02:02:21 UTC

svn commit: r562294 - in /lucene/hadoop/trunk/src/contrib/hbase: CHANGES.txt src/java/org/apache/hadoop/hbase/HConnectionManager.java src/java/org/apache/hadoop/hbase/HTable.java src/test/org/apache/hadoop/hbase/TestHTable.java

Author: jimk
Date: Thu Aug  2 17:02:19 2007
New Revision: 562294

URL: http://svn.apache.org/viewvc?view=rev&rev=562294
Log:
HADOOP-1528 HClient for multiple tables - expose close table function

HTable

    * added public method close
    * added protected method checkClosed
    * made getConnection public

HConnectionManager

    * a call to getTableServers or reloadTableServers on a closed table now reopens it
      and reloads its region information (see the usage sketch below)

TestHTable

    * new test case exercising HTable.close() and reopening a closed table
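
For context, a minimal client-side sketch of the new behavior (not part of this commit). It follows
the pattern in TestHTable below; the table, row and column names are illustrative, "conf" is assumed
to be a Configuration carrying the HBase settings, and throws-clauses and error handling are omitted:

    // org.apache.hadoop.io.Text is used for table, row and column names in this API
    HTable table = new HTable(conf, new Text("myTable"));      // the table must already exist

    long lockid = table.startUpdate(new Text("row1"));
    table.put(lockid, new Text("contents:"), "some value".getBytes());
    table.commit(lockid);

    table.close();    // releases the cached region information via HConnection.close(tableName)

    try {
      table.get(new Text("row1"), new Text("contents:"));
      // not reached
    } catch (IllegalStateException e) {
      // expected: after close(), the other HTable methods throw IllegalStateException
    }

    // Constructing a new HTable for the same table reopens it: getTableServers drops the table
    // from the connection's closed set and reloads its region servers.
    HTable reopened = new HTable(conf, new Text("myTable"));
    byte[] stored = reopened.get(new Text("row1"), new Text("contents:"));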

Added:
    lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestHTable.java
Modified:
    lucene/hadoop/trunk/src/contrib/hbase/CHANGES.txt
    lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HConnectionManager.java
    lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HTable.java

Modified: lucene/hadoop/trunk/src/contrib/hbase/CHANGES.txt
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/CHANGES.txt?view=diff&rev=562294&r1=562293&r2=562294
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/CHANGES.txt (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/CHANGES.txt Thu Aug  2 17:02:19 2007
@@ -83,4 +83,4 @@
  52. HADOOP-1528 HClient for multiple tables (phase 2) all HBase client side code
      (except TestHClient and HBaseShell) have been converted to use the new client
      side objects (HTable/HBaseAdmin/HConnection) instead of HClient.
-
+ 53. HADOOP-1528 HClient for multiple tables - expose close table function

Modified: lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HConnectionManager.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HConnectionManager.java?view=diff&rev=562294&r1=562293&r2=562294
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HConnectionManager.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HConnectionManager.java Thu Aug  2 17:02:19 2007
@@ -276,9 +276,7 @@
             "table name cannot be null or zero length");
       }
 
-      if (closedTables.contains(tableName)) {
-        throw new IllegalStateException("table closed: " + tableName);
-      }
+      closedTables.remove(tableName);
       
       SortedMap<Text, HRegionLocation> tableServers  =
         tablesToServers.get(tableName);
@@ -302,9 +300,7 @@
     public SortedMap<Text, HRegionLocation>
     reloadTableServers(final Text tableName) throws IOException {
       
-      if (closedTables.contains(tableName)) {
-        throw new IllegalStateException("table closed: " + tableName);
-      }
+      closedTables.remove(tableName);
 
       SortedMap<Text, HRegionLocation> servers =
         new TreeMap<Text, HRegionLocation>();
@@ -369,14 +365,14 @@
       }
       
       if (closedTables.contains(tableName)) {
-        throw new IllegalStateException("table closed: " + tableName);
+        throw new IllegalStateException("table already closed: " + tableName);
       }
 
       SortedMap<Text, HRegionLocation> tableServers =
         tablesToServers.remove(tableName);
 
       if (tableServers == null) {
-        throw new IllegalArgumentException("table was not opened: " + tableName);
+        throw new IllegalArgumentException("table not open: " + tableName);
       }
       
       closedTables.add(tableName);
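
Taken together, the three hunks above change the semantics of a closed table: getTableServers and
reloadTableServers no longer reject a closed table, they simply drop it from the closed set and
reload its region information, while close itself still guards against closing a table twice or
closing a table that was never opened. A condensed sketch of that bookkeeping (field and method
names taken from the class above, bodies abbreviated, not the actual implementation):

    // closedTables is a set of Text table names; tablesToServers maps a table name to its
    // cached SortedMap<Text, HRegionLocation> of region locations.
    public SortedMap<Text, HRegionLocation> getTableServers(final Text tableName) throws IOException {
      closedTables.remove(tableName);                    // reopen on access instead of throwing
      SortedMap<Text, HRegionLocation> servers = tablesToServers.get(tableName);
      if (servers == null) {
        servers = reloadTableServers(tableName);         // re-locate the regions and cache them
      }
      return servers;
    }

    public void close(final Text tableName) {
      if (closedTables.contains(tableName)) {
        throw new IllegalStateException("table already closed: " + tableName);
      }
      if (tablesToServers.remove(tableName) == null) {
        throw new IllegalArgumentException("table not open: " + tableName);
      }
      closedTables.add(tableName);
    }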

Modified: lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HTable.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HTable.java?view=diff&rev=562294&r1=562293&r2=562294
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HTable.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HTable.java Thu Aug  2 17:02:19 2007
@@ -59,6 +59,12 @@
   
   protected volatile boolean closed;
 
+  protected void checkClosed() {
+    if (closed) {
+      throw new IllegalStateException("table is closed");
+    }
+  }
+  
   /**
    * Creates an object to access a HBase table
    * 
@@ -85,6 +91,7 @@
    * @return Location of row.
    */
   HRegionLocation getRegionLocation(Text row) {
+    checkClosed();
     if (this.tableServers == null) {
       throw new IllegalStateException("Must open table first");
     }
@@ -96,9 +103,25 @@
   }
 
   /** @return the connection */
-  HConnection getConnection() {
+  public HConnection getConnection() {
+    checkClosed();
     return connection;
   }
+
+  /**
+   * Releases resources associated with this table. After calling close(), all
+   * other methods will throw an IllegalStateException
+   */
+  public synchronized void close() {
+    closed = true;
+    tableServers = null;
+    batch = null;
+    currentLockId = -1L;
+    currentRegion = null;
+    currentServer = null;
+    clientid = -1L;
+    connection.close(tableName);
+  }
   
   /**
    * Verifies that no update is in progress
@@ -114,9 +137,7 @@
    * @return Array of region starting row keys
    */
   public Text[] getStartKeys() {
-    if (closed) {
-      throw new IllegalStateException("table is closed");
-    }
+    checkClosed();
     Text[] keys = new Text[tableServers.size()];
     int i = 0;
     for(Text key: tableServers.keySet()){
@@ -134,6 +155,7 @@
    * @throws IOException
    */
   public byte[] get(Text row, Text column) throws IOException {
+    checkClosed();
     byte [] value = null;
     for(int tries = 0; tries < numRetries; tries++) {
       HRegionLocation r = getRegionLocation(row);
@@ -173,6 +195,7 @@
    * @throws IOException
    */
   public byte[][] get(Text row, Text column, int numVersions) throws IOException {
+    checkClosed();
     byte [][] values = null;
     for (int tries = 0; tries < numRetries; tries++) {
       HRegionLocation r = getRegionLocation(row);
@@ -226,6 +249,7 @@
    */
   public byte[][] get(Text row, Text column, long timestamp, int numVersions)
   throws IOException {
+    checkClosed();
     byte [][] values = null;
     for (int tries = 0; tries < numRetries; tries++) {
       HRegionLocation r = getRegionLocation(row);
@@ -274,6 +298,7 @@
    * @throws IOException
    */
   public SortedMap<Text, byte[]> getRow(Text row) throws IOException {
+    checkClosed();
     KeyedData[] value = null;
     for (int tries = 0; tries < numRetries; tries++) {
       HRegionLocation r = getRegionLocation(row);
@@ -372,6 +397,7 @@
       Text startRow, long timestamp, RowFilterInterface filter)
   throws IOException {
     
+    checkClosed();
     return new ClientScanner(columns, startRow, timestamp, filter);
   }
 
@@ -385,9 +411,8 @@
    * @return lockid to be used in subsequent put, delete and commit calls
    */
   public synchronized long startBatchUpdate(final Text row) {
-    if (batch != null || currentLockId != -1L) {
-      throw new IllegalStateException("update in progress");
-    }
+    checkClosed();
+    checkUpdateInProgress();
     batch = new BatchUpdate();
     return batch.startUpdate(row);
   }
@@ -397,6 +422,7 @@
    * @param lockid lock id returned by startBatchUpdate
    */
   public synchronized void abortBatch(final long lockid) {
+    checkClosed();
     if (batch == null) {
       throw new IllegalStateException("no batch update in progress");
     }
@@ -426,6 +452,7 @@
   public synchronized void commitBatch(final long lockid, final long timestamp)
   throws IOException {
 
+    checkClosed();
     if (batch == null) {
       throw new IllegalStateException("no batch update in progress");
     }
@@ -481,9 +508,8 @@
    * @throws IOException
    */
   public synchronized long startUpdate(final Text row) throws IOException {
-    if (currentLockId != -1L || batch != null) {
-      throw new IllegalStateException("update in progress");
-    }
+    checkClosed();
+    checkUpdateInProgress();
     for (int tries = 0; tries < numRetries; tries++) {
       IOException e = null;
       HRegionLocation info = getRegionLocation(row);
@@ -532,6 +558,7 @@
    * @throws IOException
    */
   public void put(long lockid, Text column, byte val[]) throws IOException {
+    checkClosed();
     if (val == null) {
       throw new IllegalArgumentException("value cannot be null");
     }
@@ -569,6 +596,7 @@
    * @throws IOException
    */
   public void delete(long lockid, Text column) throws IOException {
+    checkClosed();
     if (batch != null) {
       batch.delete(lockid, column);
       return;
@@ -602,6 +630,7 @@
    * @throws IOException
    */
   public synchronized void abort(long lockid) throws IOException {
+    checkClosed();
     if (batch != null) {
       abortBatch(lockid);
       return;
@@ -645,6 +674,7 @@
    * @throws IOException
    */
   public synchronized void commit(long lockid, long timestamp) throws IOException {
+    checkClosed();
     if (batch != null) {
       commitBatch(lockid, timestamp);
       return;
@@ -679,6 +709,7 @@
    * @throws IOException
    */
   public synchronized void renewLease(long lockid) throws IOException {
+    checkClosed();
     if (batch != null) {
       return;
     }
@@ -723,6 +754,7 @@
     private RowFilterInterface filter;
     
     private void loadRegions() {
+      checkClosed();
       Text firstServer = null;
       if (this.startRow == null || this.startRow.getLength() == 0) {
         firstServer = tableServers.firstKey();
@@ -763,6 +795,7 @@
      * Returns false if there are no more scanners.
      */
     private boolean nextScanner() throws IOException {
+      checkClosed();
       if (this.scannerId != -1L) {
         this.server.close(this.scannerId);
         this.scannerId = -1L;
@@ -821,6 +854,7 @@
      * {@inheritDoc}
      */
     public boolean next(HStoreKey key, TreeMap<Text, byte[]> results) throws IOException {
+      checkClosed();
       if (this.closed) {
         return false;
       }
@@ -844,6 +878,7 @@
      * {@inheritDoc}
      */
     public void close() throws IOException {
+      checkClosed();
       if (this.scannerId != -1L) {
         this.server.close(this.scannerId);
         this.scannerId = -1L;

Added: lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestHTable.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestHTable.java?view=auto&rev=562294
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestHTable.java (added)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestHTable.java Thu Aug  2 17:02:19 2007
@@ -0,0 +1,119 @@
+/**
+ * Copyright 2007 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase;
+
+import java.io.IOException;
+import java.util.Map;
+import java.util.TreeMap;
+
+import org.apache.hadoop.io.Text;
+
+/**
+ * Tests HTable
+ */
+public class TestHTable extends HBaseClusterTestCase implements HConstants {
+  private static final HColumnDescriptor column =
+    new HColumnDescriptor(COLUMN_FAMILY.toString());
+  
+  private static final Text tableAname = new Text("tableA");
+  private static final Text tableBname = new Text("tableB");
+  
+  private static final Text row = new Text("row");
+ 
+  /**
+   * the test
+   * @throws IOException
+   */
+  public void testHTable() throws IOException {
+    HTableDescriptor tableAdesc = new HTableDescriptor(tableAname.toString());
+    tableAdesc.addFamily(column);
+    
+    HTableDescriptor tableBdesc = new HTableDescriptor(tableBname.toString());
+    tableBdesc.addFamily(column);
+
+    // create a couple of tables
+    
+    HBaseAdmin admin = new HBaseAdmin(conf);
+    admin.createTable(tableAdesc);
+    admin.createTable(tableBdesc);
+    
+    // put some data into table A
+    
+    byte[] value = "value".getBytes(UTF8_ENCODING);
+    
+    HTable a = new HTable(conf, tableAname);
+    long lockid = a.startBatchUpdate(row);
+    a.put(lockid, COLUMN_FAMILY, value);
+    a.commit(lockid);
+    
+    // open a new connection to A and a connection to b
+    
+    HTable newA = new HTable(conf, tableAname);
+    HTable b = new HTable(conf, tableBname);
+
+    // copy data from A to B
+    
+    HScannerInterface s =
+      newA.obtainScanner(COLUMN_FAMILY_ARRAY, EMPTY_START_ROW);
+    
+    try {
+      HStoreKey key = new HStoreKey();
+      TreeMap<Text, byte[]> results = new TreeMap<Text, byte[]>();
+      while(s.next(key, results)) {
+        lockid = b.startBatchUpdate(key.getRow());
+        for(Map.Entry<Text, byte[]> e: results.entrySet()) {
+          b.put(lockid, e.getKey(), e.getValue());
+        }
+        b.commit(lockid);
+      }
+    } finally {
+      s.close();
+    }
+    
+    // Close table A and note how A becomes inaccessible
+    
+    a.close();
+    
+    try {
+      a.get(row, COLUMN_FAMILY);
+      fail();
+    } catch (IllegalStateException e) {
+      // expected
+    } catch (Exception e) {
+      e.printStackTrace();
+      fail();
+    }
+    
+    // Opening a new connection to A will cause the tables to be reloaded
+
+    try {
+      HTable anotherA = new HTable(conf, tableAname);
+      anotherA.get(row, COLUMN_FAMILY);
+    } catch (Exception e) {
+      e.printStackTrace();
+      fail();
+    }
+    
+    // We can still access A through newA because it has the table information
+    // cached. And if it needs to recalibrate, that will cause the information
+    // to be reloaded.
+    
+  }
+}