
svn commit: r601961 - in /lucene/hadoop/trunk/src/contrib/hbase: ./ src/java/org/apache/hadoop/hbase/ src/test/org/apache/hadoop/hbase/

Author: stack
Date: Thu Dec  6 18:34:08 2007
New Revision: 601961

URL: http://svn.apache.org/viewvc?rev=601961&view=rev
Log:
HADOOP-2362 Leaking hdfs file handle on region split
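
The pattern behind the leak, and behind each hunk below: a stream was
opened (or a region constructed, which opens its store readers) and the
handle was only released on the success path, or on the split path not
at all. A minimal sketch of the open/try/finally idiom this patch
applies (illustrative class and method names, not the committed code):

    import java.io.IOException;

    import org.apache.hadoop.fs.FSDataInputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    class HandleLeakFixSketch {
      // Open, use, always release: the finally block runs whether or not
      // the read throws, so no code path can keep the hdfs handle alive.
      static long readFirstLong(FileSystem fs, Path p) throws IOException {
        FSDataInputStream in = fs.open(p);  // acquires an hdfs file handle
        try {
          return in.readLong();             // may throw partway through
        } finally {
          in.close();                       // runs on every exit path
        }
      }
    }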

Modified:
    lucene/hadoop/trunk/src/contrib/hbase/CHANGES.txt
    lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HAbstractScanner.java
    lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HMaster.java
    lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HMsg.java
    lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegion.java
    lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegionServer.java
    lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HServerAddress.java
    lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HServerInfo.java
    lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HStore.java
    lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HStoreFile.java
    lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HStoreKey.java
    lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestScanner2.java
    lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestSplit.java

Modified: lucene/hadoop/trunk/src/contrib/hbase/CHANGES.txt
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/CHANGES.txt?rev=601961&r1=601960&r2=601961&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/CHANGES.txt (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/CHANGES.txt Thu Dec  6 18:34:08 2007
@@ -59,6 +59,7 @@
    HADOOP-2347 REST servlet not thread safe but run in a threaded manner
                (Bryan Duxbury via Stack)
    HADOOP-2365 Result of HashFunction.hash() contains all identical values
+   HADOOP-2362 Leaking hdfs file handle on region split
 
   IMPROVEMENTS
    HADOOP-2401 Add convenience put method that takes writable

Modified: lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HAbstractScanner.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HAbstractScanner.java?rev=601961&r1=601960&r2=601961&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HAbstractScanner.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HAbstractScanner.java Thu Dec  6 18:34:08 2007
@@ -29,8 +29,6 @@
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.io.DataInputBuffer;
-import org.apache.hadoop.io.DataOutputBuffer;
 import org.apache.hadoop.io.Text;
 
 /**
@@ -124,9 +122,6 @@
   protected long timestamp;                                     // The timestamp to match entries against
   private boolean wildcardMatch;
   private boolean multipleMatchers;
-  
-  protected DataOutputBuffer outbuf = new DataOutputBuffer();
-  protected DataInputBuffer inbuf = new DataInputBuffer();
 
   /** Constructor for abstract base class */
   HAbstractScanner(long timestamp, Text[] targetCols) throws IOException {

Modified: lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HMaster.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HMaster.java?rev=601961&r1=601960&r2=601961&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HMaster.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HMaster.java Thu Dec  6 18:34:08 2007
@@ -908,9 +908,9 @@
         LOG.info("bootstrap: creating ROOT and first META regions");
         try {
           HRegion root = HRegion.createHRegion(HRegionInfo.rootRegionInfo,
-              this.dir, this.conf, null);
+              this.dir, this.conf);
           HRegion meta = HRegion.createHRegion(HRegionInfo.firstMetaRegionInfo,
-            this.dir, this.conf, null);
+            this.dir, this.conf);
 
           // Add first region from the META table to the ROOT region.
           HRegion.addRegionToMETA(root, meta);
@@ -2545,7 +2545,7 @@
       // 2. Create the HRegion
           
       HRegion region =
-        HRegion.createHRegion(newRegion, this.dir, this.conf, null);
+        HRegion.createHRegion(newRegion, this.dir, this.conf);
 
       // 3. Insert into meta
           

Modified: lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HMsg.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HMsg.java?rev=601961&r1=601960&r2=601961&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HMsg.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HMsg.java Thu Dec  6 18:34:08 2007
@@ -19,9 +19,12 @@
  */
 package org.apache.hadoop.hbase;
 
-import org.apache.hadoop.io.*;
 
-import java.io.*;
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+
+import org.apache.hadoop.io.Writable;
 
 /*******************************************************************************
  * HMsg is for communicating instructions between the HMaster and the 

Modified: lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegion.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegion.java?rev=601961&r1=601960&r2=601961&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegion.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegion.java Thu Dec  6 18:34:08 2007
@@ -608,8 +608,10 @@
     // under each region.
     HRegion regionA =
       new HRegion(rootDir, log, fs, conf, regionAInfo, dirA, null);
+    regionA.close();
     HRegion regionB =
       new HRegion(rootDir, log, fs, conf, regionBInfo, dirB, null);
+    regionB.close();
 
     // Cleanup
     boolean deleted = fs.delete(splits);    // Get rid of splits directory
@@ -1581,13 +1583,12 @@
    * @param info Info for region to create.
    * @param rootDir Root directory for HBase instance
    * @param conf
-   * @param initialFiles InitialFiles to pass new HRegion. Pass null if none.
    * @return new HRegion
    * 
    * @throws IOException
    */
   static HRegion createHRegion(final HRegionInfo info, final Path rootDir,
-      final HBaseConfiguration conf, final Path initialFiles)
+      final HBaseConfiguration conf)
   throws IOException {
     Path regionDir = HRegion.getRegionDir(rootDir,
         HRegionInfo.encodeRegionName(info.getRegionName()));
@@ -1595,7 +1596,7 @@
     fs.mkdirs(regionDir);
     return new HRegion(rootDir,
       new HLog(fs, new Path(regionDir, HREGION_LOGDIR_NAME), conf, null),
-      fs, conf, info, initialFiles, null);
+      fs, conf, info, null, null);
   }
   
   /**
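
Two related changes in HRegion. First, splitRegion closes the daughter
regions it constructs: the HRegion constructor opens the region's store
files, but on the split path the daughters are only being laid out on
disk, not served, so their readers were leaked on every split. Second,
createHRegion drops its initialFiles parameter, which every caller
passed as null. A sketch of the resulting shape (arguments mirror the
call sites in this diff; hbase types assumed on the classpath):

    import java.io.IOException;

    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    class SplitCloseSketch {
      // The daughter exists only to lay out its directory, so its store
      // readers are released as soon as construction has done that work.
      static void layOutDaughter(Path rootDir, HLog log, FileSystem fs,
          HBaseConfiguration conf, HRegionInfo info, Path dir)
          throws IOException {
        HRegion daughter = new HRegion(rootDir, log, fs, conf, info, dir, null);
        daughter.close();  // without this, every split leaked open handles
      }
    }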

Modified: lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegionServer.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegionServer.java?rev=601961&r1=601960&r2=601961&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegionServer.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegionServer.java Thu Dec  6 18:34:08 2007
@@ -445,9 +445,6 @@
         synchronized(cacheFlusherLock) { // Don't interrupt while we're working
           if (e != null) {
             try {
-              if (LOG.isDebugEnabled()) {
-                LOG.debug("flushing region " + e.getRegion().getRegionName());
-              }
               if (e.getRegion().flushcache()) {
                 compactor.compactionRequested(e);
               }

Modified: lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HServerAddress.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HServerAddress.java?rev=601961&r1=601960&r2=601961&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HServerAddress.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HServerAddress.java Thu Dec  6 18:34:08 2007
@@ -21,7 +21,9 @@
 
 import org.apache.hadoop.io.*;
 
-import java.io.*;
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
 import java.net.InetSocketAddress;
 
 /**

Modified: lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HServerInfo.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HServerInfo.java?rev=601961&r1=601960&r2=601961&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HServerInfo.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HServerInfo.java Thu Dec  6 18:34:08 2007
@@ -19,9 +19,12 @@
  */
 package org.apache.hadoop.hbase;
 
-import org.apache.hadoop.io.*;
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+
+import org.apache.hadoop.io.Writable;
 
-import java.io.*;
 
 /**
  * HServerInfo contains metainfo about an HRegionServer, Currently it only
@@ -139,7 +142,6 @@
 
 
   // Writable
-  /** {@inheritDoc} */
   public void readFields(DataInput in) throws IOException {
     this.serverAddress.readFields(in);
     this.startCode = in.readLong();
@@ -147,7 +149,6 @@
     this.infoPort = in.readInt();
   }
 
-  /** {@inheritDoc} */
   public void write(DataOutput out) throws IOException {
     this.serverAddress.write(out);
     out.writeLong(this.startCode);

Modified: lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HStore.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HStore.java?rev=601961&r1=601960&r2=601961&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HStore.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HStore.java Thu Dec  6 18:34:08 2007
@@ -20,7 +20,6 @@
 package org.apache.hadoop.hbase;
 
 import java.io.DataInputStream;
-import java.io.DataOutputStream;
 import java.io.IOException;
 import java.io.UnsupportedEncodingException;
 import java.util.ArrayList;
@@ -760,9 +759,11 @@
         bloomFilter = new RetouchedBloomFilter();
       }
       FSDataInputStream in = fs.open(filterFile);
-      bloomFilter.readFields(in);
-      fs.close();
-      
+      try {
+        bloomFilter.readFields(in);
+      } finally {
+        fs.close();
+      }
     } else {
       if (LOG.isDebugEnabled()) {
         LOG.debug("creating bloom filter for " + this.storeName);
@@ -913,7 +914,6 @@
           HStoreKey curkey = es.getKey();
           if (this.familyName.equals(HStoreKey.extractFamily(
               curkey.getColumn()))) {
-              
             out.append(curkey, new ImmutableBytesWritable(es.getValue()));
           }
         }
@@ -1040,7 +1040,7 @@
 
       // Write out a list of data files that we're replacing
       Path filesToReplace = new Path(curCompactStore, COMPACTION_TO_REPLACE);
-      DataOutputStream out = new DataOutputStream(fs.create(filesToReplace));
+      FSDataOutputStream out = fs.create(filesToReplace);
       try {
         out.writeInt(filesToCompact.size());
         for (HStoreFile hsf : filesToCompact) {
@@ -1052,7 +1052,7 @@
 
       // Indicate that we're done.
       Path doneFile = new Path(curCompactStore, COMPACTION_DONE);
-      (new DataOutputStream(fs.create(doneFile))).close();
+      fs.create(doneFile).close();
 
       // Move the compaction into place.
       completeCompaction(curCompactStore);
@@ -2151,5 +2151,4 @@
         "next(HStoreKey, StortedMap(...) is more efficient");
     }
   }
-  
 }
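
Both HStore changes serve the same goal. Loading the bloom filter now
releases its handle in a finally block, and the compaction bookkeeping
writes drop the redundant new DataOutputStream(fs.create(...)) wrapper:
FSDataOutputStream already extends java.io.DataOutputStream, so the
extra layer bought nothing and obscured which object owned the handle.
A sketch of the write side (hypothetical method name):

    import java.io.IOException;

    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    class OwnedHandleWriteSketch {
      // One stream, one owner: fs.create() already returns a DataOutput,
      // and the finally block hands the handle back to hdfs every time.
      static void writeCount(FileSystem fs, Path p, int count)
          throws IOException {
        FSDataOutputStream out = fs.create(p);
        try {
          out.writeInt(count);
        } finally {
          out.close();
        }
      }
    }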

Modified: lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HStoreFile.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HStoreFile.java?rev=601961&r1=601960&r2=601961&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HStoreFile.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HStoreFile.java Thu Dec  6 18:34:08 2007
@@ -22,7 +22,6 @@
 import java.io.DataInput;
 import java.io.DataInputStream;
 import java.io.DataOutput;
-import java.io.DataOutputStream;
 import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.io.UnsupportedEncodingException;
@@ -351,17 +350,15 @@
   static HStoreFile obtainNewHStoreFile(HBaseConfiguration conf, Path dir, 
       String encodedRegionName, Text colFamily, FileSystem fs)
       throws IOException {
-    
     Path mapdir = HStoreFile.getMapDir(dir, encodedRegionName, colFamily);
-    long fileId = Math.abs(rand.nextLong());
-
-    Path testpath1 = new Path(mapdir, createHStoreFilename(fileId));
-    Path testpath2 = new Path(mapdir, createHStoreInfoFilename(fileId));
-    while(fs.exists(testpath1) || fs.exists(testpath2)) {
+    Path testpath1 = null;
+    Path testpath2 = null;
+    long fileId = -1;
+    do {
       fileId = Math.abs(rand.nextLong());
       testpath1 = new Path(mapdir, createHStoreFilename(fileId));
       testpath2 = new Path(mapdir, createHStoreInfoFilename(fileId));
-    }
+    } while(fs.exists(testpath1) || fs.exists(testpath2));
     return new HStoreFile(conf, dir, encodedRegionName, colFamily, fileId);
   }
 
@@ -606,7 +603,7 @@
    */
   void writeInfo(FileSystem fs, long infonum) throws IOException {
     Path p = getInfoFilePath();
-    DataOutputStream out = new DataOutputStream(fs.create(p));
+    FSDataOutputStream out = fs.create(p);
     try {
       out.writeByte(INFO_SEQ_NUM);
       out.writeLong(infonum);
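
The obtainNewHStoreFile rewrite is shape-only: the old code built the
two candidate paths once before the loop and again inside it, while the
do/while states the generate-then-test cycle exactly once. Reduced to a
sketch, with an in-memory set standing in for the fs.exists() probes:

    import java.util.Random;
    import java.util.Set;

    class UniqueIdSketch {
      private static final Random rand = new Random();

      // Keep drawing random ids until one is unused; the code that builds
      // a candidate appears once, so there is nothing to keep in sync.
      static long pickFreeId(Set<Long> taken) {
        long fileId;
        do {
          fileId = Math.abs(rand.nextLong());
        } while (taken.contains(fileId));  // collision: draw again
        return fileId;
      }
    }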

Modified: lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HStoreKey.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HStoreKey.java?rev=601961&r1=601960&r2=601961&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HStoreKey.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HStoreKey.java Thu Dec  6 18:34:08 2007
@@ -330,5 +330,4 @@
     column.readFields(in);
     timestamp = in.readLong();
   }
-}
-
+}
\ No newline at end of file

Modified: lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestScanner2.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestScanner2.java?rev=601961&r1=601960&r2=601961&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestScanner2.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestScanner2.java Thu Dec  6 18:34:08 2007
@@ -305,10 +305,10 @@
       List<HRegion> newRegions = new ArrayList<HRegion>(2);
       newRegions.add(HRegion.createHRegion(
           new HRegionInfo(desc, null, new Text("midway")),
-          homedir, this.conf, null));
+          homedir, this.conf));
       newRegions.add(HRegion.createHRegion(
           new HRegionInfo(desc, new Text("midway"), null),
-          homedir, this.conf, null));
+          homedir, this.conf));
       try {
         for (HRegion r : newRegions) {
           addRegionToMETA(metaTable, r, this.cluster.getHMasterAddress(),

Modified: lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestSplit.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestSplit.java?rev=601961&r1=601960&r2=601961&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestSplit.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestSplit.java Thu Dec  6 18:34:08 2007
@@ -85,67 +85,92 @@
     Text midkey = new Text();
     assertTrue(region.needsSplit(midkey));
     HRegion [] regions = split(region);
-    // Assert can get rows out of new regions.  Should be able to get first
-    // row from first region and the midkey from second region.
-    assertGet(regions[0], COLFAMILY_NAME3, new Text(START_KEY));
-    assertGet(regions[1], COLFAMILY_NAME3, midkey);
-    // Test I can get scanner and that it starts at right place.
-    assertScan(regions[0], COLFAMILY_NAME3, new Text(START_KEY));
-    assertScan(regions[1], COLFAMILY_NAME3, midkey);
-    // Now prove can't split regions that have references.
-    Text [] midkeys = new Text[regions.length];
-    for (int i = 0; i < regions.length; i++) {
-      midkeys[i] = new Text();
-      // Even after above splits, still needs split but after splits its
-      // unsplitable because biggest store file is reference.  References
-      // make the store unsplittable, until something bigger comes along.
-      assertFalse(regions[i].needsSplit(midkeys[i]));
-      // Add so much data to this region, we create a store file that is > than
-      // one of our unsplitable references.
-      // it will.
-      for (int j = 0; j < 2; j++) {
-        addContent(regions[i], COLFAMILY_NAME3);
-      }
-      addContent(regions[i], COLFAMILY_NAME2);
-      addContent(regions[i], COLFAMILY_NAME1);
-      regions[i].flushcache();
-    }
-    
-    // Assert that even if one store file is larger than a reference, the
-    // region is still deemed unsplitable (Can't split region if references
-    // presen).
-    for (int i = 0; i < regions.length; i++) {
-      midkeys[i] = new Text();
-      // Even after above splits, still needs split but after splits its
-      // unsplitable because biggest store file is reference.  References
-      // make the store unsplittable, until something bigger comes along.
-      assertFalse(regions[i].needsSplit(midkeys[i]));
-    }
-    
-    // To make regions splitable force compaction.
-    for (int i = 0; i < regions.length; i++) {
-      regions[i].compactStores();
-    }
+    try {
+      // Need to open the regions.
+      // TODO: Add an 'open' to HRegion... don't do open by constructing
+      // instance.
+      for (int i = 0; i < regions.length; i++) {
+        regions[i] = openClosedRegion(regions[i]);
+      }
+      // Assert can get rows out of new regions. Should be able to get first
+      // row from first region and the midkey from second region.
+      assertGet(regions[0], COLFAMILY_NAME3, new Text(START_KEY));
+      assertGet(regions[1], COLFAMILY_NAME3, midkey);
+      // Test I can get scanner and that it starts at right place.
+      assertScan(regions[0], COLFAMILY_NAME3, new Text(START_KEY));
+      assertScan(regions[1], COLFAMILY_NAME3, midkey);
+      // Now prove can't split regions that have references.
+      Text[] midkeys = new Text[regions.length];
+      for (int i = 0; i < regions.length; i++) {
+        midkeys[i] = new Text();
+        // Even after above splits, still needs split but after splits it's
+        // unsplittable because the biggest store file is a reference. References
+        // make the store unsplittable, until something bigger comes along.
+        assertFalse(regions[i].needsSplit(midkeys[i]));
+        // Add so much data to this region that we create a store file
+        // bigger than one of our unsplittable references.
+        for (int j = 0; j < 2; j++) {
+          addContent(regions[i], COLFAMILY_NAME3);
+        }
+        addContent(regions[i], COLFAMILY_NAME2);
+        addContent(regions[i], COLFAMILY_NAME1);
+        regions[i].flushcache();
+      }
 
-    TreeMap<String, HRegion> sortedMap = new TreeMap<String, HRegion>();
-    // Split these two daughter regions so then I'll have 4 regions.  Will
-    // split because added data above.
-    for (int i = 0; i < regions.length; i++) {
-      HRegion [] rs = split(regions[i]);
-      for (int j = 0; j < rs.length; j++) {
-        sortedMap.put(rs[j].getRegionName().toString(), rs[j]);
+      // Assert that even if one store file is larger than a reference, the
+      // region is still deemed unsplittable (can't split a region if
+      // references are present).
+      for (int i = 0; i < regions.length; i++) {
+        midkeys[i] = new Text();
+        // Even after above splits, still needs split but after splits it's
+        // unsplittable because the biggest store file is a reference. References
+        // make the store unsplittable, until something bigger comes along.
+        assertFalse(regions[i].needsSplit(midkeys[i]));
+      }
+
+      // To make regions splittable, force compaction.
+      for (int i = 0; i < regions.length; i++) {
+        regions[i].compactStores();
+      }
+
+      TreeMap<String, HRegion> sortedMap = new TreeMap<String, HRegion>();
+      // Split these two daughter regions so then I'll have 4 regions. Will
+      // split because added data above.
+      for (int i = 0; i < regions.length; i++) {
+        HRegion[] rs = split(regions[i]);
+        for (int j = 0; j < rs.length; j++) {
+          sortedMap.put(rs[j].getRegionName().toString(),
+            openClosedRegion(rs[j]));
+        }
+      }
+      LOG.info("Made 4 regions");
+      // The splits should have been even. Test I can get some arbitrary row out
+      // of each.
+      int interval = (LAST_CHAR - FIRST_CHAR) / 3;
+      byte[] b = START_KEY.getBytes(HConstants.UTF8_ENCODING);
+      for (HRegion r : sortedMap.values()) {
+        assertGet(r, COLFAMILY_NAME3, new Text(new String(b,
+            HConstants.UTF8_ENCODING)));
+        b[0] += interval;
+      }
+    } finally {
+      for (int i = 0; i < regions.length; i++) {
+        try {
+          regions[i].close();
+        } catch (IOException e) {
+          // Ignore.
+        }
       }
     }
-    LOG.info("Made 4 regions");
-    // The splits should have been even.  Test I can get some arbitrary row out
-    // of each.
-    int interval = (LAST_CHAR - FIRST_CHAR) / 3;
-    byte[] b = START_KEY.getBytes(HConstants.UTF8_ENCODING);
-    for (HRegion r: sortedMap.values()) {
-      assertGet(r, COLFAMILY_NAME3,
-          new Text(new String(b, HConstants.UTF8_ENCODING)));
-      b[0] += interval;
-    }
+  }
+  
+  private HRegion openClosedRegion(final HRegion closedRegion)
+  throws IOException {
+    return new HRegion(closedRegion.getRootDir(), closedRegion.getLog(),
+      closedRegion.getFilesystem(), closedRegion.getConf(),
+      closedRegion.getRegionInfo(), null, null);
   }
   
   /**
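
The TestSplit reshuffle follows from the HRegion change above: split()
now hands back closed daughters, so the test reopens each one through
the new openClosedRegion helper before asserting against it, and the
body moves inside try/finally so a failing assertion can no longer
strand open regions. The skeleton of that pattern (a sketch; assertion
details elided):

    HRegion[] regions = split(region);
    try {
      for (int i = 0; i < regions.length; i++) {
        regions[i] = openClosedRegion(regions[i]);  // split returns them closed
      }
      // ... gets, scans, and further splits against the daughters ...
    } finally {
      for (HRegion r : regions) {
        try {
          r.close();           // never leak a region, even on test failure
        } catch (IOException e) {
          // best effort during cleanup
        }
      }
    }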