Posted to commits@hbase.apache.org by st...@apache.org on 2012/04/10 00:35:11 UTC

svn commit: r1311503 - in /hbase/branches/0.92: CHANGES.txt src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.java src/test/java/org/apache/hadoop/hbase/coprocessor/TestClassLoading.java

Author: stack
Date: Mon Apr  9 22:35:10 2012
New Revision: 1311503

URL: http://svn.apache.org/viewvc?rev=1311503&view=rev
Log:
HBASE-5748 Enable lib directory in jar file for coprocessor

Modified:
    hbase/branches/0.92/CHANGES.txt
    hbase/branches/0.92/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.java
    hbase/branches/0.92/src/test/java/org/apache/hadoop/hbase/coprocessor/TestClassLoading.java

Modified: hbase/branches/0.92/CHANGES.txt
URL: http://svn.apache.org/viewvc/hbase/branches/0.92/CHANGES.txt?rev=1311503&r1=1311502&r2=1311503&view=diff
==============================================================================
--- hbase/branches/0.92/CHANGES.txt (original)
+++ hbase/branches/0.92/CHANGES.txt Mon Apr  9 22:35:10 2012
@@ -32,6 +32,7 @@ Release 0.92.2 - Unreleased
                secure HBase server (Shaneal Manek)
    HBASE-5618  SplitLogManager - prevent unnecessary attempts to resubmits
                (Prakash Khemani)
+   HBASE-5748  Enable lib directory in jar file for coprocessor (Takuya Ueshin)
 
   IMPROVEMENTS
    HBASE-5592  Make it easier to get a table from shell (Ben West)

Modified: hbase/branches/0.92/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.92/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.java?rev=1311503&r1=1311502&r2=1311503&view=diff
==============================================================================
--- hbase/branches/0.92/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.java (original)
+++ hbase/branches/0.92/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.java Mon Apr  9 22:35:10 2012
@@ -37,12 +37,16 @@ import org.apache.hadoop.hbase.util.Byte
 import org.apache.hadoop.hbase.util.SortedCopyOnWriteSet;
 import org.apache.hadoop.hbase.util.VersionInfo;
 import org.apache.hadoop.hbase.Server;
+import org.apache.hadoop.io.IOUtils;
 
 import java.io.File;
+import java.io.FileOutputStream;
 import java.io.IOException;
 import java.net.URL;
 import java.net.URLClassLoader;
 import java.util.*;
+import java.util.jar.JarEntry;
+import java.util.jar.JarFile;
 
 /**
  * Provides the common setup framework and runtime services for coprocessor
@@ -191,6 +195,21 @@ public abstract class CoprocessorHost<E 
       // method which returns URLs for as long as it is available
       List<URL> paths = new ArrayList<URL>();
       paths.add(new File(dst.toString()).getCanonicalFile().toURL());
+
+      JarFile jarFile = new JarFile(dst.toString());
+      Enumeration<JarEntry> entries = jarFile.entries();
+      while (entries.hasMoreElements()) {
+        JarEntry entry = entries.nextElement();
+        if (entry.getName().matches("/lib/[^/]+\\.jar")) {
+          File file = new File(System.getProperty("java.io.tmpdir") +
+              java.io.File.separator +"." + pathPrefix +
+              "." + className + "." + System.currentTimeMillis() + "." + entry.getName().substring(5));
+          IOUtils.copyBytes(jarFile.getInputStream(entry), new FileOutputStream(file), conf, true);
+          file.deleteOnExit();
+          paths.add(file.toURL());
+        }
+      }
+
       StringTokenizer st = new StringTokenizer(cp, File.pathSeparator);
       while (st.hasMoreTokens()) {
         paths.add((new File(st.nextToken())).getCanonicalFile().toURL());

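Standing back from the diff: the new block in CoprocessorHost copies every nested jar it finds under the coprocessor jar's /lib/ directory out to a temporary file and appends that file to the class loader's search path, because a jar nested inside another jar cannot be placed on a URLClassLoader path directly. A minimal standalone sketch of the same technique follows; the class and method names (NestedJarClassLoaderSketch, loaderFor) are hypothetical, the temp-file naming is simplified, and plain java.io copying stands in for Hadoop's IOUtils.copyBytes.

import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.net.URL;
import java.net.URLClassLoader;
import java.util.ArrayList;
import java.util.Enumeration;
import java.util.List;
import java.util.jar.JarEntry;
import java.util.jar.JarFile;

public class NestedJarClassLoaderSketch {

  /**
   * Builds a class loader whose search path contains the coprocessor jar
   * itself plus every nested jar found under its /lib/ directory.
   */
  static ClassLoader loaderFor(File coprocessorJar, ClassLoader parent)
      throws IOException {
    List<URL> paths = new ArrayList<URL>();
    paths.add(coprocessorJar.getCanonicalFile().toURI().toURL());

    JarFile jar = new JarFile(coprocessorJar);
    try {
      Enumeration<JarEntry> entries = jar.entries();
      while (entries.hasMoreElements()) {
        JarEntry entry = entries.nextElement();
        // Same pattern as the patch: pick out nested jars under "/lib/".
        if (entry.getName().matches("/lib/[^/]+\\.jar")) {
          // Extract the nested jar to a temp file; a jar inside a jar
          // cannot be put on a URLClassLoader path directly.
          File extracted = File.createTempFile("coprocessor-lib-", ".jar");
          extracted.deleteOnExit();
          copy(jar.getInputStream(entry), new FileOutputStream(extracted));
          paths.add(extracted.toURI().toURL());
        }
      }
    } finally {
      jar.close();
    }
    return new URLClassLoader(paths.toArray(new URL[paths.size()]), parent);
  }

  private static void copy(InputStream in, OutputStream out) throws IOException {
    try {
      byte[] buf = new byte[4096];
      int n;
      while ((n = in.read(buf)) != -1) {
        out.write(buf, 0, n);
      }
    } finally {
      in.close();
      out.close();
    }
  }
}
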
Modified: hbase/branches/0.92/src/test/java/org/apache/hadoop/hbase/coprocessor/TestClassLoading.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.92/src/test/java/org/apache/hadoop/hbase/coprocessor/TestClassLoading.java?rev=1311503&r1=1311502&r2=1311503&view=diff
==============================================================================
--- hbase/branches/0.92/src/test/java/org/apache/hadoop/hbase/coprocessor/TestClassLoading.java (original)
+++ hbase/branches/0.92/src/test/java/org/apache/hadoop/hbase/coprocessor/TestClassLoading.java Mon Apr  9 22:35:10 2012
@@ -389,6 +389,93 @@ public class TestClassLoading {
   }
 
   @Test
+  public void testClassLoadingFromLibDirInJar() throws Exception {
+    FileSystem fs = cluster.getFileSystem();
+
+    File innerJarFile1 = buildCoprocessorJar(cpName1);
+    File innerJarFile2 = buildCoprocessorJar(cpName2);
+    File outerJarFile = new File(TEST_UTIL.getDataTestDir().toString(), "outer.jar");
+
+    byte buffer[] = new byte[BUFFER_SIZE];
+    // Open archive file
+    FileOutputStream stream = new FileOutputStream(outerJarFile);
+    JarOutputStream out = new JarOutputStream(stream, new Manifest());
+
+    for (File jarFile: new File[] { innerJarFile1, innerJarFile2 }) {
+      // Add archive entry
+      JarEntry jarAdd = new JarEntry("/lib/" + jarFile.getName());
+      jarAdd.setTime(jarFile.lastModified());
+      out.putNextEntry(jarAdd);
+  
+      // Write file to archive
+      FileInputStream in = new FileInputStream(jarFile);
+      while (true) {
+        int nRead = in.read(buffer, 0, buffer.length);
+        if (nRead <= 0)
+          break;
+        out.write(buffer, 0, nRead);
+      }
+      in.close();
+    }
+    out.close();
+    stream.close();
+    LOG.info("Adding jar file to outer jar file completed");
+
+    // copy the jars into dfs
+    fs.copyFromLocalFile(new Path(outerJarFile.getPath()),
+      new Path(fs.getUri().toString() + Path.SEPARATOR));
+    String jarFileOnHDFS = fs.getUri().toString() + Path.SEPARATOR +
+      outerJarFile.getName();
+    assertTrue("Copy jar file to HDFS failed.",
+      fs.exists(new Path(jarFileOnHDFS)));
+    LOG.info("Copied jar file to HDFS: " + jarFileOnHDFS);
+
+    // create a table that references the coprocessors
+    HTableDescriptor htd = new HTableDescriptor(tableName);
+    htd.addFamily(new HColumnDescriptor("test"));
+      // without configuration values
+    htd.setValue("COPROCESSOR$1", jarFileOnHDFS.toString() + "|" + cpName1 +
+      "|" + Coprocessor.PRIORITY_USER);
+      // with configuration values
+    htd.setValue("COPROCESSOR$2", jarFileOnHDFS.toString() + "|" + cpName2 +
+      "|" + Coprocessor.PRIORITY_USER + "|k1=v1,k2=v2,k3=v3");
+    HBaseAdmin admin = new HBaseAdmin(this.conf);
+    if (admin.tableExists(tableName)) {
+      admin.disableTable(tableName);
+      admin.deleteTable(tableName);
+    }
+    admin.createTable(htd);
+
+    // verify that the coprocessors were loaded
+    boolean found1 = false, found2 = false, found2_k1 = false,
+        found2_k2 = false, found2_k3 = false;
+    MiniHBaseCluster hbase = TEST_UTIL.getHBaseCluster();
+    for (HRegion region:
+        hbase.getRegionServer(0).getOnlineRegionsLocalContext()) {
+      if (region.getRegionNameAsString().startsWith(tableName)) {
+        CoprocessorEnvironment env;
+        env = region.getCoprocessorHost().findCoprocessorEnvironment(cpName1);
+        if (env != null) {
+          found1 = true;
+        }
+        env = region.getCoprocessorHost().findCoprocessorEnvironment(cpName2);
+        if (env != null) {
+          found2 = true;
+          Configuration conf = env.getConfiguration();
+          found2_k1 = conf.get("k1") != null;
+          found2_k2 = conf.get("k2") != null;
+          found2_k3 = conf.get("k3") != null;
+        }
+      }
+    }
+    assertTrue("Class " + cpName1 + " was missing on a region", found1);
+    assertTrue("Class " + cpName2 + " was missing on a region", found2);
+    assertTrue("Configuration key 'k1' was missing on a region", found2_k1);
+    assertTrue("Configuration key 'k2' was missing on a region", found2_k2);
+    assertTrue("Configuration key 'k3' was missing on a region", found2_k3);
+  }
+
+  @Test
   public void testRegionServerCoprocessorsReported() throws Exception {
     // HBASE 4070: Improve region server metrics to report loaded coprocessors
     // to master: verify that each regionserver is reporting the correct set of
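The test above assembles the outer jar by hand with JarOutputStream. For anyone packaging a coprocessor the same way outside the test harness, here is a condensed, self-contained sketch under the same assumptions; OuterJarBuilder and buildOuterJar are illustrative names, and the entry names keep the leading "/lib/" prefix that the pattern in CoprocessorHost matches on.

import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.util.jar.JarEntry;
import java.util.jar.JarOutputStream;
import java.util.jar.Manifest;

public class OuterJarBuilder {

  /** Packs each dependency jar under /lib/ inside a single outer jar. */
  static void buildOuterJar(File outerJar, File... libJars) throws IOException {
    JarOutputStream out =
        new JarOutputStream(new FileOutputStream(outerJar), new Manifest());
    try {
      byte[] buffer = new byte[4096];
      for (File lib : libJars) {
        // Leading slash matches the "/lib/[^/]+\.jar" pattern used by the
        // CoprocessorHost change in this commit.
        JarEntry entry = new JarEntry("/lib/" + lib.getName());
        entry.setTime(lib.lastModified());
        out.putNextEntry(entry);
        FileInputStream in = new FileInputStream(lib);
        try {
          int n;
          while ((n = in.read(buffer)) != -1) {
            out.write(buffer, 0, n);
          }
        } finally {
          in.close();
        }
        out.closeEntry();
      }
    } finally {
      out.close();
    }
  }
}

Once such a jar has been copied to HDFS, it is attached to a table exactly as the test does, through a COPROCESSOR$n table attribute of the form jar-path|class-name|priority, optionally followed by |k1=v1,k2=v2 configuration pairs.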