Posted to commits@hbase.apache.org by st...@apache.org on 2012/04/10 00:32:31 UTC

svn commit: r1311500 - in /hbase/branches/0.94/src: main/java/org/apache/hadoop/hbase/coprocessor/ main/java/org/apache/hadoop/hbase/zookeeper/ test/java/org/apache/hadoop/hbase/ test/java/org/apache/hadoop/hbase/coprocessor/

Author: stack
Date: Mon Apr  9 22:32:30 2012
New Revision: 1311500

URL: http://svn.apache.org/viewvc?rev=1311500&view=rev
Log:
HBASE-5748 Enable lib directory in jar file for coprocessor -- UNDO OVERCOMMIT

Modified:
    hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.java
    hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java
    hbase/branches/0.94/src/test/java/org/apache/hadoop/hbase/TestZooKeeper.java
    hbase/branches/0.94/src/test/java/org/apache/hadoop/hbase/coprocessor/TestClassLoading.java

Modified: hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.java?rev=1311500&r1=1311499&r2=1311500&view=diff
==============================================================================
--- hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.java (original)
+++ hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.java Mon Apr  9 22:32:30 2012
@@ -37,16 +37,12 @@ import org.apache.hadoop.hbase.util.Byte
 import org.apache.hadoop.hbase.util.SortedCopyOnWriteSet;
 import org.apache.hadoop.hbase.util.VersionInfo;
 import org.apache.hadoop.hbase.Server;
-import org.apache.hadoop.io.IOUtils;
 
 import java.io.File;
-import java.io.FileOutputStream;
 import java.io.IOException;
 import java.net.URL;
 import java.net.URLClassLoader;
 import java.util.*;
-import java.util.jar.JarEntry;
-import java.util.jar.JarFile;
 
 /**
  * Provides the common setup framework and runtime services for coprocessor
@@ -195,21 +191,6 @@ public abstract class CoprocessorHost<E 
       // method which returns URLs for as long as it is available
       List<URL> paths = new ArrayList<URL>();
       paths.add(new File(dst.toString()).getCanonicalFile().toURL());
-
-      JarFile jarFile = new JarFile(dst.toString());
-      Enumeration<JarEntry> entries = jarFile.entries();
-      while (entries.hasMoreElements()) {
-        JarEntry entry = entries.nextElement();
-        if (entry.getName().matches("/lib/[^/]+\\.jar")) {
-          File file = new File(System.getProperty("java.io.tmpdir") +
-              java.io.File.separator +"." + pathPrefix +
-              "." + className + "." + System.currentTimeMillis() + "." + entry.getName().substring(5));
-          IOUtils.copyBytes(jarFile.getInputStream(entry), new FileOutputStream(file), conf, true);
-          file.deleteOnExit();
-          paths.add(file.toURL());
-        }
-      }
-
       StringTokenizer st = new StringTokenizer(cp, File.pathSeparator);
       while (st.hasMoreTokens()) {
         paths.add((new File(st.nextToken())).getCanonicalFile().toURL());
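
The block removed above was the HBASE-5748 feature itself: it unpacked every lib/*.jar entry found inside the coprocessor jar into java.io.tmpdir and added the extracted jars to the class loader's search path. A minimal standalone sketch of that technique follows; it is not HBase code, and the class and method names are made up for illustration:

import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.net.URL;
import java.net.URLClassLoader;
import java.util.ArrayList;
import java.util.Enumeration;
import java.util.List;
import java.util.jar.JarEntry;
import java.util.jar.JarFile;

public class NestedJarLoaderSketch {
  // Builds a class loader whose search path contains the coprocessor jar
  // itself plus every jar found under its lib/ directory, extracted to
  // temporary files first (URLClassLoader cannot read nested jars directly).
  public static ClassLoader loaderFor(File coprocessorJar) throws IOException {
    List<URL> paths = new ArrayList<URL>();
    paths.add(coprocessorJar.getCanonicalFile().toURI().toURL());
    JarFile jarFile = new JarFile(coprocessorJar);
    try {
      Enumeration<JarEntry> entries = jarFile.entries();
      while (entries.hasMoreElements()) {
        JarEntry entry = entries.nextElement();
        // Jar entry names are normally relative ("lib/foo.jar"); accept a
        // leading slash too, which is the form the removed code matched.
        if (entry.getName().matches("/?lib/[^/]+\\.jar")) {
          File tmp = File.createTempFile("nested-", ".jar");
          tmp.deleteOnExit();   // clean up on normal JVM exit, as the removed code did
          copy(jarFile.getInputStream(entry), new FileOutputStream(tmp));
          paths.add(tmp.toURI().toURL());
        }
      }
    } finally {
      jarFile.close();
    }
    return new URLClassLoader(paths.toArray(new URL[paths.size()]),
        NestedJarLoaderSketch.class.getClassLoader());
  }

  private static void copy(InputStream in, OutputStream out) throws IOException {
    try {
      byte[] buffer = new byte[8192];
      int n;
      while ((n = in.read(buffer)) > 0) {
        out.write(buffer, 0, n);
      }
    } finally {
      in.close();
      out.close();
    }
  }
}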

Modified: hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java?rev=1311500&r1=1311499&r2=1311500&view=diff
==============================================================================
--- hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java (original)
+++ hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java Mon Apr  9 22:32:30 2012
@@ -26,7 +26,6 @@ import java.io.PrintWriter;
 import java.net.InetSocketAddress;
 import java.net.Socket;
 import java.util.ArrayList;
-import java.util.Collections;
 import java.util.List;
 import java.util.Properties;
 
@@ -595,13 +594,11 @@ public class ZKUtil {
       ZooKeeperWatcher zkw, String baseNode) throws KeeperException {
     List<String> nodes =
       ZKUtil.listChildrenAndWatchForNewChildren(zkw, baseNode);
-    List<NodeAndData> newNodes = Collections.emptyList();
-    if (nodes != null) {
-      for (String node : nodes) {
-        String nodePath = ZKUtil.joinZNode(baseNode, node);
-        byte[] data = ZKUtil.getDataAndWatch(zkw, nodePath);
-        newNodes.add(new NodeAndData(nodePath, data));
-      }
+    List<NodeAndData> newNodes = new ArrayList<NodeAndData>();
+    for (String node: nodes) {
+      String nodePath = ZKUtil.joinZNode(baseNode, node);
+      byte [] data = ZKUtil.getDataAndWatch(zkw, nodePath);
+      newNodes.add(new NodeAndData(nodePath, data));
     }
     return newNodes;
   }
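
The hunk above restores the original body of getChildDataAndWatchForNewChildren. Two things stand out in the reverted variant: it guarded against listChildrenAndWatchForNewChildren returning null (the NPE case the test deleted below exercised), but it accumulated results into Collections.emptyList(), which is immutable, so add() would have thrown UnsupportedOperationException for any non-empty listing; the restored version, in turn, can throw NullPointerException when the base node is missing. A standalone sketch that combines both concerns, where NodeAndData and the dataByPath lookup are stand-ins rather than the ZKUtil API:

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Map;

public class ChildDataSketch {
  // Stand-in for ZKUtil.NodeAndData: a znode path plus the bytes read from it.
  public static class NodeAndData {
    public final String path;
    public final byte[] data;
    public NodeAndData(String path, byte[] data) {
      this.path = path;
      this.data = data;
    }
  }

  // children may be null when the base node does not exist; dataByPath stands
  // in for the per-node read (ZKUtil.getDataAndWatch in the real code).
  public static List<NodeAndData> childData(String baseNode,
      List<String> children, Map<String, byte[]> dataByPath) {
    if (children == null) {
      // An immutable empty list is fine as a return value, but it must not be
      // used as the accumulator: adding to Collections.emptyList() throws
      // UnsupportedOperationException.
      return Collections.emptyList();
    }
    List<NodeAndData> newNodes = new ArrayList<NodeAndData>();
    for (String child : children) {
      String nodePath = baseNode + "/" + child;
      newNodes.add(new NodeAndData(nodePath, dataByPath.get(nodePath)));
    }
    return newNodes;
  }
}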

Modified: hbase/branches/0.94/src/test/java/org/apache/hadoop/hbase/TestZooKeeper.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.94/src/test/java/org/apache/hadoop/hbase/TestZooKeeper.java?rev=1311500&r1=1311499&r2=1311500&view=diff
==============================================================================
--- hbase/branches/0.94/src/test/java/org/apache/hadoop/hbase/TestZooKeeper.java (original)
+++ hbase/branches/0.94/src/test/java/org/apache/hadoop/hbase/TestZooKeeper.java Mon Apr  9 22:32:30 2012
@@ -296,18 +296,6 @@ public class TestZooKeeper {
 
     ZKUtil.createAndFailSilent(zk2, aclZnode);
  }
-  
-  @Test
-  /**
-   * Test should not fail with NPE when getChildDataAndWatchForNewChildren
-   * invoked with wrongNode
-   */
-  public void testGetChildDataAndWatchForNewChildrenShouldNotThrowNPE()
-      throws Exception {
-    ZooKeeperWatcher zkw = new ZooKeeperWatcher(TEST_UTIL.getConfiguration(),
-        "testGetChildDataAndWatchForNewChildrenShouldNotThrowNPE", null);
-    ZKUtil.getChildDataAndWatchForNewChildren(zkw, "/wrongNode");
-  }
 
   @org.junit.Rule
   public org.apache.hadoop.hbase.ResourceCheckerJUnitRule cu =

Modified: hbase/branches/0.94/src/test/java/org/apache/hadoop/hbase/coprocessor/TestClassLoading.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.94/src/test/java/org/apache/hadoop/hbase/coprocessor/TestClassLoading.java?rev=1311500&r1=1311499&r2=1311500&view=diff
==============================================================================
--- hbase/branches/0.94/src/test/java/org/apache/hadoop/hbase/coprocessor/TestClassLoading.java (original)
+++ hbase/branches/0.94/src/test/java/org/apache/hadoop/hbase/coprocessor/TestClassLoading.java Mon Apr  9 22:32:30 2012
@@ -383,93 +383,6 @@ public class TestClassLoading {
   }
 
   @Test
-  public void testClassLoadingFromLibDirInJar() throws Exception {
-    FileSystem fs = cluster.getFileSystem();
-
-    File innerJarFile1 = buildCoprocessorJar(cpName1);
-    File innerJarFile2 = buildCoprocessorJar(cpName2);
-    File outerJarFile = new File(TEST_UTIL.getDataTestDir().toString(), "outer.jar");
-
-    byte buffer[] = new byte[BUFFER_SIZE];
-    // Open archive file
-    FileOutputStream stream = new FileOutputStream(outerJarFile);
-    JarOutputStream out = new JarOutputStream(stream, new Manifest());
-
-    for (File jarFile: new File[] { innerJarFile1, innerJarFile2 }) {
-      // Add archive entry
-      JarEntry jarAdd = new JarEntry("/lib/" + jarFile.getName());
-      jarAdd.setTime(jarFile.lastModified());
-      out.putNextEntry(jarAdd);
-  
-      // Write file to archive
-      FileInputStream in = new FileInputStream(jarFile);
-      while (true) {
-        int nRead = in.read(buffer, 0, buffer.length);
-        if (nRead <= 0)
-          break;
-        out.write(buffer, 0, nRead);
-      }
-      in.close();
-    }
-    out.close();
-    stream.close();
-    LOG.info("Adding jar file to outer jar file completed");
-
-    // copy the jars into dfs
-    fs.copyFromLocalFile(new Path(outerJarFile.getPath()),
-      new Path(fs.getUri().toString() + Path.SEPARATOR));
-    String jarFileOnHDFS = fs.getUri().toString() + Path.SEPARATOR +
-      outerJarFile.getName();
-    assertTrue("Copy jar file to HDFS failed.",
-      fs.exists(new Path(jarFileOnHDFS)));
-    LOG.info("Copied jar file to HDFS: " + jarFileOnHDFS);
-
-    // create a table that references the coprocessors
-    HTableDescriptor htd = new HTableDescriptor(tableName);
-    htd.addFamily(new HColumnDescriptor("test"));
-      // without configuration values
-    htd.setValue("COPROCESSOR$1", jarFileOnHDFS.toString() + "|" + cpName1 +
-      "|" + Coprocessor.PRIORITY_USER);
-      // with configuration values
-    htd.setValue("COPROCESSOR$2", jarFileOnHDFS.toString() + "|" + cpName2 +
-      "|" + Coprocessor.PRIORITY_USER + "|k1=v1,k2=v2,k3=v3");
-    HBaseAdmin admin = new HBaseAdmin(this.conf);
-    if (admin.tableExists(tableName)) {
-      admin.disableTable(tableName);
-      admin.deleteTable(tableName);
-    }
-    admin.createTable(htd);
-
-    // verify that the coprocessors were loaded
-    boolean found1 = false, found2 = false, found2_k1 = false,
-        found2_k2 = false, found2_k3 = false;
-    MiniHBaseCluster hbase = TEST_UTIL.getHBaseCluster();
-    for (HRegion region:
-        hbase.getRegionServer(0).getOnlineRegionsLocalContext()) {
-      if (region.getRegionNameAsString().startsWith(tableName)) {
-        CoprocessorEnvironment env;
-        env = region.getCoprocessorHost().findCoprocessorEnvironment(cpName1);
-        if (env != null) {
-          found1 = true;
-        }
-        env = region.getCoprocessorHost().findCoprocessorEnvironment(cpName2);
-        if (env != null) {
-          found2 = true;
-          Configuration conf = env.getConfiguration();
-          found2_k1 = conf.get("k1") != null;
-          found2_k2 = conf.get("k2") != null;
-          found2_k3 = conf.get("k3") != null;
-        }
-      }
-    }
-    assertTrue("Class " + cpName1 + " was missing on a region", found1);
-    assertTrue("Class " + cpName2 + " was missing on a region", found2);
-    assertTrue("Configuration key 'k1' was missing on a region", found2_k1);
-    assertTrue("Configuration key 'k2' was missing on a region", found2_k2);
-    assertTrue("Configuration key 'k3' was missing on a region", found2_k3);
-  }
-
-  @Test
   public void testRegionServerCoprocessorsReported() throws Exception {
     // HBASE 4070: Improve region server metrics to report loaded coprocessors
     // to master: verify that each regionserver is reporting the correct set of
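
The deleted test above packaged two coprocessor jars under a /lib/ directory inside an outer jar, copied the outer jar to HDFS, and registered the classes through the table descriptor's COPROCESSOR$n attributes (path|class|priority|key=value,...). The packaging step on its own looks roughly like the following standalone sketch; file names are placeholders, and it writes the conventional relative entry name lib/..., whereas the removed test wrote entries with a leading slash:

import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.util.jar.JarEntry;
import java.util.jar.JarOutputStream;
import java.util.jar.Manifest;

public class OuterJarBuilderSketch {
  // Packs the given jars under lib/ inside outerJar, the layout that the
  // HBASE-5748 class loading change (and the removed test) targeted.
  public static void build(File outerJar, File[] innerJars) throws IOException {
    JarOutputStream out =
        new JarOutputStream(new FileOutputStream(outerJar), new Manifest());
    try {
      byte[] buffer = new byte[4096];
      for (File inner : innerJars) {
        JarEntry entry = new JarEntry("lib/" + inner.getName());
        entry.setTime(inner.lastModified());
        out.putNextEntry(entry);
        FileInputStream in = new FileInputStream(inner);
        try {
          int n;
          while ((n = in.read(buffer)) > 0) {
            out.write(buffer, 0, n);
          }
        } finally {
          in.close();
        }
        out.closeEntry();
      }
    } finally {
      out.close();
    }
  }
}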