Posted to commits@accumulo.apache.org by el...@apache.org on 2014/04/15 06:36:17 UTC

[1/3] git commit: ACCUMULO-2665 Workaround for hadoop-2.4.0 requiring a <Guava-15.0
Repository: accumulo
Updated Branches:
  refs/heads/1.6.0-SNAPSHOT 77fe4da07 -> 126b6482a
  refs/heads/master 63f1e7678 -> 0f0acb68b


ACCUMULO-2665 Workaround for hadoop-2.4.0 requiring a <Guava-15.0

Guava 15.0 removes the LimitInputStream class that HDFS uses internally, so we need to
ensure a pre-15.0 version is on the classpath for tests that use MiniDFSCluster. The test
module is not prone to this because sisu-guava is pulled in from Maven; we avoid applying
the fix there as well, since it is a very ugly fix. This is a band-aid in the interim until
a better long-term fix.
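
For context, a minimal sketch (not part of this commit; the class name is made up here) of
the Guava API gap that bites Hadoop 2.4.0: HDFS code still instantiates
com.google.common.io.LimitInputStream directly, which only resolves against Guava 14.x or
earlier, while ByteStreams.limit is the wrapper that remains available in 15.0 and later.

    import java.io.ByteArrayInputStream;
    import java.io.IOException;
    import java.io.InputStream;

    import com.google.common.io.ByteStreams;
    // import com.google.common.io.LimitInputStream;  // present in Guava <= 14.x, removed in 15.0

    public class GuavaLimitSketch {
      public static void main(String[] args) throws IOException {
        InputStream raw = new ByteArrayInputStream(new byte[100]);

        // The pattern Hadoop 2.4.0's HDFS code was compiled against; on a Guava 15.0+
        // classpath it fails at runtime with NoClassDefFoundError:
        // InputStream limited = new LimitInputStream(raw, 10);

        // The equivalent wrapper that survives the removal:
        InputStream limited = ByteStreams.limit(raw, 10);
        System.out.println("bytes readable without blocking: " + limited.available());
      }
    }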


Project: http://git-wip-us.apache.org/repos/asf/accumulo/repo
Commit: http://git-wip-us.apache.org/repos/asf/accumulo/commit/126b6482
Tree: http://git-wip-us.apache.org/repos/asf/accumulo/tree/126b6482
Diff: http://git-wip-us.apache.org/repos/asf/accumulo/diff/126b6482

Branch: refs/heads/1.6.0-SNAPSHOT
Commit: 126b6482aa57c9956d547258a094d3f9daf486a1
Parents: 77fe4da
Author: Josh Elser <el...@apache.org>
Authored: Tue Apr 15 00:33:00 2014 -0400
Committer: Josh Elser <el...@apache.org>
Committed: Tue Apr 15 00:33:00 2014 -0400

----------------------------------------------------------------------
 start/pom.xml                                                 | 7 +++++++
 .../test/java/org/apache/accumulo/test/AccumuloDFSBase.java   | 6 ++++--
 2 files changed, 11 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/accumulo/blob/126b6482/start/pom.xml
----------------------------------------------------------------------
diff --git a/start/pom.xml b/start/pom.xml
index e164e82..8747930 100644
--- a/start/pom.xml
+++ b/start/pom.xml
@@ -46,6 +46,13 @@
       <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-client</artifactId>
     </dependency>
+    <!-- Hadoop-2.4.0 MiniDFSCluster uses classes from <Guava-15.0 -->
+    <dependency>
+      <groupId>com.google.guava</groupId>
+      <artifactId>guava</artifactId>
+      <version>14.0.1</version><!--$NO-MVN-MAN-VER$-->
+      <scope>test</scope>
+    </dependency>
     <dependency>
       <groupId>junit</groupId>
       <artifactId>junit</artifactId>
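
The pin above is test-scoped, so it only shadows Guava on this module's test classpath and
does not leak into the shipped artifact. A hypothetical diagnostic (not part of the commit,
class name invented here) for checking which Guava actually wins on that classpath:

    public class GuavaCompatCheck {
      public static void main(String[] args) {
        try {
          // LimitInputStream exists in Guava <= 14.x and is gone in 15.0+.
          Class.forName("com.google.common.io.LimitInputStream");
          System.out.println("pre-15.0 Guava on the classpath; MiniDFSCluster should start");
        } catch (ClassNotFoundException e) {
          System.out.println("Guava 15.0+ on the classpath; expect NoClassDefFoundError from MiniDFSCluster");
        }
      }
    }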

http://git-wip-us.apache.org/repos/asf/accumulo/blob/126b6482/start/src/test/java/org/apache/accumulo/test/AccumuloDFSBase.java
----------------------------------------------------------------------
diff --git a/start/src/test/java/org/apache/accumulo/test/AccumuloDFSBase.java b/start/src/test/java/org/apache/accumulo/test/AccumuloDFSBase.java
index 05623a8..8e2d534 100644
--- a/start/src/test/java/org/apache/accumulo/test/AccumuloDFSBase.java
+++ b/start/src/test/java/org/apache/accumulo/test/AccumuloDFSBase.java
@@ -66,7 +66,7 @@ public class AccumuloDFSBase {
     
     try {
       cluster = new MiniDFSCluster(conf, 1, true, null);
-      cluster.waitActive();
+      cluster.waitClusterUp();
       // We can't assume that the hostname of "localhost" will still be "localhost" after
       // starting up the NameNode. We may get mapped into a FQDN via settings in /etc/hosts.
       HDFS_URI = cluster.getFileSystem().getUri();
@@ -123,7 +123,9 @@ public class AccumuloDFSBase {
 
   @AfterClass
   public static void tearDownMiniDfsCluster() {
-    cluster.shutdown();
+    if (null != cluster) {
+      cluster.shutdown();
+    }
   }
 
 }
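
Putting the two hunks above together, a self-contained sketch of the intended MiniDFSCluster
lifecycle (the class name is illustrative, not the actual AccumuloDFSBase): wait for the
cluster to report itself up before asking for its URI, and only shut down a cluster that
actually started.

    import java.io.IOException;
    import java.net.URI;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.MiniDFSCluster;
    import org.junit.AfterClass;
    import org.junit.BeforeClass;

    public class MiniDfsLifecycleSketch {

      private static MiniDFSCluster cluster;
      private static URI hdfsUri;

      @BeforeClass
      public static void startMiniDfsCluster() throws IOException {
        Configuration conf = new Configuration();
        // Single DataNode, format on startup -- the same constructor the patched test uses.
        cluster = new MiniDFSCluster(conf, 1, true, null);
        // Block until the NameNode and DataNode report the cluster as up.
        cluster.waitClusterUp();
        // Ask the running cluster for its URI rather than assuming "localhost".
        hdfsUri = cluster.getFileSystem().getUri();
      }

      @AfterClass
      public static void tearDownMiniDfsCluster() {
        // Startup can fail and leave the field null; only shut down what started.
        if (null != cluster) {
          cluster.shutdown();
        }
      }
    }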


[3/3] git commit: Merge branch '1.6.0-SNAPSHOT'

Posted by el...@apache.org.
Merge branch '1.6.0-SNAPSHOT'


Project: http://git-wip-us.apache.org/repos/asf/accumulo/repo
Commit: http://git-wip-us.apache.org/repos/asf/accumulo/commit/0f0acb68
Tree: http://git-wip-us.apache.org/repos/asf/accumulo/tree/0f0acb68
Diff: http://git-wip-us.apache.org/repos/asf/accumulo/diff/0f0acb68

Branch: refs/heads/master
Commit: 0f0acb68bde24075e14fd3cd4f5261df293589ce
Parents: 63f1e76 126b648
Author: Josh Elser <el...@apache.org>
Authored: Tue Apr 15 00:35:38 2014 -0400
Committer: Josh Elser <el...@apache.org>
Committed: Tue Apr 15 00:35:38 2014 -0400

----------------------------------------------------------------------
 start/pom.xml                                                 | 7 +++++++
 .../test/java/org/apache/accumulo/test/AccumuloDFSBase.java   | 6 ++++--
 2 files changed, 11 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/accumulo/blob/0f0acb68/start/pom.xml
----------------------------------------------------------------------


[2/3] git commit: ACCUMULO-2665 Workaround for hadoop-2.4.0 requiring a <Guava-15.0

Posted by el...@apache.org.
ACCUMULO-2665 Workaround for hadoop-2.4.0 requiring a <Guava-15.0

Guava 15.0 removes the LimitInputStream class that HDFS uses internally, so we need to
ensure a pre-15.0 version is on the classpath for tests that use MiniDFSCluster. The test
module is not prone to this because sisu-guava is pulled in from Maven; we avoid applying
the fix there as well, since it is a very ugly fix. This is a band-aid in the interim until
a better long-term fix.


Project: http://git-wip-us.apache.org/repos/asf/accumulo/repo
Commit: http://git-wip-us.apache.org/repos/asf/accumulo/commit/126b6482
Tree: http://git-wip-us.apache.org/repos/asf/accumulo/tree/126b6482
Diff: http://git-wip-us.apache.org/repos/asf/accumulo/diff/126b6482

Branch: refs/heads/master
Commit: 126b6482aa57c9956d547258a094d3f9daf486a1
Parents: 77fe4da
Author: Josh Elser <el...@apache.org>
Authored: Tue Apr 15 00:33:00 2014 -0400
Committer: Josh Elser <el...@apache.org>
Committed: Tue Apr 15 00:33:00 2014 -0400

----------------------------------------------------------------------
 start/pom.xml                                                 | 7 +++++++
 .../test/java/org/apache/accumulo/test/AccumuloDFSBase.java   | 6 ++++--
 2 files changed, 11 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/accumulo/blob/126b6482/start/pom.xml
----------------------------------------------------------------------
diff --git a/start/pom.xml b/start/pom.xml
index e164e82..8747930 100644
--- a/start/pom.xml
+++ b/start/pom.xml
@@ -46,6 +46,13 @@
       <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-client</artifactId>
     </dependency>
+    <!-- Hadoop-2.4.0 MiniDFSCluster uses classes from <Guava-15.0 -->
+    <dependency>
+      <groupId>com.google.guava</groupId>
+      <artifactId>guava</artifactId>
+      <version>14.0.1</version><!--$NO-MVN-MAN-VER$-->
+      <scope>test</scope>
+    </dependency>
     <dependency>
       <groupId>junit</groupId>
       <artifactId>junit</artifactId>

http://git-wip-us.apache.org/repos/asf/accumulo/blob/126b6482/start/src/test/java/org/apache/accumulo/test/AccumuloDFSBase.java
----------------------------------------------------------------------
diff --git a/start/src/test/java/org/apache/accumulo/test/AccumuloDFSBase.java b/start/src/test/java/org/apache/accumulo/test/AccumuloDFSBase.java
index 05623a8..8e2d534 100644
--- a/start/src/test/java/org/apache/accumulo/test/AccumuloDFSBase.java
+++ b/start/src/test/java/org/apache/accumulo/test/AccumuloDFSBase.java
@@ -66,7 +66,7 @@ public class AccumuloDFSBase {
     
     try {
       cluster = new MiniDFSCluster(conf, 1, true, null);
-      cluster.waitActive();
+      cluster.waitClusterUp();
       // We can't assume that the hostname of "localhost" will still be "localhost" after
       // starting up the NameNode. We may get mapped into a FQDN via settings in /etc/hosts.
       HDFS_URI = cluster.getFileSystem().getUri();
@@ -123,7 +123,9 @@ public class AccumuloDFSBase {
 
   @AfterClass
   public static void tearDownMiniDfsCluster() {
-    cluster.shutdown();
+    if (null != cluster) {
+      cluster.shutdown();
+    }
   }
 
 }