You are viewing a plain text version of this content. The canonical link for it is here.
Posted to common-commits@hadoop.apache.org by ma...@apache.org on 2019/04/03 19:00:45 UTC
[hadoop] branch trunk updated: HADOOP-16210. Update guava to
27.0-jre in hadoop-project trunk. Contributed by Gabor Bota.
This is an automated email from the ASF dual-hosted git repository.
mackrorysd pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git
The following commit(s) were added to refs/heads/trunk by this push:
new d797907 HADOOP-16210. Update guava to 27.0-jre in hadoop-project trunk. Contributed by Gabor Bota.
d797907 is described below
commit d7979079ea8c6514858b77a78f0119cffc178086
Author: Gabor Bota <ga...@cloudera.com>
AuthorDate: Thu Nov 29 16:27:55 2018 +0100
HADOOP-16210. Update guava to 27.0-jre in hadoop-project trunk. Contributed by Gabor Bota.
---
.../hadoop-common/dev-support/findbugsExcludeFile.xml | 7 +++++++
.../src/main/java/org/apache/hadoop/security/Groups.java | 2 +-
.../server/federation/resolver/order/LocalResolver.java | 2 +-
.../hadoop-hdfs/dev-support/findbugsExcludeFile.xml | 4 ++--
.../hadoop/hdfs/qjournal/client/IPCLoggerChannel.java | 2 +-
.../org/apache/hadoop/hdfs/qjournal/client/QuorumCall.java | 3 ++-
.../org/apache/hadoop/hdfs/qjournal/server/Journal.java | 11 ++++++-----
.../hdfs/server/datanode/checker/DatasetVolumeChecker.java | 13 +++++++------
.../hdfs/server/datanode/checker/ThrottledAsyncChecker.java | 2 +-
.../datanode/checker/TestThrottledAsyncCheckerTimeout.java | 11 +++++++----
.../org/apache/hadoop/mapred/LocatedFileStatusFetcher.java | 9 ++++++---
hadoop-project/pom.xml | 2 +-
.../apache/hadoop/yarn/service/utils/ZookeeperUtils.java | 2 +-
13 files changed, 43 insertions(+), 27 deletions(-)
diff --git a/hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml b/hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml
index c056d21..802197e 100644
--- a/hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml
+++ b/hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml
@@ -409,6 +409,13 @@
<Bug pattern="NP_NULL_PARAM_DEREF"/>
</Match>
+ <!-- Parameter is checked with Strings.isNullOrEmpty before accessing -->
+ <Match>
+ <Class name="org.apache.hadoop.conf.Configuration"/>
+ <Method name="asXmlDocument"/>
+ <Bug pattern="NP_PARAMETER_MUST_BE_NONNULL_BUT_MARKED_AS_NULLABLE"/>
+ </Match>
+
<Match>
<Class name="org.apache.hadoop.ipc.ExternalCall"/>
<Field name="done"/>
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/Groups.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/Groups.java
index 63ec9a5..b29278b 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/Groups.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/Groups.java
@@ -375,7 +375,7 @@ public class Groups {
backgroundRefreshException.incrementAndGet();
backgroundRefreshRunning.decrementAndGet();
}
- });
+ }, MoreExecutors.directExecutor());
return listenableFuture;
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/order/LocalResolver.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/order/LocalResolver.java
index a774677..58a8ed2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/order/LocalResolver.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/order/LocalResolver.java
@@ -196,7 +196,7 @@ public class LocalResolver extends RouterResolver<String, String> {
try {
String nsId = nn.getNameserviceId();
String rpcAddress = nn.getRpcAddress();
- String hostname = HostAndPort.fromString(rpcAddress).getHostText();
+ String hostname = HostAndPort.fromString(rpcAddress).getHost();
ret.put(hostname, nsId);
if (hostname.equals(localHostname)) {
ret.put(localIp, nsId);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml b/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml
index f9d834e..8fd1d98 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml
@@ -181,8 +181,8 @@
<!-- More complex cleanup logic confuses findbugs -->
<Match>
<Class name="org.apache.hadoop.hdfs.qjournal.server.Journal" />
- <Method name="persistPaxosData" />
- <Bug pattern="OS_OPEN_STREAM" />
+ <Method name="getPersistedPaxosData" />
+ <Bug pattern="NP_NULL_PARAM_DEREF" />
</Match>
<!-- guava 27.0 update: @Nullable is not detected, however it's there -->
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java
index d462ef6..3247476 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java
@@ -447,7 +447,7 @@ public class IPCLoggerChannel implements AsyncLogger {
public void onSuccess(Void t) {
unreserveQueueSpace(data.length);
}
- });
+ }, MoreExecutors.directExecutor());
}
}
return ret;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumCall.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumCall.java
index dee74e6..ef32eb1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumCall.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumCall.java
@@ -22,6 +22,7 @@ import java.util.Map.Entry;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.TimeUnit;
+import com.google.common.util.concurrent.MoreExecutors;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.util.StopWatch;
import org.apache.hadoop.util.Timer;
@@ -80,7 +81,7 @@ class QuorumCall<KEY, RESULT> {
public void onSuccess(RESULT res) {
qr.addResult(e.getKey(), res);
}
- });
+ }, MoreExecutors.directExecutor());
}
return qr;
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java
index 9e204cb..f0bdab1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java
@@ -1084,11 +1084,12 @@ public class Journal implements Closeable {
fos.write('\n');
// Write human-readable data after the protobuf. This is only
// to assist in debugging -- it's not parsed at all.
- OutputStreamWriter writer = new OutputStreamWriter(fos, Charsets.UTF_8);
-
- writer.write(String.valueOf(newData));
- writer.write('\n');
- writer.flush();
+ try(OutputStreamWriter writer =
+ new OutputStreamWriter(fos, Charsets.UTF_8)) {
+ writer.write(String.valueOf(newData));
+ writer.write('\n');
+ writer.flush();
+ }
fos.flush();
success = true;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/DatasetVolumeChecker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/DatasetVolumeChecker.java
index 5c590f6..7c13ed0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/DatasetVolumeChecker.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/DatasetVolumeChecker.java
@@ -24,6 +24,7 @@ import com.google.common.collect.Sets;
import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
+import com.google.common.util.concurrent.MoreExecutors;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
@@ -224,12 +225,12 @@ public class DatasetVolumeChecker {
Futures.addCallback(olf.get(),
new ResultHandler(reference, healthyVolumes, failedVolumes,
numVolumes, new Callback() {
- @Override
- public void call(Set<FsVolumeSpi> ignored1,
- Set<FsVolumeSpi> ignored2) {
- latch.countDown();
- }
- }));
+ @Override
+ public void call(Set<FsVolumeSpi> ignored1,
+ Set<FsVolumeSpi> ignored2) {
+ latch.countDown();
+ }
+ }), MoreExecutors.directExecutor());
} else {
IOUtils.cleanup(null, reference);
if (numVolumes.decrementAndGet() == 0) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/ThrottledAsyncChecker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/ThrottledAsyncChecker.java
index bb1ed46..8844453 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/ThrottledAsyncChecker.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/ThrottledAsyncChecker.java
@@ -182,7 +182,7 @@ public class ThrottledAsyncChecker<K, V> implements AsyncChecker<K, V> {
t, timer.monotonicNow()));
}
}
- });
+ }, MoreExecutors.directExecutor());
}
/**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/checker/TestThrottledAsyncCheckerTimeout.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/checker/TestThrottledAsyncCheckerTimeout.java
index 91a793b..926747d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/checker/TestThrottledAsyncCheckerTimeout.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/checker/TestThrottledAsyncCheckerTimeout.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.hdfs.server.datanode.checker;
import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
+import com.google.common.util.concurrent.MoreExecutors;
import org.apache.hadoop.util.FakeTimer;
import org.junit.Before;
import org.junit.Rule;
@@ -101,7 +102,7 @@ public class TestThrottledAsyncCheckerTimeout {
numCallbackInvocationsFailure.incrementAndGet();
callbackResult.set(true);
}
- });
+ }, MoreExecutors.directExecutor());
while (!callbackResult.get()) {
// Wait for the callback
@@ -133,7 +134,8 @@ public class TestThrottledAsyncCheckerTimeout {
.schedule(target, true);
assertTrue(olf1.isPresent());
- Futures.addCallback(olf1.get(), futureCallback);
+ Futures.addCallback(olf1.get(), futureCallback,
+ MoreExecutors.directExecutor());
// Verify that timeout results in only 1 onFailure call and 0 onSuccess
// calls.
@@ -149,7 +151,8 @@ public class TestThrottledAsyncCheckerTimeout {
.schedule(target, true);
assertTrue(olf2.isPresent());
- Futures.addCallback(olf2.get(), futureCallback);
+ Futures.addCallback(olf2.get(), futureCallback,
+ MoreExecutors.directExecutor());
// Verify that normal check (dummy) results in only 1 onSuccess call.
// Number of times onFailure is invoked should remain the same i.e. 1.
@@ -187,7 +190,7 @@ public class TestThrottledAsyncCheckerTimeout {
throwable[0] = t;
callbackResult.set(true);
}
- });
+ }, MoreExecutors.directExecutor());
while (!callbackResult.get()) {
// Wait for the callback
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/LocatedFileStatusFetcher.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/LocatedFileStatusFetcher.java
index 1b1025e..3869c49 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/LocatedFileStatusFetcher.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/LocatedFileStatusFetcher.java
@@ -120,7 +120,8 @@ public class LocatedFileStatusFetcher {
runningTasks.incrementAndGet();
ListenableFuture<ProcessInitialInputPathCallable.Result> future = exec
.submit(new ProcessInitialInputPathCallable(p, conf, inputFilter));
- Futures.addCallback(future, processInitialInputPathCallback);
+ Futures.addCallback(future, processInitialInputPathCallback,
+ MoreExecutors.directExecutor());
}
runningTasks.decrementAndGet();
@@ -267,7 +268,8 @@ public class LocatedFileStatusFetcher {
ListenableFuture<ProcessInputDirCallable.Result> future = exec
.submit(new ProcessInputDirCallable(result.fs, fileStatus,
recursive, inputFilter));
- Futures.addCallback(future, processInputDirCallback);
+ Futures.addCallback(future, processInputDirCallback,
+ MoreExecutors.directExecutor());
}
}
decrementRunningAndCheckCompletion();
@@ -353,7 +355,8 @@ public class LocatedFileStatusFetcher {
ListenableFuture<ProcessInputDirCallable.Result> future = exec
.submit(new ProcessInputDirCallable(result.fs, matched,
recursive, inputFilter));
- Futures.addCallback(future, processInputDirCallback);
+ Futures.addCallback(future, processInputDirCallback,
+ MoreExecutors.directExecutor());
}
}
decrementRunningAndCheckCompletion();
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index 7bd012d..aac0315 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -92,7 +92,7 @@
<spotbugs.version>3.1.0-RC1</spotbugs.version>
<dnsjava.version>2.1.7</dnsjava.version>
- <guava.version>11.0.2</guava.version>
+ <guava.version>27.0-jre</guava.version>
<guice.version>4.0</guice.version>
<joda-time.version>2.9.9</joda-time.version>
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ZookeeperUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ZookeeperUtils.java
index c6e8525..60eb9b4 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ZookeeperUtils.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ZookeeperUtils.java
@@ -87,7 +87,7 @@ public class ZookeeperUtils {
public static String buildHostsOnlyList(List<HostAndPort> hostAndPorts) {
StringBuilder sb = new StringBuilder();
for (HostAndPort hostAndPort : hostAndPorts) {
- sb.append(hostAndPort.getHostText()).append(",");
+ sb.append(hostAndPort.getHost()).append(",");
}
if (sb.length() > 0) {
sb.delete(sb.length() - 1, sb.length());
---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org