Posted to common-commits@hadoop.apache.org by cm...@apache.org on 2015/01/09 01:13:03 UTC
hadoop git commit: HADOOP-11470. Remove some uses of obsolete guava APIs from the hadoop codebase (Sangjin Lee via Colin P. McCabe)
Repository: hadoop
Updated Branches:
refs/heads/trunk 7e2d9a324 -> ae91b13a4
HADOOP-11470. Remove some uses of obsolete guava APIs from the hadoop codebase (Sangjin Lee via Colin P. McCabe)
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ae91b13a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ae91b13a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ae91b13a
Branch: refs/heads/trunk
Commit: ae91b13a4b1896b893268253104f935c3078d345
Parents: 7e2d9a3
Author: Colin Patrick Mccabe <cm...@cloudera.com>
Authored: Thu Jan 8 16:09:44 2015 -0800
Committer: Colin Patrick Mccabe <cm...@cloudera.com>
Committed: Thu Jan 8 16:09:44 2015 -0800
----------------------------------------------------------------------
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +++
.../apache/hadoop/hdfs/qjournal/server/Journal.java | 15 +++++++--------
.../hadoop/hdfs/TestDataTransferKeepalive.java | 4 +---
3 files changed, 11 insertions(+), 11 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ae91b13a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 1358edc..b9704a2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -482,6 +482,9 @@ Release 2.7.0 - UNRELEASED
HADOOP-11032. Replace use of Guava's Stopwatch with Hadoop's StopWatch
(ozawa)
+ HADOOP-11470. Remove some uses of obsolete guava APIs from the hadoop
+ codebase. (Sangjin Lee via Colin P. McCabe)
+
OPTIMIZATIONS
HDFS-7454. Reduce memory footprint for AclEntries in NameNode.
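
The neighboring HADOOP-11032 entry above belongs to the same cleanup effort: later Guava releases deprecated the public Stopwatch constructor in favor of Stopwatch.createStarted(), so Hadoop introduced its own org.apache.hadoop.util.StopWatch (imported by Journal.java below). A minimal sketch of the replacement, using a hypothetical StopWatchSketch class:

import java.util.concurrent.TimeUnit;
import org.apache.hadoop.util.StopWatch;

public class StopWatchSketch {
  public static void main(String[] args) throws InterruptedException {
    // Hadoop's StopWatch replaces "new com.google.common.base.Stopwatch()",
    // whose public constructor later Guava releases deprecated in favor of
    // Stopwatch.createStarted().
    StopWatch sw = new StopWatch().start();
    Thread.sleep(50);
    sw.stop();
    System.out.println("elapsed ms: " + sw.now(TimeUnit.MILLISECONDS));
  }
}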
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ae91b13a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java
index 7cac5c9..9d11ca5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java
@@ -29,6 +29,7 @@ import java.util.Iterator;
import java.util.List;
import java.util.concurrent.TimeUnit;
+import org.apache.commons.lang.math.LongRange;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
@@ -61,15 +62,13 @@ import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.ipc.Server;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.util.StopWatch;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Charsets;
import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableList;
-import com.google.common.collect.Range;
-import com.google.common.collect.Ranges;
import com.google.protobuf.TextFormat;
-import org.apache.hadoop.util.StopWatch;
/**
* A JournalNode can manage journals for several clusters at once.
@@ -793,8 +792,8 @@ public class Journal implements Closeable {
// Paranoid sanity check: if the new log is shorter than the log we
// currently have, we should not end up discarding any transactions
// which are already Committed.
- if (txnRange(currentSegment).contains(committedTxnId.get()) &&
- !txnRange(segment).contains(committedTxnId.get())) {
+ if (txnRange(currentSegment).containsLong(committedTxnId.get()) &&
+ !txnRange(segment).containsLong(committedTxnId.get())) {
throw new AssertionError(
"Cannot replace segment " +
TextFormat.shortDebugString(currentSegment) +
@@ -812,7 +811,7 @@ public class Journal implements Closeable {
// If we're shortening the log, update our highest txid
// used for lag metrics.
- if (txnRange(currentSegment).contains(highestWrittenTxId)) {
+ if (txnRange(currentSegment).containsLong(highestWrittenTxId)) {
highestWrittenTxId = segment.getEndTxId();
}
}
@@ -856,10 +855,10 @@ public class Journal implements Closeable {
TextFormat.shortDebugString(newData));
}
- private Range<Long> txnRange(SegmentStateProto seg) {
+ private LongRange txnRange(SegmentStateProto seg) {
Preconditions.checkArgument(seg.hasEndTxId(),
"invalid segment: %s", seg);
- return Ranges.closed(seg.getStartTxId(), seg.getEndTxId());
+ return new LongRange(seg.getStartTxId(), seg.getEndTxId());
}
/**
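
The Journal.java hunks above swap Guava's Range&lt;Long&gt;/Ranges for Commons Lang's primitive-specialized LongRange; the Ranges factory class was deprecated in Guava 14 and removed in Guava 15, which is what made these call sites obsolete. A minimal sketch of the before/after, assuming plain long start/end txids in place of the SegmentStateProto accessors used by Journal#txnRange:

import org.apache.commons.lang.math.LongRange;

public class TxnRangeSketch {
  // Hypothetical helper: plain longs stand in for SegmentStateProto's
  // getStartTxId()/getEndTxId() used by Journal#txnRange above.
  static boolean segmentContains(long startTxId, long endTxId, long txid) {
    // Before (Guava): Ranges.closed(startTxId, endTxId).contains(txid),
    // which boxed every long. After: LongRange is inclusive on both ends,
    // matching the semantics of Ranges.closed, and containsLong takes a
    // primitive, so no boxing occurs.
    LongRange range = new LongRange(startTxId, endTxId);
    return range.containsLong(txid);
  }

  public static void main(String[] args) {
    System.out.println(segmentContains(1L, 100L, 100L)); // true: endpoints inclusive
    System.out.println(segmentContains(1L, 100L, 101L)); // false
  }
}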
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ae91b13a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferKeepalive.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferKeepalive.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferKeepalive.java
index 08aa2c9..1563b72 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferKeepalive.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferKeepalive.java
@@ -29,7 +29,6 @@ import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.io.InputStream;
-import java.io.PrintWriter;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
@@ -46,7 +45,6 @@ import org.junit.Before;
import org.junit.Test;
import com.google.common.base.Supplier;
-import com.google.common.io.NullOutputStream;
public class TestDataTransferKeepalive {
final Configuration conf = new HdfsConfiguration();
@@ -223,7 +221,7 @@ public class TestDataTransferKeepalive {
stms[i] = fs.open(TEST_FILE);
}
for (InputStream stm : stms) {
- IOUtils.copyBytes(stm, new NullOutputStream(), 1024);
+ IOUtils.copyBytes(stm, new IOUtils.NullOutputStream(), 1024);
}
} finally {
IOUtils.cleanup(null, stms);
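
The test change above replaces Guava's com.google.common.io.NullOutputStream, which later Guava releases removed in favor of ByteStreams.nullOutputStream(), with Hadoop's own IOUtils.NullOutputStream, which plays the same discard-all-writes role. A minimal standalone sketch of the drain idiom, with a ByteArrayInputStream standing in for the DFS stream the test opens with fs.open(TEST_FILE):

import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;

import org.apache.hadoop.io.IOUtils;

public class DrainStreamSketch {
  public static void main(String[] args) throws IOException {
    // Sketch only: a ByteArrayInputStream stands in for the DFS stream.
    InputStream stm = new ByteArrayInputStream(new byte[4096]);
    try {
      // Read to EOF and discard the bytes; 1024 is the copy buffer size.
      // IOUtils.NullOutputStream swallows all writes, the role Guava's
      // NullOutputStream filled before it was removed.
      IOUtils.copyBytes(stm, new IOUtils.NullOutputStream(), 1024);
    } finally {
      IOUtils.cleanup(null, stm);
    }
  }
}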