Posted to common-commits@hadoop.apache.org by xg...@apache.org on 2016/12/13 21:12:16 UTC

[1/6] hadoop git commit: HADOOP-13852 hadoop build to allow hadoop version property to be explicitly set. Contributed by Steve Loughran

Repository: hadoop
Updated Branches:
  refs/heads/YARN-5734 4c38f11ce -> ef34bf2bb


HADOOP-13852 hadoop build to allow hadoop version property to be explicitly set. Contributed by Steve Loughran


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f66f6189
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f66f6189
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f66f6189

Branch: refs/heads/YARN-5734
Commit: f66f61892ac7cbd57e8323d0a2a90cf58e9286ce
Parents: 4c38f11
Author: Steve Loughran <st...@apache.org>
Authored: Mon Dec 12 13:58:27 2016 +0000
Committer: Steve Loughran <st...@apache.org>
Committed: Mon Dec 12 13:58:27 2016 +0000

----------------------------------------------------------------------
 BUILDING.txt                                             | 11 ++++++++++-
 .../src/main/resources/common-version-info.properties    |  4 ++--
 hadoop-project/pom.xml                                   |  3 +++
 .../src/main/resources/yarn-version-info.properties      |  2 +-
 4 files changed, 16 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f66f6189/BUILDING.txt
----------------------------------------------------------------------
diff --git a/BUILDING.txt b/BUILDING.txt
index 8b2bba6..7afc3f0 100644
--- a/BUILDING.txt
+++ b/BUILDING.txt
@@ -390,7 +390,7 @@ http://www.zlib.net/
 ----------------------------------------------------------------------------------
 Building distributions:
 
- * Build distribution with native code    : mvn package [-Pdist][-Pdocs][-Psrc][-Dtar]
+ * Build distribution with native code    : mvn package [-Pdist][-Pdocs][-Psrc][-Dtar][-Dmaven.javadoc.skip=true]
 
 ----------------------------------------------------------------------------------
 Running compatibility checks with checkcompatibility.py
@@ -402,3 +402,12 @@ managers to compare the compatibility of a previous and current release.
 As an example, this invocation will check the compatibility of interfaces annotated as Public or LimitedPrivate:
 
 ./dev-support/bin/checkcompatibility.py --annotation org.apache.hadoop.classification.InterfaceAudience.Public --annotation org.apache.hadoop.classification.InterfaceAudience.LimitedPrivate --include "hadoop.*" branch-2.7.2 trunk
+
+----------------------------------------------------------------------------------
+Changing the Hadoop version declared by VersionInfo
+
+If for compatibility reasons the version of Hadoop has to be declared as a 2.x release in the information returned by
+org.apache.hadoop.util.VersionInfo, set the property declared.hadoop.version to the desired version.
+For example: mvn package -Pdist -Ddeclared.hadoop.version=2.11
+
+If unset, the project version declared in the POM file is used.
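
A minimal sketch of how the declared version then surfaces at runtime (the
class name is illustrative and not part of the patch; the getters are the
existing public API of org.apache.hadoop.util.VersionInfo):

    import org.apache.hadoop.util.VersionInfo;

    public class ShowDeclaredVersion {
      public static void main(String[] args) {
        // getVersion() reads the "version" key of common-version-info.properties,
        // which this patch wires to ${declared.hadoop.version} rather than ${pom.version}.
        System.out.println("Declared version: " + VersionInfo.getVersion());
        // The SCM details are unaffected by the override.
        System.out.println("Built from " + VersionInfo.getBranch()
            + " at commit " + VersionInfo.getRevision());
      }
    }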

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f66f6189/hadoop-common-project/hadoop-common/src/main/resources/common-version-info.properties
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/resources/common-version-info.properties b/hadoop-common-project/hadoop-common/src/main/resources/common-version-info.properties
index ad9a24d..9b74960 100644
--- a/hadoop-common-project/hadoop-common/src/main/resources/common-version-info.properties
+++ b/hadoop-common-project/hadoop-common/src/main/resources/common-version-info.properties
@@ -16,11 +16,11 @@
 # limitations under the License.
 #
 
-version=${pom.version}
+version=${declared.hadoop.version}
 revision=${version-info.scm.commit}
 branch=${version-info.scm.branch}
 user=${user.name}
 date=${version-info.build.time}
 url=${version-info.scm.uri}
 srcChecksum=${version-info.source.md5}
-protocVersion=${protobuf.version}
\ No newline at end of file
+protocVersion=${protobuf.version}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f66f6189/hadoop-project/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index 336231e..9ec24ea 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -126,6 +126,9 @@
     <native-maven-plugin.version>1.0-alpha-8</native-maven-plugin.version>
     <surefire.fork.timeout>900</surefire.fork.timeout>
     <aws-java-sdk.version>1.11.45</aws-java-sdk.version>
+    <!-- the version of Hadoop declared in the version resources; can be overridden
+    so that Hadoop 3.x can declare itself a 2.x artifact. -->
+    <declared.hadoop.version>${pom.version}</declared.hadoop.version>
   </properties>
 
   <dependencyManagement>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f66f6189/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-version-info.properties
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-version-info.properties b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-version-info.properties
index 9a8575c..ee6f13d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-version-info.properties
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-version-info.properties
@@ -16,7 +16,7 @@
 # limitations under the License.
 #
 
-version=${pom.version}
+version=${declared.hadoop.version}
 revision=${version-info.scm.commit}
 branch=${version-info.scm.branch}
 user=${user.name}




[2/6] hadoop git commit: HADOOP-13871. ITestS3AInputStreamPerformance.testTimeToOpenAndReadWholeFileBlocks performance awful. Contributed by Steve Loughran

Posted by xg...@apache.org.
HADOOP-13871. ITestS3AInputStreamPerformance.testTimeToOpenAndReadWholeFileBlocks performance awful. Contributed by Steve Loughran


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c6a39232
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c6a39232
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c6a39232

Branch: refs/heads/YARN-5734
Commit: c6a39232456fa0c98b2b9b6dbeaec762294ca01e
Parents: f66f618
Author: Mingliang Liu <li...@apache.org>
Authored: Mon Dec 12 14:55:34 2016 -0800
Committer: Mingliang Liu <li...@apache.org>
Committed: Mon Dec 12 14:55:34 2016 -0800

----------------------------------------------------------------------
 .../apache/hadoop/fs/s3a/S3AInputStream.java    | 33 ++++++++++---
 .../java/org/apache/hadoop/fs/s3a/S3AUtils.java |  3 ++
 .../tools/hadoop-aws/troubleshooting_s3a.md     | 52 ++++++++++++++++++++
 .../scale/ITestS3AInputStreamPerformance.java   | 47 ++++++++++++++++--
 .../hadoop/fs/s3a/scale/S3AScaleTestBase.java   | 20 +++++---
 5 files changed, 140 insertions(+), 15 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c6a39232/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AInputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AInputStream.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AInputStream.java
index dd6cdd7..3c4093d 100644
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AInputStream.java
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AInputStream.java
@@ -132,7 +132,7 @@ public class S3AInputStream extends FSInputStream implements CanSetReadahead {
       throws IOException {
 
     if (wrappedStream != null) {
-      closeStream("reopen(" + reason + ")", contentRangeFinish);
+      closeStream("reopen(" + reason + ")", contentRangeFinish, false);
     }
 
     contentRangeFinish = calculateRequestLimit(inputPolicy, targetPos,
@@ -257,7 +257,7 @@ public class S3AInputStream extends FSInputStream implements CanSetReadahead {
 
     // if the code reaches here, the stream needs to be reopened.
     // close the stream; if read the object will be opened at the new pos
-    closeStream("seekInStream()", this.contentRangeFinish);
+    closeStream("seekInStream()", this.contentRangeFinish, false);
     pos = targetPos;
   }
 
@@ -414,7 +414,7 @@ public class S3AInputStream extends FSInputStream implements CanSetReadahead {
       closed = true;
       try {
         // close or abort the stream
-        closeStream("close() operation", this.contentRangeFinish);
+        closeStream("close() operation", this.contentRangeFinish, false);
         // this is actually a no-op
         super.close();
       } finally {
@@ -431,17 +431,17 @@ public class S3AInputStream extends FSInputStream implements CanSetReadahead {
    * an abort.
    *
    * This does not set the {@link #closed} flag.
-   *
    * @param reason reason for stream being closed; used in messages
    * @param length length of the stream.
+   * @param forceAbort force an abort; used if explicitly requested.
    */
-  private void closeStream(String reason, long length) {
+  private void closeStream(String reason, long length, boolean forceAbort) {
     if (wrappedStream != null) {
 
       // if the amount of data remaining in the current request is greater
       // than the readahead value: abort.
       long remaining = remainingInCurrentRequest();
-      boolean shouldAbort = remaining > readahead;
+      boolean shouldAbort = forceAbort || remaining > readahead;
       if (!shouldAbort) {
         try {
           // clean close. This will read to the end of the stream,
@@ -470,6 +470,27 @@ public class S3AInputStream extends FSInputStream implements CanSetReadahead {
     }
   }
 
+  /**
+   * Forcibly reset the stream, by aborting the connection. The next
+   * {@code read()} operation will trigger the opening of a new HTTPS
+   * connection.
+   *
+   * This is potentially very inefficient, and should only be invoked
+   * in extreme circumstances. It logs at info for this reason.
+   * @return true if the connection was actually reset.
+   * @throws IOException if invoked on a closed stream.
+   */
+  @InterfaceStability.Unstable
+  public synchronized boolean resetConnection() throws IOException {
+    checkNotClosed();
+    boolean connectionOpen = wrappedStream != null;
+    if (connectionOpen) {
+      LOG.info("Forced reset of connection to {}", uri);
+      closeStream("reset()", contentRangeFinish, true);
+    }
+    return connectionOpen;
+  }
+
   @Override
   public synchronized int available() throws IOException {
     checkNotClosed();
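
A rough sketch of how the new resetConnection() method is reached from
client code (the helper class is illustrative, not part of the patch; it
relies only on FSDataInputStream.getWrappedStream() and the method added
above):

    import java.io.InputStream;
    import org.apache.hadoop.fs.FSDataInputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.s3a.S3AInputStream;

    public final class ResetExample {
      /** Force a connection reset on an s3a:// stream, if it is one. */
      static boolean forceReset(FileSystem fs, Path path) throws Exception {
        try (FSDataInputStream in = fs.open(path)) {
          InputStream inner = in.getWrappedStream();
          if (inner instanceof S3AInputStream) {
            // Aborts the HTTPS connection; the next read() reopens it.
            return ((S3AInputStream) inner).resetConnection();
          }
          return false;
        }
      }
    }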

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c6a39232/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AUtils.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AUtils.java
index dedbfd4..aeb8403 100644
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AUtils.java
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AUtils.java
@@ -509,6 +509,7 @@ public final class S3AUtils {
     Preconditions.checkArgument(v >= min,
         String.format("Value of %s: %d is below the minimum value %d",
             key, v, min));
+    LOG.debug("Value of {} is {}", key, v);
     return v;
   }
 
@@ -529,6 +530,7 @@ public final class S3AUtils {
     Preconditions.checkArgument(v >= min,
         String.format("Value of %s: %d is below the minimum value %d",
             key, v, min));
+    LOG.debug("Value of {} is {}", key, v);
     return v;
   }
 
@@ -550,6 +552,7 @@ public final class S3AUtils {
     Preconditions.checkArgument(v >= min,
             String.format("Value of %s: %d is below the minimum value %d",
                     key, v, min));
+    LOG.debug("Value of {} is {}", key, v);
     return v;
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c6a39232/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/troubleshooting_s3a.md
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/troubleshooting_s3a.md b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/troubleshooting_s3a.md
new file mode 100644
index 0000000..d79720e
--- /dev/null
+++ b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/troubleshooting_s3a.md
@@ -0,0 +1,52 @@
+<!---
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+-->
+
+# Troubleshooting S3A
+
+Here are some lower level details and hints on troubleshooting and tuning
+the S3A client.
+
+## Logging at lower levels
+
+The AWS SDK and the Apache HTTP components can be configured to log at
+more detail, as can S3A itself.
+
+```properties
+log4j.logger.org.apache.hadoop.fs.s3a=DEBUG
+log4j.logger.com.amazonaws.request=DEBUG
+log4j.logger.org.apache.http=DEBUG
+log4j.logger.org.apache.http.wire=ERROR
+```
+
+Be aware that logging HTTP headers may leak sensitive AWS account information,
+so should not be shared.
+
+## Advanced: network performance
+
+An example of this is covered in [HADOOP-13871](https://issues.apache.org/jira/browse/HADOOP-13871).
+
+1. For public data, use `curl`:
+
+        curl -O https://landsat-pds.s3.amazonaws.com/scene_list.gz
+1. Use `nettop` to monitor a process's connections.
+
+Consider reducing the connection timeout of the s3a connection.
+
+```xml
+<property>
+  <name>fs.s3a.connection.timeout</name>
+  <value>15000</value>
+</property>
+```
+This *may* cause the client to react faster to network pauses.
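
The same timeout can also be set programmatically before the FileSystem
instance is created; a fragment assuming the usual org.apache.hadoop.conf,
org.apache.hadoop.fs and java.net.URI imports (bucket name illustrative):

    Configuration conf = new Configuration();
    conf.setInt("fs.s3a.connection.timeout", 15000);   // milliseconds
    FileSystem fs = FileSystem.get(URI.create("s3a://example-bucket/"), conf);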

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c6a39232/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3AInputStreamPerformance.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3AInputStreamPerformance.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3AInputStreamPerformance.java
index cc8187e..e36d086 100644
--- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3AInputStreamPerformance.java
+++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3AInputStreamPerformance.java
@@ -35,6 +35,7 @@ import org.apache.hadoop.io.compress.CompressionCodec;
 import org.apache.hadoop.io.compress.CompressionCodecFactory;
 import org.apache.hadoop.util.LineReader;
 import org.junit.After;
+import org.junit.Assert;
 import org.junit.Assume;
 import org.junit.Before;
 import org.junit.Test;
@@ -216,12 +217,18 @@ public class ITestS3AInputStreamPerformance extends S3AScaleTestBase {
     long count = 0;
     // implicitly rounding down here
     long blockCount = len / blockSize;
+    long totalToRead = blockCount * blockSize;
+    long minimumBandwidth = 128 * 1024;
+    int maxResetCount = 4;
+    int resetCount = 0;
     for (long i = 0; i < blockCount; i++) {
       int offset = 0;
       int remaining = blockSize;
+      long blockId = i + 1;
       NanoTimer blockTimer = new NanoTimer();
       int reads = 0;
       while (remaining > 0) {
+        NanoTimer readTimer = new NanoTimer();
         int bytesRead = in.read(block, offset, remaining);
         reads++;
         if (bytesRead == 1) {
@@ -230,14 +237,48 @@ public class ITestS3AInputStreamPerformance extends S3AScaleTestBase {
         remaining -= bytesRead;
         offset += bytesRead;
         count += bytesRead;
+        readTimer.end();
+        if (bytesRead != 0) {
+          LOG.debug("Bytes in read #{}: {} , block bytes: {}," +
+                  " remaining in block: {}" +
+                  " duration={} nS; ns/byte: {}, bandwidth={} MB/s",
+              reads, bytesRead, blockSize - remaining, remaining,
+              readTimer.duration(),
+              readTimer.nanosPerOperation(bytesRead),
+              readTimer.bandwidthDescription(bytesRead));
+        } else {
+          LOG.warn("0 bytes returned by read() operation #{}", reads);
+        }
+      }
+      blockTimer.end("Reading block %d in %d reads", blockId, reads);
+      String bw = blockTimer.bandwidthDescription(blockSize);
+      LOG.info("Bandwidth of block {}: {} MB/s: ", blockId, bw);
+      if (bandwidth(blockTimer, blockSize) < minimumBandwidth) {
+        LOG.warn("Bandwidth {} too low on block {}: resetting connection",
+            bw, blockId);
+        Assert.assertTrue("Bandwidth of " + bw +" too low after  "
+            + resetCount + " attempts", resetCount <= maxResetCount);
+        resetCount++;
+        // reset the connection
+        getS3AInputStream(in).resetConnection();
       }
-      blockTimer.end("Reading block %d in %d reads", i, reads);
     }
-    timer2.end("Time to read %d bytes in %d blocks", len, blockCount);
-    bandwidth(timer2, count);
+    timer2.end("Time to read %d bytes in %d blocks", totalToRead, blockCount);
+    LOG.info("Overall Bandwidth {} MB/s; reset connections {}",
+        timer2.bandwidth(totalToRead), resetCount);
     logStreamStatistics();
   }
 
+  /**
+   * Work out the bandwidth in bytes/second.
+   * @param timer timer measuring the duration
+   * @param bytes bytes
+   * @return the number of bytes/second of the recorded operation
+   */
+  public static double bandwidth(NanoTimer timer, long bytes) {
+    return bytes * 1.0e9 / timer.duration();
+  }
+
   @Test
   public void testLazySeekEnabled() throws Throwable {
     describe("Verify that seeks do not trigger any IO");
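
To put the reset threshold in context: bandwidth(timer, bytes) works out to
bytes * 1.0e9 / duration-in-nanoseconds, i.e. bytes per second. As an
illustrative calculation, a 1 MB block (1,048,576 bytes) read in 50 ms
(5.0e7 ns) gives 1,048,576 * 1.0e9 / 5.0e7 = 20,971,520 bytes/s, about
20 MB/s and well above the 128 KB/s (131,072 bytes/s) minimumBandwidth;
the same block taking 10 seconds comes to roughly 102 KB/s, below the
threshold, so the test resets the connection (up to maxResetCount times
before the assertion fails).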

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c6a39232/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/S3AScaleTestBase.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/S3AScaleTestBase.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/S3AScaleTestBase.java
index c4174bf..9da621f 100644
--- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/S3AScaleTestBase.java
+++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/S3AScaleTestBase.java
@@ -28,7 +28,6 @@ import org.apache.hadoop.fs.s3a.S3ATestConstants;
 import org.apache.hadoop.fs.s3a.Statistic;
 import org.apache.hadoop.metrics2.lib.MutableGaugeLong;
 
-import org.junit.Assert;
 import org.junit.Assume;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -163,14 +162,23 @@ public class S3AScaleTestBase extends AbstractS3ATestBase {
    */
   protected S3AInstrumentation.InputStreamStatistics getInputStreamStatistics(
       FSDataInputStream in) {
+    return getS3AInputStream(in).getS3AStreamStatistics();
+  }
+
+  /**
+   * Get the inner stream of an input stream.
+   * Raises an exception if the inner stream is not an S3A input stream
+   * @param in wrapper
+   * @return the inner stream
+   * @throws AssertionError if the inner stream is of the wrong type
+   */
+  protected S3AInputStream getS3AInputStream(
+      FSDataInputStream in) {
     InputStream inner = in.getWrappedStream();
     if (inner instanceof S3AInputStream) {
-      S3AInputStream s3a = (S3AInputStream) inner;
-      return s3a.getS3AStreamStatistics();
+      return (S3AInputStream) inner;
     } else {
-      Assert.fail("Not an S3AInputStream: " + inner);
-      // never reached
-      return null;
+      throw new AssertionError("Not an S3AInputStream: " + inner);
     }
   }
 




[3/6] hadoop git commit: HDFS-11233. Fix javac warnings related to the deprecated APIs after upgrading Jackson. Contributed by Yiqun Lin.

Posted by xg...@apache.org.
HDFS-11233. Fix javac warnings related to the deprecated APIs after upgrading Jackson. Contributed by Yiqun Lin.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2d4731c0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2d4731c0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2d4731c0

Branch: refs/heads/YARN-5734
Commit: 2d4731c067ff64cd88f496eac8faaf302faa2ccc
Parents: c6a3923
Author: Akira Ajisaka <aa...@apache.org>
Authored: Tue Dec 13 11:11:15 2016 +0900
Committer: Akira Ajisaka <aa...@apache.org>
Committed: Tue Dec 13 11:11:15 2016 +0900

----------------------------------------------------------------------
 .../hadoop/hdfs/server/datanode/DiskBalancerWorkItem.java    | 2 +-
 .../hadoop/hdfs/server/datanode/DiskBalancerWorkStatus.java  | 4 ++--
 .../org/apache/hadoop/hdfs/util/CombinedHostsFileReader.java | 2 +-
 .../main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java | 2 +-
 .../java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java   | 2 +-
 .../web/oauth2/ConfRefreshTokenBasedAccessTokenProvider.java | 2 +-
 .../hdfs/web/oauth2/CredentialBasedAccessTokenProvider.java  | 2 +-
 .../hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java    | 2 +-
 .../hadoop/hdfs/server/diskbalancer/command/Command.java     | 2 +-
 .../server/diskbalancer/connectors/JsonNodeConnector.java    | 2 +-
 .../server/diskbalancer/datamodel/DiskBalancerCluster.java   | 2 +-
 .../server/diskbalancer/datamodel/DiskBalancerVolume.java    | 2 +-
 .../hadoop/hdfs/server/diskbalancer/planner/NodePlan.java    | 2 +-
 .../test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java   | 8 ++++----
 14 files changed, 18 insertions(+), 18 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d4731c0/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancerWorkItem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancerWorkItem.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancerWorkItem.java
index 0cdd107..505273e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancerWorkItem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancerWorkItem.java
@@ -37,7 +37,7 @@ import java.io.IOException;
 public class DiskBalancerWorkItem {
   private static final ObjectMapper MAPPER = new ObjectMapper();
   private static final ObjectReader READER =
-      new ObjectMapper().reader(DiskBalancerWorkItem.class);
+      new ObjectMapper().readerFor(DiskBalancerWorkItem.class);
 
   private  long startTime;
   private long secondsElapsed;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d4731c0/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancerWorkStatus.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancerWorkStatus.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancerWorkStatus.java
index 452ab27..af34c49 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancerWorkStatus.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancerWorkStatus.java
@@ -43,9 +43,9 @@ public class DiskBalancerWorkStatus {
   private static final ObjectMapper MAPPER_WITH_INDENT_OUTPUT =
       new ObjectMapper().enable(SerializationFeature.INDENT_OUTPUT);
   private static final ObjectReader READER_WORKSTATUS =
-      new ObjectMapper().reader(DiskBalancerWorkStatus.class);
+      new ObjectMapper().readerFor(DiskBalancerWorkStatus.class);
   private static final ObjectReader READER_WORKENTRY = new ObjectMapper()
-      .reader(defaultInstance().constructCollectionType(List.class,
+      .readerFor(defaultInstance().constructCollectionType(List.class,
           DiskBalancerWorkEntry.class));
 
   private final List<DiskBalancerWorkEntry> currentState;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d4731c0/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/CombinedHostsFileReader.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/CombinedHostsFileReader.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/CombinedHostsFileReader.java
index 668d56e..8da5655 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/CombinedHostsFileReader.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/CombinedHostsFileReader.java
@@ -48,7 +48,7 @@ import org.apache.hadoop.hdfs.protocol.DatanodeAdminProperties;
 @InterfaceStability.Unstable
 public final class CombinedHostsFileReader {
   private static final ObjectReader READER =
-      new ObjectMapper().reader(DatanodeAdminProperties.class);
+      new ObjectMapper().readerFor(DatanodeAdminProperties.class);
   private static final JsonFactory JSON_FACTORY = new JsonFactory();
 
   private CombinedHostsFileReader() {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d4731c0/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
index 3690a86..246f242 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
@@ -526,7 +526,7 @@ class JsonUtilClient {
     }
 
     final String namesInJson = (String) json.get("XAttrNames");
-    ObjectReader reader = new ObjectMapper().reader(List.class);
+    ObjectReader reader = new ObjectMapper().readerFor(List.class);
     final List<Object> xattrs = reader.readValue(namesInJson);
     final List<String> names =
         Lists.newArrayListWithCapacity(json.keySet().size());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d4731c0/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
index fbb4bd6..26cfc01 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
@@ -152,7 +152,7 @@ public class WebHdfsFileSystem extends FileSystem
   private String restCsrfCustomHeader;
   private Set<String> restCsrfMethodsToIgnore;
   private static final ObjectReader READER =
-      new ObjectMapper().reader(Map.class);
+      new ObjectMapper().readerFor(Map.class);
 
   private DFSOpsCountStatistics storageStatistics;
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d4731c0/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/oauth2/ConfRefreshTokenBasedAccessTokenProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/oauth2/ConfRefreshTokenBasedAccessTokenProvider.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/oauth2/ConfRefreshTokenBasedAccessTokenProvider.java
index f90ca19..c6ebdd6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/oauth2/ConfRefreshTokenBasedAccessTokenProvider.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/oauth2/ConfRefreshTokenBasedAccessTokenProvider.java
@@ -56,7 +56,7 @@ import static org.apache.hadoop.hdfs.web.oauth2.Utils.notNull;
 public class ConfRefreshTokenBasedAccessTokenProvider
     extends AccessTokenProvider {
   private static final ObjectReader READER =
-      new ObjectMapper().reader(Map.class);
+      new ObjectMapper().readerFor(Map.class);
 
   public static final String OAUTH_REFRESH_TOKEN_KEY
       = "dfs.webhdfs.oauth2.refresh.token";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d4731c0/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/oauth2/CredentialBasedAccessTokenProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/oauth2/CredentialBasedAccessTokenProvider.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/oauth2/CredentialBasedAccessTokenProvider.java
index 8c0ddd4..5c629e0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/oauth2/CredentialBasedAccessTokenProvider.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/oauth2/CredentialBasedAccessTokenProvider.java
@@ -56,7 +56,7 @@ import static org.apache.hadoop.hdfs.web.oauth2.Utils.notNull;
 public abstract class CredentialBasedAccessTokenProvider
     extends AccessTokenProvider {
   private static final ObjectReader READER =
-      new ObjectMapper().reader(Map.class);
+      new ObjectMapper().readerFor(Map.class);
 
   public static final String OAUTH_CREDENTIAL_KEY
       = "dfs.webhdfs.oauth2.credential";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d4731c0/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
index 97bb09b..c317715 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
@@ -106,7 +106,7 @@ public class FsVolumeImpl implements FsVolumeSpi {
   private static final ObjectWriter WRITER =
       new ObjectMapper().writerWithDefaultPrettyPrinter();
   private static final ObjectReader READER =
-      new ObjectMapper().reader(BlockIteratorState.class);
+      new ObjectMapper().readerFor(BlockIteratorState.class);
 
   private final FsDatasetImpl dataset;
   private final String storageID;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d4731c0/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/Command.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/Command.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/Command.java
index cee37e5..22fd5be 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/Command.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/Command.java
@@ -77,7 +77,7 @@ import java.util.TreeSet;
  */
 public abstract class Command extends Configured implements Closeable {
   private static final ObjectReader READER =
-      new ObjectMapper().reader(HashMap.class);
+      new ObjectMapper().readerFor(HashMap.class);
   static final Logger LOG = LoggerFactory.getLogger(Command.class);
   private Map<String, String> validArgs = new HashMap<>();
   private URI clusterURI;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d4731c0/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/connectors/JsonNodeConnector.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/connectors/JsonNodeConnector.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/connectors/JsonNodeConnector.java
index 323938a..838511b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/connectors/JsonNodeConnector.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/connectors/JsonNodeConnector.java
@@ -38,7 +38,7 @@ public class JsonNodeConnector implements ClusterConnector {
   private static final Logger LOG =
       LoggerFactory.getLogger(JsonNodeConnector.class);
   private static final ObjectReader READER =
-      new ObjectMapper().reader(DiskBalancerCluster.class);
+      new ObjectMapper().readerFor(DiskBalancerCluster.class);
   private final URL clusterURI;
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d4731c0/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerCluster.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerCluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerCluster.java
index 1b17829..2f54141 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerCluster.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerCluster.java
@@ -73,7 +73,7 @@ public class DiskBalancerCluster {
   private static final Logger LOG =
       LoggerFactory.getLogger(DiskBalancerCluster.class);
   private static final ObjectReader READER =
-      new ObjectMapper().reader(DiskBalancerCluster.class);
+      new ObjectMapper().readerFor(DiskBalancerCluster.class);
   private final Set<String> exclusionList;
   private final Set<String> inclusionList;
   private ClusterConnector clusterConnector;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d4731c0/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerVolume.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerVolume.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerVolume.java
index ccaa7d9..47a925c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerVolume.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerVolume.java
@@ -33,7 +33,7 @@ import java.io.IOException;
 @JsonIgnoreProperties(ignoreUnknown = true)
 public class DiskBalancerVolume {
   private static final ObjectReader READER =
-      new ObjectMapper().reader(DiskBalancerVolume.class);
+      new ObjectMapper().readerFor(DiskBalancerVolume.class);
 
   private String path;
   private long capacity;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d4731c0/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/planner/NodePlan.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/planner/NodePlan.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/planner/NodePlan.java
index 153536a..c72bb1f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/planner/NodePlan.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/planner/NodePlan.java
@@ -166,7 +166,7 @@ public class NodePlan {
   public String toJson() throws IOException {
     ObjectMapper mapper = new ObjectMapper();
     JavaType planType = mapper.constructType(NodePlan.class);
-    return mapper.writerWithType(planType)
+    return mapper.writerFor(planType)
         .writeValueAsString(this);
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d4731c0/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java
index 94cec85..5a3d451 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java
@@ -71,7 +71,7 @@ public class TestJsonUtil {
     System.out.println("fstatus = " + fstatus);
     final String json = JsonUtil.toJsonString(status, true);
     System.out.println("json    = " + json.replace(",", ",\n  "));
-    ObjectReader reader = new ObjectMapper().reader(Map.class);
+    ObjectReader reader = new ObjectMapper().readerFor(Map.class);
     final HdfsFileStatus s2 =
         JsonUtilClient.toFileStatus((Map<?, ?>) reader.readValue(json), true);
     final FileStatus fs2 = toFileStatus(s2, parent);
@@ -159,7 +159,7 @@ public class TestJsonUtil {
   public void testToAclStatus() throws IOException {
     String jsonString =
         "{\"AclStatus\":{\"entries\":[\"user::rwx\",\"user:user1:rw-\",\"group::rw-\",\"other::r-x\"],\"group\":\"supergroup\",\"owner\":\"testuser\",\"stickyBit\":false}}";
-    ObjectReader reader = new ObjectMapper().reader(Map.class);
+    ObjectReader reader = new ObjectMapper().readerFor(Map.class);
     Map<?, ?> json = reader.readValue(jsonString);
 
     List<AclEntry> aclSpec =
@@ -219,7 +219,7 @@ public class TestJsonUtil {
     String jsonString = 
         "{\"XAttrs\":[{\"name\":\"user.a1\",\"value\":\"0x313233\"}," +
         "{\"name\":\"user.a2\",\"value\":\"0x313131\"}]}";
-    ObjectReader reader = new ObjectMapper().reader(Map.class);
+    ObjectReader reader = new ObjectMapper().readerFor(Map.class);
     Map<?, ?> json = reader.readValue(jsonString);
     XAttr xAttr1 = (new XAttr.Builder()).setNameSpace(XAttr.NameSpace.USER).
         setName("a1").setValue(XAttrCodec.decodeValue("0x313233")).build();
@@ -245,7 +245,7 @@ public class TestJsonUtil {
     String jsonString = 
         "{\"XAttrs\":[{\"name\":\"user.a1\",\"value\":\"0x313233\"}," +
         "{\"name\":\"user.a2\",\"value\":\"0x313131\"}]}";
-    ObjectReader reader = new ObjectMapper().reader(Map.class);
+    ObjectReader reader = new ObjectMapper().readerFor(Map.class);
     Map<?, ?> json = reader.readValue(jsonString);
 
     // Get xattr: user.a2
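
For context on the API change applied throughout this patch, a minimal
before/after sketch of the Jackson 2 calls (the Map target type mirrors the
hunks above; error handling omitted):

    ObjectMapper mapper = new ObjectMapper();

    // Deprecated form that produced the javac warnings:
    //   ObjectReader reader = mapper.reader(Map.class);

    // Replacement used in this patch:
    ObjectReader reader = mapper.readerFor(Map.class);
    Map<?, ?> parsed = reader.readValue("{\"key\":\"value\"}");

    // The write-side equivalent (see NodePlan.toJson()):
    //   mapper.writerWithType(planType)  ->  mapper.writerFor(planType)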




[6/6] hadoop git commit: HADOOP-13900. Remove snapshot version of SDK dependency from Azure Data Lake Store File System. Contributed by Vishwajeet Dusane

Posted by xg...@apache.org.
HADOOP-13900. Remove snapshot version of SDK dependency from Azure Data Lake Store File System. Contributed by Vishwajeet Dusane


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ef34bf2b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ef34bf2b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ef34bf2b

Branch: refs/heads/YARN-5734
Commit: ef34bf2bb92a4e8def6617b185ae72db81450de8
Parents: b0b033e
Author: Mingliang Liu <li...@apache.org>
Authored: Tue Dec 13 10:34:32 2016 -0800
Committer: Mingliang Liu <li...@apache.org>
Committed: Tue Dec 13 10:34:32 2016 -0800

----------------------------------------------------------------------
 hadoop-tools/hadoop-azure-datalake/pom.xml | 10 +---------
 1 file changed, 1 insertion(+), 9 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ef34bf2b/hadoop-tools/hadoop-azure-datalake/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure-datalake/pom.xml b/hadoop-tools/hadoop-azure-datalake/pom.xml
index 7f872c3..b2d419f 100644
--- a/hadoop-tools/hadoop-azure-datalake/pom.xml
+++ b/hadoop-tools/hadoop-azure-datalake/pom.xml
@@ -34,14 +34,6 @@
     <file.encoding>UTF-8</file.encoding>
     <downloadSources>true</downloadSources>
   </properties>
-  <repositories>
-    <repository>
-      <id>snapshots-repo</id>
-      <url>https://oss.sonatype.org/content/repositories/snapshots</url>
-      <releases><enabled>false</enabled></releases>
-      <snapshots><enabled>true</enabled></snapshots>
-    </repository>
-  </repositories>
   <build>
     <plugins>
       <plugin>
@@ -128,7 +120,7 @@
     <dependency>
       <groupId>com.microsoft.azure</groupId>
       <artifactId>azure-data-lake-store-sdk</artifactId>
-      <version>2.0.4-SNAPSHOT</version>
+      <version>2.0.11</version>
     </dependency>
     <!--  ENDS HERE-->
     <dependency>




[4/6] hadoop git commit: HDFS-11226. cacheadmin, cryptoadmin and storagepolicyadmin should support generic options. Contributed by Brahma Reddy Battula

Posted by xg...@apache.org.
HDFS-11226. cacheadmin, cryptoadmin and storagepolicyadmin should support generic options. Contributed by Brahma Reddy Battula


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/754f15ba
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/754f15ba
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/754f15ba

Branch: refs/heads/YARN-5734
Commit: 754f15bae61b81ad3c2e3f722d1feaebf374e2c4
Parents: 2d4731c
Author: Mingliang Liu <li...@apache.org>
Authored: Mon Dec 12 17:36:52 2016 -0800
Committer: Mingliang Liu <li...@apache.org>
Committed: Mon Dec 12 18:21:15 2016 -0800

----------------------------------------------------------------------
 .../main/java/org/apache/hadoop/hdfs/tools/CacheAdmin.java   | 8 ++++++--
 .../main/java/org/apache/hadoop/hdfs/tools/CryptoAdmin.java  | 8 ++++++--
 .../org/apache/hadoop/hdfs/tools/StoragePolicyAdmin.java     | 6 +++++-
 3 files changed, 17 insertions(+), 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/754f15ba/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CacheAdmin.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CacheAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CacheAdmin.java
index 270a662..522f701 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CacheAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CacheAdmin.java
@@ -45,6 +45,7 @@ import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Tool;
 
 import com.google.common.base.Joiner;
+import org.apache.hadoop.util.ToolRunner;
 
 /**
  * This class implements command-line operations on the HDFS Cache.
@@ -64,6 +65,7 @@ public class CacheAdmin extends Configured implements Tool {
   public int run(String[] args) throws IOException {
     if (args.length == 0) {
       AdminHelper.printUsage(false, "cacheadmin", COMMANDS);
+      ToolRunner.printGenericCommandUsage(System.err);
       return 1;
     }
     AdminHelper.Command command = AdminHelper.determineCommand(args[0],
@@ -74,6 +76,7 @@ public class CacheAdmin extends Configured implements Tool {
         System.err.println("Command names must start with dashes.");
       }
       AdminHelper.printUsage(false, "cacheadmin", COMMANDS);
+      ToolRunner.printGenericCommandUsage(System.err);
       return 1;
     }
     List<String> argsList = new LinkedList<String>();
@@ -88,9 +91,10 @@ public class CacheAdmin extends Configured implements Tool {
     }
   }
 
-  public static void main(String[] argsArray) throws IOException {
+  public static void main(String[] argsArray) throws Exception {
     CacheAdmin cacheAdmin = new CacheAdmin(new Configuration());
-    System.exit(cacheAdmin.run(argsArray));
+    int res = ToolRunner.run(cacheAdmin, argsArray);
+    System.exit(res);
   }
 
   private static CacheDirectiveInfo.Expiration parseExpirationString(String ttlString)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/754f15ba/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CryptoAdmin.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CryptoAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CryptoAdmin.java
index b78da31..225f11a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CryptoAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CryptoAdmin.java
@@ -35,6 +35,7 @@ import org.apache.hadoop.hdfs.protocol.EncryptionZone;
 import org.apache.hadoop.tools.TableListing;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Tool;
+import org.apache.hadoop.util.ToolRunner;
 
 /**
  * This class implements crypto command-line operations.
@@ -54,6 +55,7 @@ public class CryptoAdmin extends Configured implements Tool {
   public int run(String[] args) throws IOException {
     if (args.length == 0) {
       AdminHelper.printUsage(false, "crypto", COMMANDS);
+      ToolRunner.printGenericCommandUsage(System.err);
       return 1;
     }
     final AdminHelper.Command command = AdminHelper.determineCommand(args[0],
@@ -64,6 +66,7 @@ public class CryptoAdmin extends Configured implements Tool {
         System.err.println("Command names must start with dashes.");
       }
       AdminHelper.printUsage(false, "crypto", COMMANDS);
+      ToolRunner.printGenericCommandUsage(System.err);
       return 1;
     }
     final List<String> argsList = new LinkedList<String>();
@@ -78,9 +81,10 @@ public class CryptoAdmin extends Configured implements Tool {
     }
   }
 
-  public static void main(String[] argsArray) throws IOException {
+  public static void main(String[] argsArray) throws Exception {
     final CryptoAdmin cryptoAdmin = new CryptoAdmin(new Configuration());
-    System.exit(cryptoAdmin.run(argsArray));
+    int res = ToolRunner.run(cryptoAdmin, argsArray);
+    System.exit(res);
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/754f15ba/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/StoragePolicyAdmin.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/StoragePolicyAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/StoragePolicyAdmin.java
index 24079b3..d99b88a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/StoragePolicyAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/StoragePolicyAdmin.java
@@ -27,6 +27,7 @@ import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.tools.TableListing;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Tool;
+import org.apache.hadoop.util.ToolRunner;
 
 import java.io.IOException;
 import java.util.Arrays;
@@ -42,7 +43,8 @@ public class StoragePolicyAdmin extends Configured implements Tool {
   public static void main(String[] argsArray) throws Exception {
     final StoragePolicyAdmin admin = new StoragePolicyAdmin(new
         Configuration());
-    System.exit(admin.run(argsArray));
+    int res = ToolRunner.run(admin, argsArray);
+    System.exit(res);
   }
 
   public StoragePolicyAdmin(Configuration conf) {
@@ -53,6 +55,7 @@ public class StoragePolicyAdmin extends Configured implements Tool {
   public int run(String[] args) throws Exception {
     if (args.length == 0) {
       AdminHelper.printUsage(false, "storagepolicies", COMMANDS);
+      ToolRunner.printGenericCommandUsage(System.err);
       return 1;
     }
     final AdminHelper.Command command = AdminHelper.determineCommand(args[0],
@@ -63,6 +66,7 @@ public class StoragePolicyAdmin extends Configured implements Tool {
         System.err.println("Command names must start with dashes.");
       }
       AdminHelper.printUsage(false, "storagepolicies", COMMANDS);
+      ToolRunner.printGenericCommandUsage(System.err);
       return 1;
     }
     final List<String> argsList = new LinkedList<>();
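
The change applied to CacheAdmin, CryptoAdmin and StoragePolicyAdmin above
is the standard Tool/ToolRunner wiring; a minimal sketch with an
illustrative class name:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.conf.Configured;
    import org.apache.hadoop.util.Tool;
    import org.apache.hadoop.util.ToolRunner;

    public class ExampleAdmin extends Configured implements Tool {
      @Override
      public int run(String[] args) throws Exception {
        // By the time run() is called, ToolRunner has stripped generic options
        // such as -conf, -D and -fs and applied them to getConf().
        System.out.println("fs.defaultFS = " + getConf().get("fs.defaultFS"));
        return 0;
      }

      public static void main(String[] args) throws Exception {
        System.exit(ToolRunner.run(new Configuration(), new ExampleAdmin(), args));
      }
    }

With this wiring an invocation such as "hdfs cacheadmin -D fs.defaultFS=hdfs://nn:8020 -listPools"
picks up the generic -D override before the subcommand runs.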




[5/6] hadoop git commit: MAPREDUCE-6821. Fix javac warning related to the deprecated APIs after upgrading Jackson. Contributed by Yiqun Lin.

Posted by xg...@apache.org.
MAPREDUCE-6821. Fix javac warning related to the deprecated APIs after upgrading Jackson. Contributed by Yiqun Lin.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b0b033ea
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b0b033ea
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b0b033ea

Branch: refs/heads/YARN-5734
Commit: b0b033ea2e462356b8bbcf7790953ac09c712430
Parents: 754f15b
Author: Akira Ajisaka <aa...@apache.org>
Authored: Tue Dec 13 14:20:02 2016 +0900
Committer: Akira Ajisaka <aa...@apache.org>
Committed: Tue Dec 13 14:22:07 2016 +0900

----------------------------------------------------------------------
 .../src/main/java/org/apache/hadoop/mapreduce/JobSubmitter.java    | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b0b033ea/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobSubmitter.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobSubmitter.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobSubmitter.java
index a842649..09edd94 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobSubmitter.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobSubmitter.java
@@ -72,7 +72,7 @@ import com.google.common.base.Charsets;
 class JobSubmitter {
   protected static final Log LOG = LogFactory.getLog(JobSubmitter.class);
   private static final ObjectReader READER =
-      new ObjectMapper().reader(Map.class);
+      new ObjectMapper().readerFor(Map.class);
   private static final String SHUFFLE_KEYGEN_ALGORITHM = "HmacSHA1";
   private static final int SHUFFLE_KEY_LENGTH = 64;
   private FileSystem jtFs;

