Posted to common-commits@hadoop.apache.org by st...@apache.org on 2016/10/31 20:54:09 UTC

[1/2] hadoop git commit: HADOOP-13680. fs.s3a.readahead.range to use getLongBytes. Contributed by Abhishek Modi.

Repository: hadoop
Updated Branches:
  refs/heads/branch-2 ce13463e7 -> c4ccafdaf
  refs/heads/branch-2.8 f0a6d17e3 -> a83f10c90


HADOOP-13680. fs.s3a.readahead.range to use getLongBytes. Contributed by Abhishek Modi.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c4ccafda
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c4ccafda
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c4ccafda

Branch: refs/heads/branch-2
Commit: c4ccafdaff88bb01a7d78935b4a243f7d1b5a190
Parents: ce13463
Author: Steve Loughran <st...@apache.org>
Authored: Mon Oct 31 20:52:49 2016 +0000
Committer: Steve Loughran <st...@apache.org>
Committed: Mon Oct 31 20:52:49 2016 +0000

----------------------------------------------------------------------
 .../src/main/resources/core-default.xml         | 18 ++++++++++-----
 .../org/apache/hadoop/fs/s3a/S3AFileSystem.java | 17 ++++++++++++---
 .../java/org/apache/hadoop/fs/s3a/S3AUtils.java | 23 +++++++++++++++++++-
 .../src/site/markdown/tools/hadoop-aws/index.md | 22 +++++++++++--------
 .../hadoop/fs/s3a/ITestS3AConfiguration.java    | 13 ++++++++++-
 5 files changed, 73 insertions(+), 20 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c4ccafda/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
index 1beea94..58a5508 100644
--- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
+++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
@@ -1044,8 +1044,10 @@
 
 <property>
   <name>fs.s3a.multipart.size</name>
-  <value>104857600</value>
-  <description>How big (in bytes) to split upload or copy operations up into.</description>
+  <value>100M</value>
+  <description>How big (in bytes) to split upload or copy operations up into.
+    A suffix from the set {K,M,G,T,P} may be used to scale the numeric value.
+  </description>
 </property>
 
 <property>
@@ -1053,7 +1055,8 @@
   <value>2147483647</value>
   <description>How big (in bytes) to split upload or copy operations up into.
     This also controls the partition size in renamed files, as rename() involves
-    copying the source file(s)
+    copying the source file(s).
+    A suffix from the set {K,M,G,T,P} may be used to scale the numeric value.
   </description>
 </property>
 
@@ -1109,8 +1112,9 @@
 
 <property>
   <name>fs.s3a.block.size</name>
-  <value>33554432</value>
+  <value>32M</value>
   <description>Block size to use when reading files using s3a: file system.
+    A suffix from the set {K,M,G,T,P} may be used to scale the numeric value.
   </description>
 </property>
 
@@ -1172,10 +1176,12 @@
 
 <property>
   <name>fs.s3a.readahead.range</name>
-  <value>65536</value>
+  <value>64K</value>
   <description>Bytes to read ahead during a seek() before closing and
   re-opening the S3 HTTP connection. This option will be overridden if
-  any call to setReadahead() is made to an open stream.</description>
+  any call to setReadahead() is made to an open stream.
+  A suffix from the set {K,M,G,T,P} may be used to scale the numeric value.
+  </description>
 </property>
 
 <property>
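
For reference, a minimal sketch of how the suffixed values above are read
back through Configuration.getLongBytes(); the suffixes are binary
prefixes, so "64K" means 64 * 1024 bytes:

    import org.apache.hadoop.conf.Configuration;

    Configuration conf = new Configuration();
    conf.set("fs.s3a.readahead.range", "64K");
    // getLongBytes() accepts either a plain byte count or a number with a
    // K/M/G/T/P suffix; "64K" parses to 64 * 1024 = 65536 bytes.
    long range = conf.getLongBytes("fs.s3a.readahead.range", 65536);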

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c4ccafda/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
index a82fc93..b9b8810 100644
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
@@ -182,10 +182,11 @@ public class S3AFileSystem extends FileSystem {
           MIN_MULTIPART_THRESHOLD, DEFAULT_MIN_MULTIPART_THRESHOLD);
 
       //check but do not store the block size
-      longOption(conf, FS_S3A_BLOCK_SIZE, DEFAULT_BLOCKSIZE, 1);
+      longBytesOption(conf, FS_S3A_BLOCK_SIZE, DEFAULT_BLOCKSIZE, 1);
       enableMultiObjectsDelete = conf.getBoolean(ENABLE_MULTI_DELETE, true);
 
-      readAhead = longOption(conf, READAHEAD_RANGE, DEFAULT_READAHEAD_RANGE, 0);
+      readAhead = longBytesOption(conf, READAHEAD_RANGE,
+          DEFAULT_READAHEAD_RANGE, 0);
       storageStatistics = (S3AStorageStatistics)
           GlobalStorageStatistics.INSTANCE
               .put(S3AStorageStatistics.NAME,
@@ -357,6 +358,16 @@ public class S3AFileSystem extends FileSystem {
   }
 
   /**
+   * Returns the readahead range used by this filesystem.
+   * @return the readahead range, in bytes
+   */
+
+  @VisibleForTesting
+  long getReadAheadRange() {
+    return readAhead;
+  }
+
+  /**
    * Get the input policy for this FS instance.
    * @return the input policy
    */
@@ -1881,7 +1892,7 @@ public class S3AFileSystem extends FileSystem {
    */
   @Deprecated
   public long getDefaultBlockSize() {
-    return getConf().getLong(FS_S3A_BLOCK_SIZE, DEFAULT_BLOCKSIZE);
+    return getConf().getLongBytes(FS_S3A_BLOCK_SIZE, DEFAULT_BLOCKSIZE);
   }
 
   @Override
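
The readAhead value wired up above is only a default: per the
fs.s3a.readahead.range description, any setReadahead() call on an open
stream overrides it. A hedged sketch of that per-stream override (the
bucket and object names are hypothetical):

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataInputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(URI.create("s3a://example-bucket/"), conf);
    FSDataInputStream in = fs.open(new Path("s3a://example-bucket/data.bin"));
    // From here on this stream reads ahead 1 MiB around seek(), regardless
    // of the fs.s3a.readahead.range the filesystem was created with.
    in.setReadahead(1024L * 1024);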

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c4ccafda/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AUtils.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AUtils.java
index 56e0c37..49f8862 100644
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AUtils.java
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AUtils.java
@@ -510,6 +510,27 @@ public final class S3AUtils {
   }
 
   /**
+   * Get a long option >= the minimum allowed value, supporting memory
+   * prefixes K,M,G,T,P.
+   * @param conf configuration
+   * @param key key to look up
+   * @param defVal default value
+   * @param min minimum value
+   * @return the value
+   * @throws IllegalArgumentException if the value is below the minimum
+   */
+  static long longBytesOption(Configuration conf,
+                             String key,
+                             long defVal,
+                             long min) {
+    long v = conf.getLongBytes(key, defVal);
+    Preconditions.checkArgument(v >= min,
+            String.format("Value of %s: %d is below the minimum value %d",
+                    key, v, min));
+    return v;
+  }
+
+  /**
    * Get a size property from the configuration: this property must
    * be at least equal to {@link Constants#MULTIPART_MIN_SIZE}.
    * If it is too small, it is rounded up to that minimum, and a warning
@@ -521,7 +542,7 @@ public final class S3AUtils {
    */
   public static long getMultipartSizeProperty(Configuration conf,
       String property, long defVal) {
-    long partSize = conf.getLong(property, defVal);
+    long partSize = conf.getLongBytes(property, defVal);
     if (partSize < MULTIPART_MIN_SIZE) {
       LOG.warn("{} must be at least 5 MB; configured value is {}",
           property, partSize);
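
A sketch of what the new longBytesOption() check buys: an out-of-range
value now fails fast with a clear message instead of propagating. This
assumes caller code in the same package, since the helper is
package-private:

    import org.apache.hadoop.conf.Configuration;

    Configuration conf = new Configuration();
    conf.set("fs.s3a.readahead.range", "-1");
    try {
      S3AUtils.longBytesOption(conf, "fs.s3a.readahead.range", 65536, 0);
    } catch (IllegalArgumentException e) {
      // "Value of fs.s3a.readahead.range: -1 is below the minimum value 0"
    }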

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c4ccafda/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
index fe4f972..0cb64a2 100644
--- a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
+++ b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
@@ -791,16 +791,20 @@ from placing its declaration on the command line.
 
     <property>
       <name>fs.s3a.multipart.size</name>
-      <value>104857600</value>
+      <value>100M</value>
       <description>How big (in bytes) to split upload or copy operations up into.
-      This also controls the partition size in renamed files, as rename() involves
-      copying the source file(s)</description>
+        A suffix from the set {K,M,G,T,P} may be used to scale the numeric value.
+      </description>
     </property>
 
     <property>
       <name>fs.s3a.multipart.threshold</name>
       <value>2147483647</value>
-      <description>Threshold before uploads or copies use parallel multipart operations.</description>
+      <description>How big (in bytes) to split upload or copy operations up into.
+        This also controls the partition size in renamed files, as rename() involves
+        copying the source file(s).
+        A suffix from the set {K,M,G,T,P} may be used to scale the numeric value.
+      </description>
     </property>
 
     <property>
@@ -854,7 +858,7 @@ from placing its declaration on the command line.
 
     <property>
       <name>fs.s3a.block.size</name>
-      <value>33554432</value>
+      <value>32M</value>
       <description>Block size to use when reading files using s3a: file system.
       </description>
     </property>
@@ -888,7 +892,7 @@ from placing its declaration on the command line.
 
     <property>
       <name>fs.s3a.readahead.range</name>
-      <value>65536</value>
+      <value>64K</value>
       <description>Bytes to read ahead during a seek() before closing and
       re-opening the S3 HTTP connection. This option will be overridden if
       any call to setReadahead() is made to an open stream.</description>
@@ -1058,9 +1062,9 @@ S3 endpoints, as disks are not used for intermediate data storage.
 
 <property>
   <name>fs.s3a.multipart.size</name>
-  <value>104857600</value>
-  <description>
-  How big (in bytes) to split upload or copy operations up into.
+  <value>100M</value>
+  <description>How big (in bytes) to split upload or copy operations up into.
+    A suffix from the set {K,M,G,T,P} may be used to scale the numeric value.
   </description>
 </property>
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c4ccafda/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AConfiguration.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AConfiguration.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AConfiguration.java
index 6ae9613..9163b15 100644
--- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AConfiguration.java
+++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AConfiguration.java
@@ -380,7 +380,7 @@ public class ITestS3AConfiguration {
       byte[] file = ContractTestUtils.toAsciiByteArray("test file");
       ContractTestUtils.writeAndRead(fs,
           new Path("/path/style/access/testFile"), file, file.length,
-          conf.getInt(Constants.FS_S3A_BLOCK_SIZE, file.length), false, true);
+              (int) conf.getLongBytes(Constants.FS_S3A_BLOCK_SIZE, file.length), false, true);
     } catch (final AWSS3IOException e) {
       LOG.error("Caught exception: ", e);
       // Catch/pass standard path style access behaviour when live bucket
@@ -452,6 +452,17 @@ public class ITestS3AConfiguration {
   }
 
   @Test
+  public void testReadAheadRange() throws Exception {
+    conf = new Configuration();
+    conf.set(Constants.READAHEAD_RANGE, "300K");
+    fs = S3ATestUtils.createTestFileSystem(conf);
+    assertNotNull(fs);
+    long readAheadRange = fs.getReadAheadRange();
+    assertNotNull(readAheadRange);
+    assertEquals("Read Ahead Range Incorrect.", 300 * 1024, readAheadRange);
+  }
+
+  @Test
   public void testUsernameFromUGI() throws Throwable {
     final String alice = "alice";
     UserGroupInformation fakeUser =
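
The test's expected value comes straight from binary-prefix parsing:
"300K" resolves to 300 * 1024 = 307200 bytes. A sketch of the same parsing
via the utility that getLongBytes() appears to delegate to (an assumption
about the internals; Configuration.getLongBytes() is the supported route):

    import org.apache.hadoop.util.StringUtils;

    long v = StringUtils.TraditionalBinaryPrefix.string2long("300K");
    // v == 307200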


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[2/2] hadoop git commit: HADOOP-13680. fs.s3a.readahead.range to use getLongBytes. Contributed by Abhishek Modi.

Posted by st...@apache.org.
HADOOP-13680. fs.s3a.readahead.range to use getLongBytes. Contributed by Abhishek Modi.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a83f10c9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a83f10c9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a83f10c9

Branch: refs/heads/branch-2.8
Commit: a83f10c90dece921622d6563ad1acc8d8ee3ed65
Parents: f0a6d17
Author: Steve Loughran <st...@apache.org>
Authored: Mon Oct 31 20:52:49 2016 +0000
Committer: Steve Loughran <st...@apache.org>
Committed: Mon Oct 31 20:53:22 2016 +0000

----------------------------------------------------------------------
 .../src/main/resources/core-default.xml         | 18 ++++++++++-----
 .../org/apache/hadoop/fs/s3a/S3AFileSystem.java | 17 ++++++++++++---
 .../java/org/apache/hadoop/fs/s3a/S3AUtils.java | 23 +++++++++++++++++++-
 .../src/site/markdown/tools/hadoop-aws/index.md | 22 +++++++++++--------
 .../hadoop/fs/s3a/ITestS3AConfiguration.java    | 13 ++++++++++-
 5 files changed, 73 insertions(+), 20 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a83f10c9/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
index adfe8fd..12133b3 100644
--- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
+++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
@@ -937,8 +937,10 @@
 
 <property>
   <name>fs.s3a.multipart.size</name>
-  <value>104857600</value>
-  <description>How big (in bytes) to split upload or copy operations up into.</description>
+  <value>100M</value>
+  <description>How big (in bytes) to split upload or copy operations up into.
+    A suffix from the set {K,M,G,T,P} may be used to scale the numeric value.
+  </description>
 </property>
 
 <property>
@@ -946,7 +948,8 @@
   <value>2147483647</value>
   <description>How big (in bytes) to split upload or copy operations up into.
     This also controls the partition size in renamed files, as rename() involves
-    copying the source file(s)
+    copying the source file(s).
+    A suffix from the set {K,M,G,T,P} may be used to scale the numeric value.
   </description>
 </property>
 
@@ -1002,8 +1005,9 @@
 
 <property>
   <name>fs.s3a.block.size</name>
-  <value>33554432</value>
+  <value>32M</value>
   <description>Block size to use when reading files using s3a: file system.
+    A suffix from the set {K,M,G,T,P} may be used to scale the numeric value.
   </description>
 </property>
 
@@ -1065,10 +1069,12 @@
 
 <property>
   <name>fs.s3a.readahead.range</name>
-  <value>65536</value>
+  <value>64K</value>
   <description>Bytes to read ahead during a seek() before closing and
   re-opening the S3 HTTP connection. This option will be overridden if
-  any call to setReadahead() is made to an open stream.</description>
+  any call to setReadahead() is made to an open stream.
+  A suffix from the set {K,M,G,T,P} may be used to scale the numeric value.
+  </description>
 </property>
 
 <property>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a83f10c9/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
index a82fc93..b9b8810 100644
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
@@ -182,10 +182,11 @@ public class S3AFileSystem extends FileSystem {
           MIN_MULTIPART_THRESHOLD, DEFAULT_MIN_MULTIPART_THRESHOLD);
 
       //check but do not store the block size
-      longOption(conf, FS_S3A_BLOCK_SIZE, DEFAULT_BLOCKSIZE, 1);
+      longBytesOption(conf, FS_S3A_BLOCK_SIZE, DEFAULT_BLOCKSIZE, 1);
       enableMultiObjectsDelete = conf.getBoolean(ENABLE_MULTI_DELETE, true);
 
-      readAhead = longOption(conf, READAHEAD_RANGE, DEFAULT_READAHEAD_RANGE, 0);
+      readAhead = longBytesOption(conf, READAHEAD_RANGE,
+          DEFAULT_READAHEAD_RANGE, 0);
       storageStatistics = (S3AStorageStatistics)
           GlobalStorageStatistics.INSTANCE
               .put(S3AStorageStatistics.NAME,
@@ -357,6 +358,16 @@ public class S3AFileSystem extends FileSystem {
   }
 
   /**
+   * Returns the readahead range used by this filesystem.
+   * @return the readahead range, in bytes
+   */
+
+  @VisibleForTesting
+  long getReadAheadRange() {
+    return readAhead;
+  }
+
+  /**
    * Get the input policy for this FS instance.
    * @return the input policy
    */
@@ -1881,7 +1892,7 @@ public class S3AFileSystem extends FileSystem {
    */
   @Deprecated
   public long getDefaultBlockSize() {
-    return getConf().getLong(FS_S3A_BLOCK_SIZE, DEFAULT_BLOCKSIZE);
+    return getConf().getLongBytes(FS_S3A_BLOCK_SIZE, DEFAULT_BLOCKSIZE);
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a83f10c9/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AUtils.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AUtils.java
index 56e0c37..49f8862 100644
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AUtils.java
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AUtils.java
@@ -510,6 +510,27 @@ public final class S3AUtils {
   }
 
   /**
+   * Get a long option >= the minimum allowed value, supporting memory
+   * prefixes K,M,G,T,P.
+   * @param conf configuration
+   * @param key key to look up
+   * @param defVal default value
+   * @param min minimum value
+   * @return the value
+   * @throws IllegalArgumentException if the value is below the minimum
+   */
+  static long longBytesOption(Configuration conf,
+                             String key,
+                             long defVal,
+                             long min) {
+    long v = conf.getLongBytes(key, defVal);
+    Preconditions.checkArgument(v >= min,
+            String.format("Value of %s: %d is below the minimum value %d",
+                    key, v, min));
+    return v;
+  }
+
+  /**
    * Get a size property from the configuration: this property must
    * be at least equal to {@link Constants#MULTIPART_MIN_SIZE}.
    * If it is too small, it is rounded up to that minimum, and a warning
@@ -521,7 +542,7 @@ public final class S3AUtils {
    */
   public static long getMultipartSizeProperty(Configuration conf,
       String property, long defVal) {
-    long partSize = conf.getLong(property, defVal);
+    long partSize = conf.getLongBytes(property, defVal);
     if (partSize < MULTIPART_MIN_SIZE) {
       LOG.warn("{} must be at least 5 MB; configured value is {}",
           property, partSize);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a83f10c9/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
index fe4f972..0cb64a2 100644
--- a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
+++ b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
@@ -791,16 +791,20 @@ from placing its declaration on the command line.
 
     <property>
       <name>fs.s3a.multipart.size</name>
-      <value>104857600</value>
+      <value>100M</value>
       <description>How big (in bytes) to split upload or copy operations up into.
-      This also controls the partition size in renamed files, as rename() involves
-      copying the source file(s)</description>
+        A suffix from the set {K,M,G,T,P} may be used to scale the numeric value.
+      </description>
     </property>
 
     <property>
       <name>fs.s3a.multipart.threshold</name>
       <value>2147483647</value>
-      <description>Threshold before uploads or copies use parallel multipart operations.</description>
+      <description>How big (in bytes) to split upload or copy operations up into.
+        This also controls the partition size in renamed files, as rename() involves
+        copying the source file(s).
+        A suffix from the set {K,M,G,T,P} may be used to scale the numeric value.
+      </description>
     </property>
 
     <property>
@@ -854,7 +858,7 @@ from placing its declaration on the command line.
 
     <property>
       <name>fs.s3a.block.size</name>
-      <value>33554432</value>
+      <value>32M</value>
       <description>Block size to use when reading files using s3a: file system.
       </description>
     </property>
@@ -888,7 +892,7 @@ from placing its declaration on the command line.
 
     <property>
       <name>fs.s3a.readahead.range</name>
-      <value>65536</value>
+      <value>64K</value>
       <description>Bytes to read ahead during a seek() before closing and
       re-opening the S3 HTTP connection. This option will be overridden if
       any call to setReadahead() is made to an open stream.</description>
@@ -1058,9 +1062,9 @@ S3 endpoints, as disks are not used for intermediate data storage.
 
 <property>
   <name>fs.s3a.multipart.size</name>
-  <value>104857600</value>
-  <description>
-  How big (in bytes) to split upload or copy operations up into.
+  <value>100M</value>
+  <description>How big (in bytes) to split upload or copy operations up into.
+    A suffix from the set {K,M,G,T,P} may be used to scale the numeric value.
   </description>
 </property>
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a83f10c9/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AConfiguration.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AConfiguration.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AConfiguration.java
index ebad099..1d00001 100644
--- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AConfiguration.java
+++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AConfiguration.java
@@ -378,7 +378,7 @@ public class ITestS3AConfiguration {
       byte[] file = ContractTestUtils.toAsciiByteArray("test file");
       ContractTestUtils.writeAndRead(fs,
           new Path("/path/style/access/testFile"), file, file.length,
-          conf.getInt(Constants.FS_S3A_BLOCK_SIZE, file.length), false, true);
+              (int) conf.getLongBytes(Constants.FS_S3A_BLOCK_SIZE, file.length), false, true);
     } catch (final AWSS3IOException e) {
       LOG.error("Caught exception: ", e);
       // Catch/pass standard path style access behaviour when live bucket
@@ -450,6 +450,17 @@ public class ITestS3AConfiguration {
   }
 
   @Test
+  public void testReadAheadRange() throws Exception {
+    conf = new Configuration();
+    conf.set(Constants.READAHEAD_RANGE, "300K");
+    fs = S3ATestUtils.createTestFileSystem(conf);
+    assertNotNull(fs);
+    long readAheadRange = fs.getReadAheadRange();
+    assertNotNull(readAheadRange);
+    assertEquals("Read Ahead Range Incorrect.", 300 * 1024, readAheadRange);
+  }
+
+  @Test
   public void testUsernameFromUGI() throws Throwable {
     final String alice = "alice";
     UserGroupInformation fakeUser =

