You are viewing a plain-text version of this content; the canonical HTML version is available from the mailing-list archive.
Posted to hdfs-commits@hadoop.apache.org by umamahesh@apache.org on 2012/04/19 04:34:45 UTC
svn commit: r1327788 - in
/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs: ./
src/main/java/org/apache/hadoop/hdfs/
Author: umamahesh
Date: Thu Apr 19 02:34:44 2012
New Revision: 1327788
URL: http://svn.apache.org/viewvc?rev=1327788&view=rev
Log:
HDFS-3282. Expose getFileLength API. Contributed by Uma Maheswara Rao G.
Modified:
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1327788&r1=1327787&r2=1327788&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Thu Apr 19 02:34:44 2012
@@ -68,6 +68,8 @@ Trunk (unreleased changes)
HDFS-3292. Remove the deprecated DiskStatus, getDiskStatus(), getRawCapacity() and
getRawUsed() from DistributedFileSystem. (Arpit Gupta via szetszwo)
+ HDFS-3282. Expose getFileLength API. (umamahesh)
+
OPTIMIZATIONS
HDFS-2834. Add a ByteBuffer-based read API to DFSInputStream.
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java?rev=1327788&r1=1327787&r2=1327788&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java Thu Apr 19 02:34:44 2012
@@ -78,7 +78,6 @@ import org.apache.hadoop.fs.BlockLocatio
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.CreateFlag;
-import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileAlreadyExistsException;
import org.apache.hadoop.fs.FileSystem;
@@ -91,6 +90,7 @@ import org.apache.hadoop.fs.ParentNotDir
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.UnresolvedLinkException;
import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks;
import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
@@ -1809,41 +1809,13 @@ public class DFSClient implements java.i
}
/**
- * The Hdfs implementation of {@link FSDataInputStream}
+ * @deprecated use {@link HdfsDataInputStream} instead.
*/
- @InterfaceAudience.Private
- public static class DFSDataInputStream extends FSDataInputStream {
- public DFSDataInputStream(DFSInputStream in)
- throws IOException {
- super(in);
- }
-
- /**
- * Returns the datanode from which the stream is currently reading.
- */
- public DatanodeInfo getCurrentDatanode() {
- return ((DFSInputStream)in).getCurrentDatanode();
- }
-
- /**
- * Returns the block containing the target position.
- */
- public ExtendedBlock getCurrentBlock() {
- return ((DFSInputStream)in).getCurrentBlock();
- }
+ @Deprecated
+ public static class DFSDataInputStream extends HdfsDataInputStream {
- /**
- * Return collection of blocks that has already been located.
- */
- synchronized List<LocatedBlock> getAllBlocks() throws IOException {
- return ((DFSInputStream)in).getAllBlocks();
- }
-
- /**
- * @return The visible length of the file.
- */
- public long getVisibleLength() throws IOException {
- return ((DFSInputStream)in).getFileLength();
+ public DFSDataInputStream(DFSInputStream in) throws IOException {
+ super(in);
}
}
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java?rev=1327788&r1=1327787&r2=1327788&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java Thu Apr 19 02:34:44 2012
@@ -224,7 +224,7 @@ public class DFSInputStream extends FSIn
/**
* Return collection of blocks that has already been located.
*/
- synchronized List<LocatedBlock> getAllBlocks() throws IOException {
+ public synchronized List<LocatedBlock> getAllBlocks() throws IOException {
return getBlockRange(0, getFileLength());
}
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java?rev=1327788&r1=1327787&r2=1327788&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java Thu Apr 19 02:34:44 2012
@@ -46,6 +46,7 @@ import org.apache.hadoop.fs.PathFilter;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSClient.DFSDataInputStream;
+import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.DirectoryListing;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
@@ -194,8 +195,9 @@ public class DistributedFileSystem exten
return dfs.recoverLease(getPathName(f));
}
+ @SuppressWarnings("deprecation")
@Override
- public FSDataInputStream open(Path f, int bufferSize) throws IOException {
+ public HdfsDataInputStream open(Path f, int bufferSize) throws IOException {
statistics.incrementReadOps(1);
return new DFSClient.DFSDataInputStream(
dfs.open(getPathName(f), bufferSize, verifyChecksum));
@@ -623,6 +625,7 @@ public class DistributedFileSystem exten
// We do not see a need for user to report block checksum errors and do not
// want to rely on user to report block corruptions.
@Deprecated
+ @SuppressWarnings("deprecation")
public boolean reportChecksumFailure(Path f,
FSDataInputStream in, long inPos,
FSDataInputStream sums, long sumsPos) {