You are viewing a plain text version of this content. The canonical link for it is here.
Posted to hdfs-commits@hadoop.apache.org by sz...@apache.org on 2011/10/26 20:24:29 UTC
svn commit: r1189360 - in
/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs: CHANGES.txt
src/main/java/org/apache/hadoop/hdfs/server/datanode/web/resources/DatanodeWebHdfsMethods.java
Author: szetszwo
Date: Wed Oct 26 18:24:29 2011
New Revision: 1189360
URL: http://svn.apache.org/viewvc?rev=1189360&view=rev
Log:
HDFS-2494. Close the streams and DFSClient in DatanodeWebHdfsMethods. Contributed by Uma Maheswara Rao G
Modified:
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/resources/DatanodeWebHdfsMethods.java
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1189360&r1=1189359&r2=1189360&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Wed Oct 26 18:24:29 2011
@@ -159,6 +159,9 @@ Trunk (unreleased changes)
HDFS-2411. The auth to local mappings are not being respected, with
webhdfs enabled. (jitendra)
+ HDFS-2494. Close the streams and DFSClient in DatanodeWebHdfsMethods.
+ (Uma Maheswara Rao G via szetszwo)
+
Release 0.23.0 - Unreleased
INCOMPATIBLE CHANGES
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/resources/DatanodeWebHdfsMethods.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/resources/DatanodeWebHdfsMethods.java?rev=1189360&r1=1189359&r2=1189360&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/resources/DatanodeWebHdfsMethods.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/resources/DatanodeWebHdfsMethods.java Wed Oct 26 18:24:29 2011
@@ -152,17 +152,23 @@ public class DatanodeWebHdfsMethods {
{
final Configuration conf = new Configuration(datanode.getConf());
final InetSocketAddress nnRpcAddr = NameNode.getAddress(conf);
- final DFSClient dfsclient = new DFSClient(nnRpcAddr, conf);
final int b = bufferSize.getValue(conf);
- final FSDataOutputStream out = new FSDataOutputStream(dfsclient.create(
- fullpath, permission.getFsPermission(),
- overwrite.getValue() ? EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE)
- : EnumSet.of(CreateFlag.CREATE),
- replication.getValue(conf), blockSize.getValue(conf), null, b), null);
+ DFSClient dfsclient = new DFSClient(nnRpcAddr, conf);
+ FSDataOutputStream out = null;
try {
+ out = new FSDataOutputStream(dfsclient.create(
+ fullpath, permission.getFsPermission(),
+ overwrite.getValue() ? EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE)
+ : EnumSet.of(CreateFlag.CREATE),
+ replication.getValue(conf), blockSize.getValue(conf), null, b), null);
IOUtils.copyBytes(in, out, b);
- } finally {
out.close();
+ out = null;
+ dfsclient.close();
+ dfsclient = null;
+ } finally {
+ IOUtils.cleanup(LOG, out);
+ IOUtils.cleanup(LOG, dfsclient);
}
final InetSocketAddress nnHttpAddr = NameNode.getHttpAddress(conf);
final URI uri = new URI(WebHdfsFileSystem.SCHEME, null,
@@ -227,13 +233,19 @@ public class DatanodeWebHdfsMethods {
{
final Configuration conf = new Configuration(datanode.getConf());
final InetSocketAddress nnRpcAddr = NameNode.getAddress(conf);
- final DFSClient dfsclient = new DFSClient(nnRpcAddr, conf);
final int b = bufferSize.getValue(conf);
- final FSDataOutputStream out = dfsclient.append(fullpath, b, null, null);
+ DFSClient dfsclient = new DFSClient(nnRpcAddr, conf);
+ FSDataOutputStream out = null;
try {
+ out = dfsclient.append(fullpath, b, null, null);
IOUtils.copyBytes(in, out, b);
- } finally {
out.close();
+ out = null;
+ dfsclient.close();
+ dfsclient = null;
+ } finally {
+ IOUtils.cleanup(LOG, out);
+ IOUtils.cleanup(LOG, dfsclient);
}
return Response.ok().type(MediaType.APPLICATION_JSON).build();
}
@@ -301,18 +313,36 @@ public class DatanodeWebHdfsMethods {
case OPEN:
{
final int b = bufferSize.getValue(conf);
- final DFSDataInputStream in = new DFSClient.DFSDataInputStream(
- dfsclient.open(fullpath, b, true));
- in.seek(offset.getValue());
-
+ DFSDataInputStream in = null;
+ try {
+ in = new DFSClient.DFSDataInputStream(
+ dfsclient.open(fullpath, b, true));
+ in.seek(offset.getValue());
+ } catch(IOException ioe) {
+ IOUtils.cleanup(LOG, in);
+ IOUtils.cleanup(LOG, dfsclient);
+ throw ioe;
+ }
+ final DFSDataInputStream dis = in;
final StreamingOutput streaming = new StreamingOutput() {
@Override
public void write(final OutputStream out) throws IOException {
final Long n = length.getValue();
- if (n == null) {
- IOUtils.copyBytes(in, out, b);
- } else {
- IOUtils.copyBytes(in, out, n, false);
+ DFSDataInputStream dfsin = dis;
+ DFSClient client = dfsclient;
+ try {
+ if (n == null) {
+ IOUtils.copyBytes(dfsin, out, b);
+ } else {
+ IOUtils.copyBytes(dfsin, out, n, false);
+ }
+ dfsin.close();
+ dfsin = null;
+ dfsclient.close();
+ client = null;
+ } finally {
+ IOUtils.cleanup(LOG, dfsin);
+ IOUtils.cleanup(LOG, client);
}
}
};
@@ -324,7 +354,15 @@ public class DatanodeWebHdfsMethods {
}
case GETFILECHECKSUM:
{
- final MD5MD5CRC32FileChecksum checksum = dfsclient.getFileChecksum(fullpath);
+ MD5MD5CRC32FileChecksum checksum = null;
+ DFSClient client = dfsclient;
+ try {
+ checksum = client.getFileChecksum(fullpath);
+ client.close();
+ client = null;
+ } finally {
+ IOUtils.cleanup(LOG, client);
+ }
final String js = JsonUtil.toJsonString(checksum);
return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
}