Posted to common-commits@hadoop.apache.org by zj...@apache.org on 2015/04/06 21:22:43 UTC

[13/50] [abbrv] hadoop git commit: HDFS-7978. Add LOG.isDebugEnabled() guard for some LOG.debug(..). Contributed by Walter Su.

Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b9fbc73e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b9fbc73e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b9fbc73e

Branch: refs/heads/YARN-2928
Commit: b9fbc73e57f2d64cbf7109f232d9c670ca6d7df6
Parents: 188b7d0
Author: Andrew Wang <wa...@apache.org>
Authored: Wed Apr 1 12:53:25 2015 -0700
Committer: Zhijie Shen <zj...@apache.org>
Committed: Mon Apr 6 12:08:11 2015 -0700

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |  3 +++
 .../apache/hadoop/hdfs/BlockReaderFactory.java  | 24 +++++++++-----------
 .../java/org/apache/hadoop/hdfs/HAUtil.java     | 12 ++++++----
 .../hdfs/server/datanode/BPServiceActor.java    |  8 ++++---
 .../datanode/fsdataset/impl/FsDatasetCache.java |  8 ++++---
 .../server/namenode/FileJournalManager.java     | 22 +++++++++++-------
 .../hadoop/hdfs/server/namenode/NameNode.java   |  4 +++-
 .../hdfs/shortcircuit/ShortCircuitCache.java    |  4 +++-
 .../tools/offlineImageViewer/FSImageLoader.java |  6 +++--
 .../hadoop/hdfs/util/LightWeightHashSet.java    |  6 +++--
 .../org/apache/hadoop/hdfs/web/TokenAspect.java |  8 +++++--
 .../hadoop/hdfs/web/WebHdfsFileSystem.java      | 16 +++++++++----
 12 files changed, 78 insertions(+), 43 deletions(-)
----------------------------------------------------------------------
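
A note on why these guards matter: most of the classes touched here log through commons-logging, whose debug(Object) method takes an already-built message, so any string concatenation in the argument runs even when debug logging is disabled. Wrapping the call in LOG.isDebugEnabled() makes that work conditional. A minimal sketch of the pattern, modeled on the BPServiceActor hunk below (class and method names are illustrative, not from the patch):

    import org.apache.commons.logging.Log;
    import org.apache.commons.logging.LogFactory;

    class GuardSketch {
      private static final Log LOG = LogFactory.getLog(GuardSketch.class);

      void reportCacheMetrics(int blocks, long createCost, long sendCost) {
        // Without the guard, the concatenation below builds the full
        // message string even when debug logging is off.
        if (LOG.isDebugEnabled()) {
          LOG.debug("CacheReport of " + blocks + " block(s) took " + createCost
              + " msec to generate and " + sendCost + " msecs for RPC");
        }
      }
    }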


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b9fbc73e/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index cba53a3..435fdd7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -373,6 +373,9 @@ Release 2.8.0 - UNRELEASED
 
     HDFS-8009. Signal congestion on the DataNode. (wheat9)
 
+    HDFS-7978. Add LOG.isDebugEnabled() guard for some LOG.debug(..).
+    (Walter Su via wang)
+
   OPTIMIZATIONS
 
   BUG FIXES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b9fbc73e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderFactory.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderFactory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderFactory.java
index 1e915b2..8f33899 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderFactory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderFactory.java
@@ -369,9 +369,9 @@ public class BlockReaderFactory implements ShortCircuitReplicaCreator {
       return null;
     }
     if (clientContext.getDisableLegacyBlockReaderLocal()) {
-      PerformanceAdvisory.LOG.debug(this + ": can't construct " +
-          "BlockReaderLocalLegacy because " +
-          "disableLegacyBlockReaderLocal is set.");
+        PerformanceAdvisory.LOG.debug("{}: can't construct " +
+            "BlockReaderLocalLegacy because " +
+            "disableLegacyBlockReaderLocal is set.", this);
       return null;
     }
     IOException ioe = null;
@@ -410,8 +410,8 @@ public class BlockReaderFactory implements ShortCircuitReplicaCreator {
                       getPathInfo(inetSocketAddress, conf);
     }
     if (!pathInfo.getPathState().getUsableForShortCircuit()) {
-      PerformanceAdvisory.LOG.debug(this + ": " + pathInfo + " is not " +
-          "usable for short circuit; giving up on BlockReaderLocal.");
+      PerformanceAdvisory.LOG.debug("{}: {} is not usable for short circuit; " +
+              "giving up on BlockReaderLocal.", this, pathInfo);
       return null;
     }
     ShortCircuitCache cache = clientContext.getShortCircuitCache();
@@ -426,11 +426,9 @@ public class BlockReaderFactory implements ShortCircuitReplicaCreator {
       throw exc;
     }
     if (info.getReplica() == null) {
-      if (LOG.isTraceEnabled()) {
-        PerformanceAdvisory.LOG.debug(this + ": failed to get " +
-            "ShortCircuitReplica. Cannot construct " +
-            "BlockReaderLocal via " + pathInfo.getPath());
-      }
+      PerformanceAdvisory.LOG.debug("{}: failed to get " +
+          "ShortCircuitReplica. Cannot construct " +
+          "BlockReaderLocal via {}", this, pathInfo.getPath());
       return null;
     }
     return new BlockReaderLocal.Builder(conf).
@@ -610,9 +608,9 @@ public class BlockReaderFactory implements ShortCircuitReplicaCreator {
                       getPathInfo(inetSocketAddress, conf);
     }
     if (!pathInfo.getPathState().getUsableForDataTransfer()) {
-      PerformanceAdvisory.LOG.debug(this + ": not trying to create a " +
-          "remote block reader because the UNIX domain socket at " +
-          pathInfo + " is not usable.");
+      PerformanceAdvisory.LOG.debug("{}: not trying to create a " +
+          "remote block reader because the UNIX domain socket at {}" +
+           " is not usable.", this, pathInfo);
       return null;
     }
     if (LOG.isTraceEnabled()) {
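
BlockReaderFactory illustrates the other fix applied in this patch: PerformanceAdvisory.LOG is a parameterized (SLF4J-style) logger, as the {} placeholders show, so the message is only formatted after the logger's own level check and no explicit guard is needed. Note that the third hunk also removes a mismatched guard, where a PerformanceAdvisory.LOG.debug(..) call sat inside LOG.isTraceEnabled(), a check against a different logger at a different level. A minimal sketch of the parameterized style (names illustrative):

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    class ParameterizedSketch {
      private static final Logger LOG =
          LoggerFactory.getLogger(ParameterizedSketch.class);

      void advise(Object reader, Object pathInfo) {
        // SLF4J substitutes {} arguments only after its own level check,
        // so no guard is needed; only the arguments' toString() calls are
        // deferred, not the evaluation of the argument expressions.
        LOG.debug("{}: {} is not usable for short circuit; giving up.",
            reader, pathInfo);
      }
    }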

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b9fbc73e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HAUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HAUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HAUtil.java
index f91f709..0ee57c2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HAUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HAUtil.java
@@ -319,12 +319,16 @@ public class HAUtil {
             buildTokenServicePrefixForLogicalUri(HdfsConstants.HDFS_URI_SCHEME)
                 + "//" + specificToken.getService());
         ugi.addToken(alias, specificToken);
-        LOG.debug("Mapped HA service delegation token for logical URI " +
-            haUri + " to namenode " + singleNNAddr);
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("Mapped HA service delegation token for logical URI " +
+              haUri + " to namenode " + singleNNAddr);
+        }
       }
     } else {
-      LOG.debug("No HA service delegation token found for logical URI " +
-          haUri);
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("No HA service delegation token found for logical URI " +
+            haUri);
+      }
     }
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b9fbc73e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
index df582f1..dd6f9ac 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
@@ -580,9 +580,11 @@ class BPServiceActor implements Runnable {
       long createCost = createTime - startTime;
       long sendCost = sendTime - createTime;
       dn.getMetrics().addCacheReport(sendCost);
-      LOG.debug("CacheReport of " + blockIds.size()
-          + " block(s) took " + createCost + " msec to generate and "
-          + sendCost + " msecs for RPC and NN processing");
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("CacheReport of " + blockIds.size()
+            + " block(s) took " + createCost + " msec to generate and "
+            + sendCost + " msecs for RPC and NN processing");
+      }
     }
     return cmd;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b9fbc73e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetCache.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetCache.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetCache.java
index c6408e6..e0df0f2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetCache.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetCache.java
@@ -319,9 +319,11 @@ public class FsDatasetCache {
       mappableBlockMap.put(key,
           new Value(prevValue.mappableBlock, State.UNCACHING));
       if (deferred) {
-        LOG.debug("{} is anchored, and can't be uncached now.  Scheduling it " +
-            "for uncaching in {} ",
-            key, DurationFormatUtils.formatDurationHMS(revocationPollingMs));
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("{} is anchored, and can't be uncached now.  Scheduling it " +
+                  "for uncaching in {} ",
+              key, DurationFormatUtils.formatDurationHMS(revocationPollingMs));
+        }
         deferredUncachingExecutor.schedule(
             new UncachingTask(key, revocationMs),
             revocationPollingMs, TimeUnit.MILLISECONDS);
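
FsDatasetCache already used {} placeholders, yet still gains a guard: parameterized logging defers only the message formatting, while argument expressions such as DurationFormatUtils.formatDurationHMS(revocationPollingMs) are still evaluated eagerly at the call site. A sketch of why the guard still pays off here (formatDuration is a hypothetical stand-in for that expensive call):

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    class EagerArgumentSketch {
      private static final Logger LOG =
          LoggerFactory.getLogger(EagerArgumentSketch.class);

      // Hypothetical stand-in for an expensive argument such as
      // DurationFormatUtils.formatDurationHMS(..) in the patch.
      private static String formatDuration(long ms) {
        return String.format("%d:%02d:%02d.%03d",
            ms / 3600000, (ms / 60000) % 60, (ms / 1000) % 60, ms % 1000);
      }

      void schedule(Object key, long pollMs) {
        // The guard keeps formatDuration(pollMs) from running when
        // debug logging is off; {} substitution alone would not.
        if (LOG.isDebugEnabled()) {
          LOG.debug("{} scheduled for uncaching in {}",
              key, formatDuration(pollMs));
        }
      }
    }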

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b9fbc73e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java
index 12733fd..b760dd8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java
@@ -320,9 +320,11 @@ public class FileJournalManager implements JournalManager {
       Collection<EditLogInputStream> streams, long fromTxId,
       boolean inProgressOk) throws IOException {
     List<EditLogFile> elfs = matchEditLogs(sd.getCurrentDir());
-    LOG.debug(this + ": selecting input streams starting at " + fromTxId + 
-        (inProgressOk ? " (inProgress ok) " : " (excluding inProgress) ") +
-        "from among " + elfs.size() + " candidate file(s)");
+    if (LOG.isDebugEnabled()) {
+      LOG.debug(this + ": selecting input streams starting at " + fromTxId +
+          (inProgressOk ? " (inProgress ok) " : " (excluding inProgress) ") +
+          "from among " + elfs.size() + " candidate file(s)");
+    }
     addStreamsToCollectionFromFiles(elfs, streams, fromTxId, inProgressOk);
   }
   
@@ -331,8 +333,10 @@ public class FileJournalManager implements JournalManager {
     for (EditLogFile elf : elfs) {
       if (elf.isInProgress()) {
         if (!inProgressOk) {
-          LOG.debug("passing over " + elf + " because it is in progress " +
-              "and we are ignoring in-progress logs.");
+          if (LOG.isDebugEnabled()) {
+            LOG.debug("passing over " + elf + " because it is in progress " +
+                "and we are ignoring in-progress logs.");
+          }
           continue;
         }
         try {
@@ -345,9 +349,11 @@ public class FileJournalManager implements JournalManager {
       }
       if (elf.lastTxId < fromTxId) {
         assert elf.lastTxId != HdfsConstants.INVALID_TXID;
-        LOG.debug("passing over " + elf + " because it ends at " +
-            elf.lastTxId + ", but we only care about transactions " +
-            "as new as " + fromTxId);
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("passing over " + elf + " because it ends at " +
+              elf.lastTxId + ", but we only care about transactions " +
+              "as new as " + fromTxId);
+        }
         continue;
       }
       EditLogFileInputStream elfis = new EditLogFileInputStream(elf.getFile(),

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b9fbc73e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
index 04be1ad..4575d64 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
@@ -1534,7 +1534,9 @@ public class NameNode implements NameNodeStatusMXBean {
       URI defaultUri = URI.create(HdfsConstants.HDFS_URI_SCHEME + "://"
           + conf.get(DFS_NAMENODE_RPC_ADDRESS_KEY));
       conf.set(FS_DEFAULT_NAME_KEY, defaultUri.toString());
-      LOG.debug("Setting " + FS_DEFAULT_NAME_KEY + " to " + defaultUri.toString());
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Setting " + FS_DEFAULT_NAME_KEY + " to " + defaultUri.toString());
+      }
     }
   }
     

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b9fbc73e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitCache.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitCache.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitCache.java
index bccfd6e..73c52d5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitCache.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitCache.java
@@ -446,7 +446,9 @@ public class ShortCircuitCache implements Closeable {
           purgeReason = "purging replica because it is stale.";
         }
         if (purgeReason != null) {
-          LOG.debug(this + ": " + purgeReason);
+          if (LOG.isDebugEnabled()) {
+            LOG.debug(this + ": " + purgeReason);
+          }
           purge(replica);
         }
       }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b9fbc73e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FSImageLoader.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FSImageLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FSImageLoader.java
index fd29106..351ff03 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FSImageLoader.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FSImageLoader.java
@@ -147,8 +147,10 @@ class FSImageLoader {
             summary.getCodec(), new BufferedInputStream(new LimitInputStream(
             fin, s.getLength())));
 
-        LOG.debug("Loading section " + s.getName() + " length: " + s.getLength
-                ());
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("Loading section " + s.getName() + " length: " + s.getLength
+              ());
+        }
         switch (FSImageFormatProtobuf.SectionName.fromString(s.getName())) {
           case STRING_TABLE:
             stringTable = loadStringTable(is);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b9fbc73e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/LightWeightHashSet.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/LightWeightHashSet.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/LightWeightHashSet.java
index b60ef70..55655ec 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/LightWeightHashSet.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/LightWeightHashSet.java
@@ -126,8 +126,10 @@ public class LightWeightHashSet<T> implements Collection<T> {
     this.shrinkThreshold = (int) (capacity * minLoadFactor);
 
     entries = new LinkedElement[capacity];
-    LOG.debug("initial capacity=" + initialCapacity + ", max load factor= "
-        + maxLoadFactor + ", min load factor= " + minLoadFactor);
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("initial capacity=" + initialCapacity + ", max load factor= "
+          + maxLoadFactor + ", min load factor= " + minLoadFactor);
+    }
   }
 
   public LightWeightHashSet() {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b9fbc73e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/TokenAspect.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/TokenAspect.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/TokenAspect.java
index fc18dbe..164692e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/TokenAspect.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/TokenAspect.java
@@ -136,7 +136,9 @@ final class TokenAspect<T extends FileSystem & Renewable> {
       if (token != null) {
         fs.setDelegationToken(token);
         addRenewAction(fs);
-        LOG.debug("Created new DT for " + token.getService());
+        if(LOG.isDebugEnabled()) {
+          LOG.debug("Created new DT for " + token.getService());
+        }
       }
       hasInitedToken = true;
     }
@@ -149,7 +151,9 @@ final class TokenAspect<T extends FileSystem & Renewable> {
   synchronized void initDelegationToken(UserGroupInformation ugi) {
     Token<?> token = selectDelegationToken(ugi);
     if (token != null) {
-      LOG.debug("Found existing DT for " + token.getService());
+      if(LOG.isDebugEnabled()) {
+        LOG.debug("Found existing DT for " + token.getService());
+      }
       fs.setDelegationToken(token);
       hasInitedToken = true;
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b9fbc73e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
index 12adb05..6a55899 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
@@ -224,12 +224,16 @@ public class WebHdfsFileSystem extends FileSystem
       // refetch tokens.  even if ugi has credentials, don't attempt
       // to get another token to match hdfs/rpc behavior
       if (token != null) {
-        LOG.debug("Using UGI token: " + token);
+        if(LOG.isDebugEnabled()) {
+          LOG.debug("Using UGI token: " + token);
+        }
         canRefreshDelegationToken = false; 
       } else {
         token = getDelegationToken(null);
         if (token != null) {
-          LOG.debug("Fetched new token: " + token);
+          if(LOG.isDebugEnabled()) {
+            LOG.debug("Fetched new token: " + token);
+          }
         } else { // security is disabled
           canRefreshDelegationToken = false;
         }
@@ -244,7 +248,9 @@ public class WebHdfsFileSystem extends FileSystem
     boolean replaced = false;
     if (canRefreshDelegationToken) {
       Token<?> token = getDelegationToken(null);
-      LOG.debug("Replaced expired token: " + token);
+      if(LOG.isDebugEnabled()) {
+        LOG.debug("Replaced expired token: " + token);
+      }
       setDelegationToken(token);
       replaced = (token != null);
     }
@@ -1188,7 +1194,9 @@ public class WebHdfsFileSystem extends FileSystem
         cancelDelegationToken(delegationToken);
       }
     } catch (IOException ioe) {
-      LOG.debug("Token cancel failed: "+ioe);
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Token cancel failed: " + ioe);
+      }
     } finally {
       super.close();
     }
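
One aside on that last hunk: "Token cancel failed: " + ioe records only the exception's toString(). When the stack trace matters, commons-logging's two-argument debug(Object, Throwable) overload captures it, and it combines naturally with the same guard (a sketch, names illustrative):

    import java.io.IOException;

    import org.apache.commons.logging.Log;
    import org.apache.commons.logging.LogFactory;

    class ThrowableSketch {
      private static final Log LOG = LogFactory.getLog(ThrowableSketch.class);

      void onCancelFailure(IOException ioe) {
        if (LOG.isDebugEnabled()) {
          // Passing the exception as a Throwable logs its stack trace,
          // unlike concatenating it into the message string.
          LOG.debug("Token cancel failed", ioe);
        }
      }
    }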