Posted to common-commits@hadoop.apache.org by ta...@apache.org on 2021/04/26 04:33:05 UTC

[hadoop] branch branch-3.3 updated: HDFS-15967. Improve the log for Short Circuit Local Reads. Contributed by Bhavik Patel.

This is an automated email from the ASF dual-hosted git repository.

tasanuma pushed a commit to branch branch-3.3
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.3 by this push:
     new a5f038b  HDFS-15967. Improve the log for Short Circuit Local Reads. Contributed by Bhavik Patel.
a5f038b is described below

commit a5f038b3f6b798078a3eedb7ae62569acd7a24ba
Author: Takanobu Asanuma <ta...@apache.org>
AuthorDate: Mon Apr 26 13:29:28 2021 +0900

    HDFS-15967. Improve the log for Short Circuit Local Reads. Contributed by Bhavik Patel.
    
    (cherry picked from commit 01bad0e92a1f402930bb4c0d18dd605bdf5627e5)
---
 .../hadoop/hdfs/server/datanode/DataNode.java      | 51 ++++++++--------------
 .../datanode/fsdataset/impl/FsDatasetImpl.java     | 17 +++-----
 2 files changed, 24 insertions(+), 44 deletions(-)
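
Editor's note: the change below is mechanical. SLF4J parameterized logging
({} placeholders) replaces string concatenation, and the now-redundant
isDebugEnabled() guards are dropped. A minimal, self-contained sketch of the
pattern follows; it is illustrative only, not code from this commit, and the
class name and values are made up:

    import java.io.IOException;
    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class LoggingPatternSketch {
      private static final Logger LOG =
          LoggerFactory.getLogger(LoggingPatternSketch.class);

      public static void main(String[] args) {
        long blockId = 42L;          // hypothetical values, for illustration
        String replica = "ReplicaBeingWritten, blk_42_1001";

        // Before: concatenation builds the message string even when DEBUG
        // is off, so the call is wrapped in an explicit level check.
        if (LOG.isDebugEnabled()) {
          LOG.debug("blockId=" + blockId + ", replica=" + replica);
        }

        // After: {} placeholders are only substituted if DEBUG is enabled,
        // so the guard is redundant and can be dropped.
        LOG.debug("blockId={}, replica={}", blockId, replica);

        // A Throwable passed as the final argument is still printed with
        // its stack trace; it does not consume a {} placeholder.
        try {
          throw new IOException("simulated transfer failure");
        } catch (IOException ie) {
          LOG.warn("Failed to transfer block {}", blockId, ie);
        }
      }
    }

One trade-off is visible in the DataTransfer constructor hunk: without the
guard, argument expressions such as Arrays.asList(targets) are evaluated even
when DEBUG is off; only the message formatting itself is deferred.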

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index cc476d8..079dda4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -1113,7 +1113,7 @@ public class DataNode extends ReconfigurableBase
       directoryScanner = new DirectoryScanner(data, conf);
       directoryScanner.start();
     } else {
-      LOG.info("Periodic Directory Tree Verification scan " +
+      LOG.warn("Periodic Directory Tree Verification scan " +
               "is disabled because {}",
           reason);
     }
@@ -1316,21 +1316,6 @@ public class DataNode extends ReconfigurableBase
   }
 
   /**
-   * Try to send an error report to the NNs associated with the given
-   * block pool.
-   * @param bpid the block pool ID
-   * @param errCode error code to send
-   * @param errMsg textual message to send
-   */
-  void trySendErrorReport(String bpid, int errCode, String errMsg) {
-    BPOfferService bpos = blockPoolManager.get(bpid);
-    if (bpos == null) {
-      throw new IllegalArgumentException("Bad block pool: " + bpid);
-    }
-    bpos.trySendErrorReport(errCode, errMsg);
-  }
-
-  /**
    * Return the BPOfferService instance corresponding to the given block.
    * @return the BPOS
    * @throws IOException if no such BPOS can be found
@@ -2017,7 +2002,7 @@ public class DataNode extends ReconfigurableBase
       ByteArrayInputStream buf = new ByteArrayInputStream(token.getIdentifier());
       DataInputStream in = new DataInputStream(buf);
       id.readFields(in);
-      LOG.debug("Got: {}", id);
+      LOG.debug("BlockTokenIdentifier id: {}", id);
       blockPoolTokenSecretManager.checkAccess(id, null, block, accessMode,
           null, null);
     }
@@ -2240,8 +2225,8 @@ public class DataNode extends ReconfigurableBase
       return; // do not shutdown
     }
     
-    LOG.warn("DataNode is shutting down due to failed volumes: ["
-        + failedVolumes + "]");
+    LOG.warn("DataNode is shutting down due to failed volumes: [{}]",
+        failedVolumes);
     shouldRun = false;
   }
     
@@ -2283,7 +2268,7 @@ public class DataNode extends ReconfigurableBase
         curCount.put("networkErrors", curCount.get("networkErrors") + 1L);
         datanodeNetworkCounts.put(host, curCount);
       } catch (ExecutionException e) {
-        LOG.warn("failed to increment network error counts for " + host);
+        LOG.warn("failed to increment network error counts for host: {}", host);
       }
     }
   }
@@ -2333,7 +2318,7 @@ public class DataNode extends ReconfigurableBase
       final ExtendedBlock block, final String msg) {
     FsVolumeSpi volume = getFSDataset().getVolume(block);
     if (volume == null) {
-      LOG.warn("Cannot find FsVolumeSpi to report bad block: " + block);
+      LOG.warn("Cannot find FsVolumeSpi to report bad block: {}", block);
       return;
     }
     bpos.reportBadBlocks(
@@ -2414,7 +2399,7 @@ public class DataNode extends ReconfigurableBase
         transferBlock(new ExtendedBlock(poolId, blocks[i]), xferTargets[i],
             xferTargetStorageTypes[i], xferTargetStorageIDs[i]);
       } catch (IOException ie) {
-        LOG.warn("Failed to transfer block " + blocks[i], ie);
+        LOG.warn("Failed to transfer block {}", blocks[i], ie);
       }
     }
   }
@@ -2533,15 +2518,13 @@ public class DataNode extends ReconfigurableBase
     DataTransfer(DatanodeInfo targets[], StorageType[] targetStorageTypes,
         String[] targetStorageIds, ExtendedBlock b,
         BlockConstructionStage stage, final String clientname) {
-      if (DataTransferProtocol.LOG.isDebugEnabled()) {
-        DataTransferProtocol.LOG.debug("{}: {} (numBytes={}), stage={}, " +
-                "clientname={}, targets={}, target storage types={}, " +
-                "target storage IDs={}", getClass().getSimpleName(), b,
-            b.getNumBytes(), stage, clientname, Arrays.asList(targets),
-            targetStorageTypes == null ? "[]" :
-                Arrays.asList(targetStorageTypes),
-            targetStorageIds == null ? "[]" : Arrays.asList(targetStorageIds));
-      }
+      DataTransferProtocol.LOG.debug("{}: {} (numBytes={}), stage={}, " +
+              "clientname={}, targets={}, target storage types={}, " +
+              "target storage IDs={}", getClass().getSimpleName(), b,
+          b.getNumBytes(), stage, clientname, Arrays.asList(targets),
+          targetStorageTypes == null ? "[]" :
+              Arrays.asList(targetStorageTypes),
+          targetStorageIds == null ? "[]" : Arrays.asList(targetStorageIds));
       this.targets = targets;
       this.targetStorageTypes = targetStorageTypes;
       this.targetStorageIds = targetStorageIds;
@@ -2645,7 +2628,7 @@ public class DataNode extends ReconfigurableBase
         LOG.warn("{}:Failed to transfer {} to {} got",
             bpReg, b, targets[0], ie);
       } catch (Throwable t) {
-        LOG.error("Failed to transfer block " + b, t);
+        LOG.error("Failed to transfer block {}", b, t);
       } finally {
         decrementXmitsInProgress();
         IOUtils.closeStream(blockSender);
@@ -3037,7 +3020,7 @@ public class DataNode extends ReconfigurableBase
       }
       for (TokenIdentifier tokenId : tokenIds) {
         BlockTokenIdentifier id = (BlockTokenIdentifier) tokenId;
-        LOG.debug("Got: {}", id);
+        LOG.debug("BlockTokenIdentifier: {}", id);
         blockPoolTokenSecretManager.checkAccess(id, null, block,
             BlockTokenIdentifier.AccessMode.READ, null, null);
       }
@@ -3077,8 +3060,10 @@ public class DataNode extends ReconfigurableBase
       b.setGenerationStamp(storedGS);
       if (data.isValidRbw(b)) {
         stage = BlockConstructionStage.TRANSFER_RBW;
+        LOG.debug("Replica is being written!");
       } else if (data.isValidBlock(b)) {
         stage = BlockConstructionStage.TRANSFER_FINALIZED;
+        LOG.debug("Replica is finalized!");
       } else {
         final String r = data.getReplicaString(b.getBlockPoolId(), b.getBlockId());
         throw new IOException(b + " is neither a RBW nor a Finalized, r=" + r);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
index 7067b9b..92b1dac 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
@@ -2068,9 +2068,7 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
       datanode.checkDiskErrorAsync(r.getVolume());
     }
 
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("blockId=" + blockId + ", replica=" + r);
-    }
+    LOG.debug("blockId={}, replica={}", blockId, r);
     return null;
   }
 
@@ -2140,15 +2138,12 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
             continue;
           }
         } catch(IllegalArgumentException e) {
-          LOG.warn("Parent directory check failed; replica " + info
-              + " is not backed by a local file");
+          LOG.warn("Parent directory check failed; replica {} is " +
+              "not backed by a local file", info);
         }
         removing = volumeMap.remove(bpid, invalidBlks[i]);
         addDeletingBlock(bpid, removing.getBlockId());
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Block file " + removing.getBlockURI()
-              + " is to be deleted");
-        }
+        LOG.debug("Block file {} is to be deleted", removing.getBlockURI());
         if (removing instanceof ReplicaInPipeline) {
           ((ReplicaInPipeline) removing).releaseAllBytesReserved();
         }
@@ -2189,8 +2184,8 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
               dataStorage.getTrashDirectoryForReplica(bpid, removing));
         }
       } catch (ClosedChannelException e) {
-        LOG.warn("Volume " + v + " is closed, ignore the deletion task for " +
-            "block " + invalidBlks[i]);
+        LOG.warn("Volume {} is closed, ignore the deletion task for " +
+            "block: {}", v, invalidBlks[i]);
       }
     }
     if (!errors.isEmpty()) {
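
Usage note (an assumption about a typical deployment, not part of the
commit): with the stock log4j 1.x configuration that Hadoop 3.3 ships, the
new DEBUG messages can be surfaced by raising the level for the two touched
classes in log4j.properties:

    log4j.logger.org.apache.hadoop.hdfs.server.datanode.DataNode=DEBUG
    log4j.logger.org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl=DEBUG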
