Posted to commits@hbase.apache.org by st...@apache.org on 2017/12/28 04:50:28 UTC

hbase git commit: HBASE-19652 Turn down CleanerChore logging; too chatty

Repository: hbase
Updated Branches:
  refs/heads/master 2c65f039e -> 1050936ce


HBASE-19652 Turn down CleanerChore logging; too chatty


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/1050936c
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/1050936c
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/1050936c

Branch: refs/heads/master
Commit: 1050936ce7fc8e6466453e64d59597443ae07152
Parents: 2c65f03
Author: Michael Stack <st...@apache.org>
Authored: Wed Dec 27 20:49:26 2017 -0800
Committer: Michael Stack <st...@apache.org>
Committed: Wed Dec 27 20:49:26 2017 -0800

----------------------------------------------------------------------
 .../hadoop/hbase/RegionTooBusyException.java    |  4 --
 .../hbase/client/AsyncRequestFutureImpl.java    |  4 +-
 .../hbase/master/cleaner/CleanerChore.java      | 54 ++++++--------------
 .../hadoop/hbase/regionserver/HRegion.java      |  5 +-
 .../hbase/regionserver/MemStoreFlusher.java     |  5 +-
 .../PressureAwareThroughputController.java      |  2 +-
 .../throttle/ThroughputControlUtil.java         |  2 +-
 7 files changed, 25 insertions(+), 51 deletions(-)
----------------------------------------------------------------------
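
The bulk of this change converts string-concatenation log statements, often
wrapped in LOG.isDebugEnabled() guards, to SLF4J-style {} placeholders, and
demotes per-file messages from DEBUG to TRACE. A minimal sketch of the
pattern (the Example class and filePath parameter are illustrative, not part
of this commit):

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class Example {
      private static final Logger LOG = LoggerFactory.getLogger(Example.class);

      void before(Object filePath) {
        // Old style: the message string is built even when DEBUG is off,
        // so an isDebugEnabled() guard is needed to avoid the cost.
        if (LOG.isDebugEnabled()) {
          LOG.debug("Removing: " + filePath + " from archive");
        }
      }

      void after(Object filePath) {
        // New style: {} placeholders defer formatting until the level is
        // enabled, so no guard is required; per-file chatter also moves
        // down to TRACE.
        LOG.trace("Removing {} from archive", filePath);
      }
    }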


http://git-wip-us.apache.org/repos/asf/hbase/blob/1050936c/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionTooBusyException.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionTooBusyException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionTooBusyException.java
index 49431b6..3024962 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionTooBusyException.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionTooBusyException.java
@@ -32,10 +32,6 @@ import org.apache.yetus.audience.InterfaceAudience;
 public class RegionTooBusyException extends IOException {
   private static final long serialVersionUID = 1728345723728342L;
 
-  /**
-   * Constructor
-   * @param msg message
-   */
  // Be careful. Keep variance in the passed 'msg' low because the msg is used as a key over in
   // RetriesExhaustedWithDetailsException grouping failure types.
   public RegionTooBusyException(final String msg) {
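
The comment above matters because RetriesExhaustedWithDetailsException uses
the exception message as a grouping key when summarizing failures, so a
message that embeds highly variable data (precise byte counts, timestamps)
defeats the grouping. A simplified illustration of that kind of
message-keyed grouping (not the actual HBase code):

    import java.util.HashMap;
    import java.util.Map;

    // Count failures per message so one summary line can stand in for many
    // identical errors. High-variance messages would each land in their own
    // bucket, which is why the comment asks callers to keep variance low.
    public class FailureGrouping {
      public static Map<String, Integer> groupByMessage(Iterable<Throwable> failures) {
        Map<String, Integer> counts = new HashMap<>();
        for (Throwable t : failures) {
          counts.merge(String.valueOf(t.getMessage()), 1, Integer::sum);
        }
        return counts;
      }
    }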

http://git-wip-us.apache.org/repos/asf/hbase/blob/1050936c/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRequestFutureImpl.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRequestFutureImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRequestFutureImpl.java
index ed1bdb3..d214dca 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRequestFutureImpl.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRequestFutureImpl.java
@@ -977,12 +977,12 @@ class AsyncRequestFutureImpl<CResult> implements AsyncRequestFuture {
                            Throwable error, long backOffTime, boolean willRetry, String startTime,
                            int failed, int stopped) {
     StringBuilder sb = new StringBuilder();
-    sb.append("#").append(asyncProcess.id).append(", table=").append(tableName).append(", ")
+    sb.append("id=").append(asyncProcess.id).append(", table=").append(tableName).append(", ")
         .append("attempt=").append(numAttempt)
         .append("/").append(asyncProcess.numTries).append(" ");
 
     if (failureCount > 0 || error != null){
-      sb.append("failed=").append(failureCount).append("ops").append(", last exception: ").
+      sb.append("failed=").append(failureCount).append("ops").append(", last exception=").
           append(error == null ? "null" : error);
     } else {
       sb.append("succeeded");

http://git-wip-us.apache.org/repos/asf/hbase/blob/1050936c/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/CleanerChore.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/CleanerChore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/CleanerChore.java
index bc7c82d..abf132c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/CleanerChore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/CleanerChore.java
@@ -125,7 +125,7 @@ public abstract class CleanerChore<T extends FileCleanerDelegate> extends Schedu
      // but at most the number of available processors.
       int size = Math.min(Integer.valueOf(poolSize), AVAIL_PROCESSORS);
       if (size == AVAIL_PROCESSORS) {
-        LOG.warn("Use full core processors to scan dir");
+        LOG.warn("Use full core processors to scan dir, size={}" + size);
       }
       return size;
     } else if (poolSize.matches("0.[0-9]+|1.0")) {
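
For context, calculatePoolSize accepts either an absolute thread count,
capped at the number of available processors, or (per the matches branch
above) a decimal fraction of the cores. A simplified sketch of that logic;
the integer-branch regex and the fallback value are assumptions, only the
two branches shown in the hunk are taken from the source:

    // Simplified sketch of the pool-size calculation; in the real class
    // AVAIL_PROCESSORS is Runtime.getRuntime().availableProcessors().
    static int calculatePoolSize(String poolSize, int availProcessors) {
      if (poolSize.matches("[1-9][0-9]*")) {          // assumed regex
        // Absolute count, capped at the available processors.
        return Math.min(Integer.parseInt(poolSize), availProcessors);
      } else if (poolSize.matches("0.[0-9]+|1.0")) {
        // Fraction of the cores, e.g. "0.5" on an 8-core box gives 4 threads.
        return (int) (availProcessors * Double.parseDouble(poolSize));
      }
      return 1; // assumed fallback for unparseable input
    }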
@@ -157,7 +157,7 @@ public abstract class CleanerChore<T extends FileCleanerDelegate> extends Schedu
       for (String className : logCleaners) {
         T logCleaner = newFileCleaner(className, conf);
         if (logCleaner != null) {
-          LOG.debug("initialize cleaner=" + className);
+          LOG.debug("Initialize cleaner=" + className);
           this.cleanersChain.add(logCleaner);
         }
       }
@@ -168,10 +168,7 @@ public abstract class CleanerChore<T extends FileCleanerDelegate> extends Schedu
   public void onConfigurationChange(Configuration conf) {
     int updatedSize = calculatePoolSize(conf.get(CHORE_POOL_SIZE, DEFAULT_CHORE_POOL_SIZE));
     if (updatedSize == chorePoolSize) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Size from configuration is the same as previous which is " +
-          updatedSize + ", no need to update.");
-      }
+      LOG.trace("Size from configuration is same as previous={}, no need to update.", updatedSize);
       return;
     }
     chorePoolSize = updatedSize;
@@ -186,8 +183,7 @@ public abstract class CleanerChore<T extends FileCleanerDelegate> extends Schedu
 
   private void updateChorePoolSize(int updatedSize) {
     chorePool.shutdownNow();
-    LOG.info("Update chore's pool size from " +
-        chorePool.getParallelism() + " to " + updatedSize);
+    LOG.info("Update chore's pool size from {} to {}", chorePool.getParallelism(), updatedSize);
     chorePool = new ForkJoinPool(updatedSize);
   }
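
Together, onConfigurationChange and updateChorePoolSize resize the chore's
worker pool at runtime: ForkJoinPool parallelism is fixed at construction,
so the old pool is shut down and replaced wholesale. A hedged sketch of that
swap (class and field names here are illustrative):

    import java.util.concurrent.ForkJoinPool;

    class PoolHolder {
      private volatile ForkJoinPool pool = new ForkJoinPool(2);

      void resize(int updatedSize) {
        if (updatedSize == pool.getParallelism()) {
          return; // no change, mirroring the early return above
        }
        // Parallelism cannot be changed in place, so replace the pool.
        pool.shutdownNow();
        pool = new ForkJoinPool(updatedSize);
      }
    }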
 
@@ -208,7 +204,7 @@ public abstract class CleanerChore<T extends FileCleanerDelegate> extends Schedu
       cleaner.init(this.params);
       return cleaner;
     } catch (Exception e) {
-      LOG.warn("Can NOT create CleanerDelegate: " + className, e);
+      LOG.warn("Can NOT create CleanerDelegate={}", className, e);
       // skipping if can't instantiate
       return null;
     }
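
newFileCleaner instantiates each delegate from its configured class name
and, as the hunk shows, warns and skips on failure rather than aborting the
whole chain. A generic sketch of that instantiate-by-name pattern (the
method name is hypothetical; the real method also calls
cleaner.init(this.params)):

    // Load a class by name, construct it reflectively, and return null on
    // any failure so one bad delegate does not stop the whole cleaner chain.
    static <T> T newInstanceOrNull(String className, Class<T> type) {
      try {
        Class<? extends T> c = Class.forName(className).asSubclass(type);
        return c.getDeclaredConstructor().newInstance();
      } catch (Exception e) {
        return null; // caller logs a warning and skips, as above
      }
    }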
@@ -218,9 +214,7 @@ public abstract class CleanerChore<T extends FileCleanerDelegate> extends Schedu
   protected void chore() {
     if (getEnabled()) {
       if (runCleaner()) {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Cleaned old files/dirs under " + oldFileDir + " successfully.");
-        }
+        LOG.debug("Cleaned old files/dirs under {} successfully",  oldFileDir);
       } else {
         LOG.warn("Failed to fully clean old files/dirs under " + oldFileDir + ".");
       }
@@ -274,9 +268,7 @@ public abstract class CleanerChore<T extends FileCleanerDelegate> extends Schedu
           directorySpaces.put(f, space);
           return space;
         } catch (IOException e) {
-          if (LOG.isTraceEnabled()) {
-            LOG.trace("failed to get space consumed by path " + f.getPath(), e);
-          }
+          LOG.trace("Failed to get space consumed by path={}", f.getPath(), e);
           return -1;
         }
       }
@@ -343,9 +335,7 @@ public abstract class CleanerChore<T extends FileCleanerDelegate> extends Schedu
     int deletedFileCount = 0;
     for (FileStatus file : filesToDelete) {
       Path filePath = file.getPath();
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Removing: " + filePath + " from archive");
-      }
+      LOG.trace("Removing {} from archive", filePath);
       try {
         boolean success = this.fs.delete(filePath, false);
         if (success) {
@@ -426,14 +416,10 @@ public abstract class CleanerChore<T extends FileCleanerDelegate> extends Schedu
 
       boolean nullSubDirs = subDirs == null;
       if (nullSubDirs) {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("There is no subdir under " + dir);
-        }
+        LOG.trace("There is no subdir under {}", dir);
       }
       if (files == null) {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("There is no file under " + dir);
-        }
+        LOG.trace("There is no file under {}", dir);
       }
 
       int capacity = nullSubDirs ? 0 : subDirs.size();
@@ -449,7 +435,7 @@ public abstract class CleanerChore<T extends FileCleanerDelegate> extends Schedu
 
       boolean result = true;
       result &= deleteAction(() -> checkAndDeleteFiles(files), "files");
-      result &= deleteAction(() -> getCleanRusult(tasks), "subdirs");
+      result &= deleteAction(() -> getCleanResult(tasks), "subdirs");
       // if and only if files and subdirs under current dir are deleted successfully, and
       // it is not the root dir, then task will try to delete it.
       if (result && !root) {
@@ -478,24 +464,16 @@ public abstract class CleanerChore<T extends FileCleanerDelegate> extends Schedu
      */
     private boolean deleteAction(Action<Boolean> deletion, String type) {
       boolean deleted;
-      String errorMsg = "";
+      String errorMsg = null;
       try {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Start deleting " + type + " under " + dir);
-        }
+        LOG.trace("Start deleting {} under {}", type, dir);
         deleted = deletion.act();
       } catch (IOException ioe) {
         errorMsg = ioe.getMessage();
+        LOG.warn("Could not delete {} under {}; {}", type, dir, errorMsg);
         deleted = false;
       }
-      if (LOG.isDebugEnabled()) {
-        if (deleted) {
-          LOG.debug("Finish deleting " + type + " under " + dir);
-        } else {
-          LOG.debug("Couldn't delete " + type + " completely under " + dir +
-            " with reasons: " + (!errorMsg.equals("") ? errorMsg : " undeletable, please check."));
-        }
-      }
+      LOG.trace("Finish deleting {} under {}, deleted=", type, dir, deleted);
       return deleted;
     }
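
deleteAction wraps each deletion in a small functional interface so that
files and subdirectory tasks share a single logging and error path (see the
lambdas () -> checkAndDeleteFiles(files) and () -> getCleanResult(tasks)
above). A minimal sketch of that shape, assuming an Action interface like
the one this class defines:

    import java.io.IOException;

    // The deletion is passed as a lambda plus a label for logging, so all
    // error handling lives in one place instead of at every call site.
    interface Action<T> {
      T act() throws IOException;
    }

    class DeleteSketch {
      boolean deleteAction(Action<Boolean> deletion, String type) {
        try {
          return deletion.act();
        } catch (IOException ioe) {
          System.err.println("Could not delete " + type + "; " + ioe.getMessage());
          return false;
        }
      }
    }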
 
@@ -505,7 +483,7 @@ public abstract class CleanerChore<T extends FileCleanerDelegate> extends Schedu
     * @return true if all subdirs deleted successfully, false for partial/all failures
     * @throws IOException if something happens during computation
      */
-    private boolean getCleanRusult(List<CleanerTask> tasks) throws IOException {
+    private boolean getCleanResult(List<CleanerTask> tasks) throws IOException {
       boolean cleaned = true;
       try {
         for (CleanerTask task : tasks) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/1050936c/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index ca1bfd3..b9a5a2b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -4173,10 +4173,11 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
       // Don't print current limit because it will vary too much. The message is used as a key
       // over in RetriesExhaustedWithDetailsException processing.
       throw new RegionTooBusyException("Over memstore limit; regionName=" +
-          (this.getRegionInfo() == null? "unknown": this.getRegionInfo().getRegionNameAsString()) +
+          (this.getRegionInfo() == null? "unknown": this.getRegionInfo().getEncodedName()) +
           ", server=" + (this.getRegionServerServices() == null ? "unknown":
           this.getRegionServerServices().getServerName()) +
-          ", blockingMemStoreSize=" + blockingMemStoreSize);
+          ", blockingMemStoreSize=" +
+          org.apache.hadoop.hbase.procedure2.util.StringUtils.humanSize(blockingMemStoreSize));
     }
   }
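
The humanSize call above renders the raw byte count in a readable unit
instead of a long digit string that would bloat the exception message. A
minimal equivalent, if you need the same effect without that utility
(illustrative only, not the HBase implementation):

    // Format a byte count with a binary-unit suffix, e.g. 1073741824 -> "1.0G".
    static String humanSize(double size) {
      String[] units = {"", "K", "M", "G", "T", "P"};
      int i = 0;
      while (size >= 1024 && i < units.length - 1) {
        size /= 1024;
        i++;
      }
      return String.format("%.1f%s", size, units[i]);
    }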
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/1050936c/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java
index 9e352ef..1d4e431 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java
@@ -431,8 +431,7 @@ class MemStoreFlusher implements FlushRequester {
    */
   private boolean flushRegion(final FlushRegionEntry fqe) {
     HRegion region = fqe.region;
-    if (!region.getRegionInfo().isMetaRegion() &&
-        isTooManyStoreFiles(region)) {
+    if (!region.getRegionInfo().isMetaRegion() && isTooManyStoreFiles(region)) {
       if (fqe.isMaximumWait(this.blockingWaitTime)) {
         LOG.info("Waited " + (EnvironmentEdgeManager.currentTime() - fqe.createTime) +
           "ms on a compaction to clean up 'too many store files'; waited " +
@@ -442,7 +441,7 @@ class MemStoreFlusher implements FlushRequester {
         // If this is first time we've been put off, then emit a log message.
         if (fqe.getRequeueCount() <= 0) {
           // Note: We don't impose blockingStoreFiles constraint on meta regions
-          LOG.warn("Region " + region.getRegionInfo().getRegionNameAsString() + " has too many " +
+          LOG.warn("Region " + region.getRegionInfo().getEncodedName() + " has too many " +
             "store files; delaying flush up to " + this.blockingWaitTime + "ms");
           if (!this.server.compactSplitThread.requestSplit(region)) {
             try {

http://git-wip-us.apache.org/repos/asf/hbase/blob/1050936c/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/PressureAwareThroughputController.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/PressureAwareThroughputController.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/PressureAwareThroughputController.java
index ec90830..a1bd21b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/PressureAwareThroughputController.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/PressureAwareThroughputController.java
@@ -122,7 +122,7 @@ public abstract class PressureAwareThroughputController extends Configured imple
       // do not log too much
       if (now - operation.lastLogTime > 5L * 1000) {
         LOG.debug("deltaSize: " + deltaSize + " bytes; elapseTime: " + elapsedTime + " ns");
-        LOG.debug(opName + " sleep " + sleepTime + " ms because current throughput is "
+        LOG.debug(opName + " sleep=" + sleepTime + "ms because current throughput is "
             + throughputDesc(deltaSize, elapsedTime) + ", max allowed is "
             + throughputDesc(maxThroughputPerOperation) + ", already slept "
             + operation.numberOfSleeps + " time(s) and total slept time is "
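
The now - operation.lastLogTime > 5L * 1000 guard above is a simple rate
limiter: even with DEBUG enabled, the throttler emits at most one of these
messages per operation every five seconds. A standalone sketch of that
pattern (the class name is illustrative):

    // Remember the last emission time and suppress messages that arrive
    // within the window.
    class LogThrottle {
      private final long windowMillis;
      private long lastLogTime;

      LogThrottle(long windowMillis) {
        this.windowMillis = windowMillis;
      }

      synchronized boolean shouldLog(long nowMillis) {
        if (nowMillis - lastLogTime > windowMillis) {
          lastLogTime = nowMillis;
          return true;
        }
        return false;
      }
    }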

http://git-wip-us.apache.org/repos/asf/hbase/blob/1050936c/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/ThroughputControlUtil.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/ThroughputControlUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/ThroughputControlUtil.java
index 764c065..ad65c59 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/ThroughputControlUtil.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/ThroughputControlUtil.java
@@ -49,7 +49,7 @@ public final class ThroughputControlUtil {
         break;
       }
     }
-    return store.getRegionInfo().getRegionNameAsString() + NAME_DELIMITER +
+    return store.getRegionInfo().getEncodedName() + NAME_DELIMITER +
         store.getColumnFamilyDescriptor().getNameAsString() + NAME_DELIMITER + opName +
         NAME_DELIMITER + counter;
   }