Posted to oak-commits@jackrabbit.apache.org by al...@apache.org on 2015/09/09 11:50:08 UTC

svn commit: r1701965 - /jackrabbit/oak/trunk/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/segment/Compactor.java

Author: alexparvulescu
Date: Wed Sep  9 09:50:07 2015
New Revision: 1701965

URL: http://svn.apache.org/r1701965
Log:
OAK-3359 Compactor progress log


Modified:
    jackrabbit/oak/trunk/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/segment/Compactor.java

Modified: jackrabbit/oak/trunk/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/segment/Compactor.java
URL: http://svn.apache.org/viewvc/jackrabbit/oak/trunk/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/segment/Compactor.java?rev=1701965&r1=1701964&r2=1701965&view=diff
==============================================================================
--- jackrabbit/oak/trunk/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/segment/Compactor.java (original)
+++ jackrabbit/oak/trunk/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/segment/Compactor.java Wed Sep  9 09:50:07 2015
@@ -65,6 +65,8 @@ public class Compactor {
 
     private final PartialCompactionMap map;
 
+    private final ProgressTracker progress = new ProgressTracker();
+
     /**
      * Map from {@link #getBlobKey(Blob) blob keys} to matching compacted
      * blob record identifiers. Used to de-duplicate copies of the same
@@ -111,8 +113,10 @@ public class Compactor {
      * @return  the compacted state
      */
     public SegmentNodeState compact(NodeState before, NodeState after) {
+        progress.start();
         SegmentNodeState compacted = process(before, after, before).getNodeState();
         writer.flush();
+        progress.stop();
         return compacted;
     }
 
@@ -125,8 +129,10 @@ public class Compactor {
      * @return  the compacted state
      */
     public SegmentNodeState compact(NodeState before, NodeState after, NodeState onto) {
+        progress.start();
         SegmentNodeState compacted = process(before, after, onto).getNodeState();
         writer.flush();
+        progress.stop();
         return compacted;
     }
 
@@ -167,6 +173,7 @@ public class Compactor {
             if (path != null) {
                 log.trace("propertyAdded {}/{}", path, after.getName());
             }
+            progress.onProperty();
             return super.propertyAdded(compact(after));
         }
 
@@ -175,6 +182,7 @@ public class Compactor {
             if (path != null) {
                 log.trace("propertyChanged {}/{}", path, after.getName());
             }
+            progress.onProperty();
             return super.propertyChanged(before, compact(after));
         }
 
@@ -183,6 +191,7 @@ public class Compactor {
             if (path != null) {
                 log.trace("childNodeAdded {}/{}", path, name);
             }
+            progress.onNode();
             RecordId id = null;
             if (after instanceof SegmentNodeState) {
                 id = ((SegmentNodeState) after).getRecordId();
@@ -214,6 +223,7 @@ public class Compactor {
             if (path != null) {
                 log.trace("childNodeChanged {}/{}", path, name);
             }
+            progress.onNode();
 
             RecordId id = null;
             if (after instanceof SegmentNodeState) {
@@ -269,7 +279,7 @@ public class Compactor {
     private Blob compact(Blob blob) {
         if (blob instanceof SegmentBlob) {
             SegmentBlob sb = (SegmentBlob) blob;
-
+            progress.onBinary();
             try {
                 // else check if we've already cloned this specific record
                 RecordId id = sb.getRecordId();
@@ -328,4 +338,54 @@ public class Compactor {
         }
     }
 
+    private class ProgressTracker {
+
+        private final long logAt = Long.getLong("compaction-progress-log",
+                150000);
+
+        private long start = 0;
+
+        private long nodes = 0;
+        private long properties = 0;
+        private long binaries = 0;
+
+        void start() {
+            nodes = 0;
+            properties = 0;
+            binaries = 0;
+            start = System.currentTimeMillis();
+        }
+
+        void onNode() {
+            if (++nodes % logAt == 0) {
+                logProgress(start, false);
+                start = System.currentTimeMillis();
+            }
+        }
+
+        void onProperty() {
+            properties++;
+        }
+
+        void onBinary() {
+            binaries++;
+        }
+
+        void stop() {
+            logProgress(start, true);
+        }
+
+        private void logProgress(long start, boolean done) {
+            log.debug(
+                    "Compacted {} nodes, {} properties, {} binaries in {} ms.",
+                    nodes, properties, binaries, System.currentTimeMillis()
+                            - start);
+            if (done) {
+                log.info(
+                        "Finished compaction: {} nodes, {} properties, {} binaries.",
+                        nodes, properties, binaries);
+            }
+        }
+    }
+
 }
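
A note on usage (not part of the commit above): the ProgressTracker reads its logging
interval once from the "compaction-progress-log" system property, defaulting to 150000
nodes; the periodic progress line goes to the debug level and the final summary to info.
A minimal sketch of how the interval could be lowered for a more verbose run, assuming
the property is set before the Compactor is constructed (the property name and default
come from the diff, the value 50000 is only illustrative):

    // Set before the Compactor (and its ProgressTracker) is created, since the
    // interval is read once into a final field. Equivalent to passing
    // -Dcompaction-progress-log=50000 on the JVM command line.
    System.setProperty("compaction-progress-log", "50000");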