Posted to oak-commits@jackrabbit.apache.org by re...@apache.org on 2015/12/01 17:50:17 UTC

svn commit: r1717462 - /jackrabbit/oak/trunk/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/rdb/RDBDocumentStore.java

Author: reschke
Date: Tue Dec  1 16:50:17 2015
New Revision: 1717462

URL: http://svn.apache.org/viewvc?rev=1717462&view=rev
Log:
OAK-3699: RDBDocumentStore shutdown: improve logging

Modified:
    jackrabbit/oak/trunk/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/rdb/RDBDocumentStore.java

Modified: jackrabbit/oak/trunk/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/rdb/RDBDocumentStore.java
URL: http://svn.apache.org/viewvc/jackrabbit/oak/trunk/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/rdb/RDBDocumentStore.java?rev=1717462&r1=1717461&r2=1717462&view=diff
==============================================================================
--- jackrabbit/oak/trunk/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/rdb/RDBDocumentStore.java (original)
+++ jackrabbit/oak/trunk/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/rdb/RDBDocumentStore.java Tue Dec  1 16:50:17 2015
@@ -36,6 +36,7 @@ import java.sql.Types;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collections;
+import java.util.Comparator;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Iterator;
@@ -438,6 +439,8 @@ public class RDBDocumentStore implements
         } catch (IOException ex) {
             LOG.error("closing connection handler", ex);
         }
+        LOG.info("RDBDocumentStore (" + OakVersion.getVersion() + ") disposed" + getCnStats()
+                + (this.droppedTables.isEmpty() ? "" : " (tables dropped: " + this.droppedTables + ")"));
     }
 
     @Override
@@ -885,6 +888,7 @@ public class RDBDocumentStore implements
             for (List<UpdateOp> chunks : Lists.partition(updates, CHUNKSIZE)) {
                 List<T> docs = new ArrayList<T>();
                 for (UpdateOp update : chunks) {
+                    maintainUpdateStats(collection, update.getId());
                     UpdateUtils.assertUnconditional(update);
                     T doc = collection.newDocument(this);
                     addUpdateCounters(update);
@@ -965,6 +969,7 @@ public class RDBDocumentStore implements
         if (checkConditions && !UpdateUtils.checkConditions(oldDoc, update.getConditions())) {
             return null;
         } else {
+            maintainUpdateStats(collection, update.getId());
             addUpdateCounters(update);
             T doc = createNewDocument(collection, oldDoc, update);
             Lock l = getAndLock(update.getId());
@@ -1742,6 +1747,34 @@ public class RDBDocumentStore implements
         }
     }
 
+    // keeping track of CLUSTER_NODES updates
+    private Map<String, Long> cnUpdates = new ConcurrentHashMap<String, Long>();
+
+    private void maintainUpdateStats(Collection collection, String key) {
+        if (collection == Collection.CLUSTER_NODES) {
+            synchronized (this) {
+                Long old = cnUpdates.get(key);
+                old = old == null ? Long.valueOf(1) : old + 1;
+                cnUpdates.put(key, old);
+            }
+        }
+    }
+
+    private String getCnStats() {
+        if (cnUpdates.isEmpty()) {
+            return "";
+        } else {
+            List<Map.Entry<String, Long>> tmp = new ArrayList<Map.Entry<String, Long>>();
+            tmp.addAll(cnUpdates.entrySet());
+            Collections.sort(tmp, new Comparator<Map.Entry<String, Long>>() {
+                @Override
+                public int compare(Entry<String, Long> o1, Entry<String, Long> o2) {
+                    return o1.getKey().compareTo(o2.getKey());
+                }});
+            return " (Cluster Node updates: " + tmp.toString() + ")";
+        }
+    }
+
     protected Cache<CacheValue, NodeDocument> getNodeDocumentCache() {
         return nodesCache;
     }
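
For readers skimming the diff: the commit keeps a per-key counter of updates to the CLUSTER_NODES collection and, when the store is disposed, logs those counters (sorted by key) alongside the Oak version and any dropped tables. Below is a minimal standalone sketch of that bookkeeping pattern; the class and method names are illustrative only and are not part of RDBDocumentStore or the Oak codebase.

import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

// Sketch of the update-statistics pattern from the commit: count updates
// per key during normal operation, then render the counters sorted by key
// into a single report string at shutdown.
public class UpdateStatsSketch {

    private final Map<String, Long> updates = new ConcurrentHashMap<String, Long>();

    // Analogous to maintainUpdateStats(): bump the counter for one key.
    // The get/put pair is guarded so concurrent increments are not lost.
    public void recordUpdate(String key) {
        synchronized (this) {
            Long old = updates.get(key);
            updates.put(key, old == null ? Long.valueOf(1) : old + 1);
        }
    }

    // Analogous to getCnStats(): empty string when nothing was recorded,
    // otherwise the entries sorted by key for a stable, readable log line.
    public String report() {
        if (updates.isEmpty()) {
            return "";
        }
        List<Map.Entry<String, Long>> tmp = new ArrayList<Map.Entry<String, Long>>(updates.entrySet());
        Collections.sort(tmp, new Comparator<Map.Entry<String, Long>>() {
            @Override
            public int compare(Map.Entry<String, Long> o1, Map.Entry<String, Long> o2) {
                return o1.getKey().compareTo(o2.getKey());
            }
        });
        return " (updates: " + tmp + ")";
    }

    public static void main(String[] args) {
        UpdateStatsSketch stats = new UpdateStatsSketch();
        stats.recordUpdate("2");
        stats.recordUpdate("1");
        stats.recordUpdate("1");
        // Prints " (updates: [1=2, 2=1])" -- keys sorted, counts aggregated.
        System.out.println(stats.report());
    }
}

In the commit itself this pattern is scoped to Collection.CLUSTER_NODES (maintainUpdateStats is a no-op for other collections), and the report is appended to the "disposed" log message in dispose().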