Posted to commits@jackrabbit.apache.org by al...@apache.org on 2011/12/14 17:43:25 UTC

svn commit: r1214329 - in /jackrabbit/trunk/jackrabbit-core/src: main/java/org/apache/jackrabbit/core/ main/java/org/apache/jackrabbit/core/query/ main/java/org/apache/jackrabbit/core/query/lucene/ test/java/org/apache/jackrabbit/core/cluster/

Author: alexparvulescu
Date: Wed Dec 14 16:43:24 2011
New Revision: 1214329

URL: http://svn.apache.org/viewvc?rev=1214329&view=rev
Log:
JCR-3162 Index update overhead on cluster slave due to JCR-905

Added:
    jackrabbit/trunk/jackrabbit-core/src/test/java/org/apache/jackrabbit/core/cluster/DbClusterTestJCR3162.java   (with props)
Modified:
    jackrabbit/trunk/jackrabbit-core/src/main/java/org/apache/jackrabbit/core/RepositoryImpl.java
    jackrabbit/trunk/jackrabbit-core/src/main/java/org/apache/jackrabbit/core/SearchManager.java
    jackrabbit/trunk/jackrabbit-core/src/main/java/org/apache/jackrabbit/core/query/QueryHandlerContext.java
    jackrabbit/trunk/jackrabbit-core/src/main/java/org/apache/jackrabbit/core/query/lucene/MultiIndex.java
    jackrabbit/trunk/jackrabbit-core/src/main/java/org/apache/jackrabbit/core/query/lucene/SearchIndex.java

Modified: jackrabbit/trunk/jackrabbit-core/src/main/java/org/apache/jackrabbit/core/RepositoryImpl.java
URL: http://svn.apache.org/viewvc/jackrabbit/trunk/jackrabbit-core/src/main/java/org/apache/jackrabbit/core/RepositoryImpl.java?rev=1214329&r1=1214328&r2=1214329&view=diff
==============================================================================
--- jackrabbit/trunk/jackrabbit-core/src/main/java/org/apache/jackrabbit/core/RepositoryImpl.java (original)
+++ jackrabbit/trunk/jackrabbit-core/src/main/java/org/apache/jackrabbit/core/RepositoryImpl.java Wed Dec 14 16:43:24 2011
@@ -605,7 +605,7 @@ public class RepositoryImpl extends Abst
         if (systemSearchMgr == null) {
             if (repConfig.isSearchEnabled()) {
                 systemSearchMgr = new SearchManager(
-                        context,
+                        null, context,
                         repConfig,
                         getWorkspaceInfo(wspName).itemStateMgr,
                         context.getInternalVersionManager().getPersistenceManager(),
@@ -1853,6 +1853,7 @@ public class RepositoryImpl extends Abst
                     // search manager is lazily instantiated in order to avoid
                     // 'chicken & egg' bootstrap problems
                     searchMgr = new SearchManager(
+                            getName(),
                             context,
                             config,
                             itemStateMgr, persistMgr,

Modified: jackrabbit/trunk/jackrabbit-core/src/main/java/org/apache/jackrabbit/core/SearchManager.java
URL: http://svn.apache.org/viewvc/jackrabbit/trunk/jackrabbit-core/src/main/java/org/apache/jackrabbit/core/SearchManager.java?rev=1214329&r1=1214328&r2=1214329&view=diff
==============================================================================
--- jackrabbit/trunk/jackrabbit-core/src/main/java/org/apache/jackrabbit/core/SearchManager.java (original)
+++ jackrabbit/trunk/jackrabbit-core/src/main/java/org/apache/jackrabbit/core/SearchManager.java Wed Dec 14 16:43:24 2011
@@ -111,20 +111,21 @@ public class SearchManager implements Sy
     /**
      * Creates a new <code>SearchManager</code>.
      *
-     * @param config         the search configuration.
-     * @param nsReg          the namespace registry.
-     * @param ntReg          the node type registry.
-     * @param itemMgr        the shared item state manager.
-     * @param pm             the underlying persistence manager.
-     * @param rootNodeId     the id of the root node.
-     * @param parentMgr      the parent search manager or <code>null</code> if
-     *                       there is no parent search manager.
-     * @param excludedNodeId id of the node that should be excluded from
-     *                       indexing. Any descendant of that node will also be
-     *                       excluded from indexing.
+     * @param workspace          the workspace name.
+     * @param repositoryContext  the repository context.
+     * @param qhf                the query handler factory.
+     * @param itemMgr            the shared item state manager.
+     * @param pm                 the underlying persistence manager.
+     * @param rootNodeId         the id of the root node.
+     * @param parentMgr          the parent search manager or <code>null</code> if
+     *                           there is no parent search manager.
+     * @param excludedNodeId     id of the node that should be excluded from
+     *                           indexing. Any descendant of that node will also be
+     *                           excluded from indexing.
      * @throws RepositoryException if the search manager cannot be initialized
      */
     public SearchManager(
+            String workspace,
             RepositoryContext repositoryContext,
             QueryHandlerFactory qhf,
             SharedItemStateManager itemMgr,
@@ -167,10 +168,9 @@ public class SearchManager implements Sy
         }
 
         // initialize query handler
-        this.handler = qhf.getQueryHandler(new QueryHandlerContext(
-                repositoryContext,
-                itemMgr, pm, rootNodeId,
-                parentHandler, excludedNodeId));
+        this.handler = qhf.getQueryHandler(new QueryHandlerContext(workspace,
+                repositoryContext, itemMgr, pm, rootNodeId, parentHandler,
+                excludedNodeId));
     }
 
     /**
@@ -332,10 +332,6 @@ public class SearchManager implements Sy
                 long type = e.getType();
                 if (type == Event.NODE_ADDED) {
                     addedNodes.put(e.getChildId(), e);
-                    // quick'n dirty fix for JCR-905
-                    if (e.isExternal()) {
-                        removedNodes.add(e.getChildId());
-                    }
                     if (e.isShareableChildNode()) {
                         // simply re-index shareable nodes
                         removedNodes.add(e.getChildId());
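
The hunk above removes the JCR-905 workaround that queued every externally added node for a remove before its add. Within the NODE_ADDED branch, only the shareable-node case still forces a remove-then-add. A minimal sketch of the remaining rule, assuming the listener works on org.apache.jackrabbit.core.observation.EventImpl instances as SearchManager#onEvent does (hypothetical helper, not part of this commit):

    import javax.jcr.observation.Event;

    import org.apache.jackrabbit.core.observation.EventImpl;

    public final class ReindexRule {

        private ReindexRule() {
        }

        /**
         * After JCR-3162 only shareable child nodes still trigger a
         * remove-then-add of the same id on NODE_ADDED; externally added
         * nodes are now indexed once, just like locally added ones.
         */
        public static boolean needsRemoveBeforeAdd(EventImpl e) {
            return e.getType() == Event.NODE_ADDED && e.isShareableChildNode();
        }
    }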

Modified: jackrabbit/trunk/jackrabbit-core/src/main/java/org/apache/jackrabbit/core/query/QueryHandlerContext.java
URL: http://svn.apache.org/viewvc/jackrabbit/trunk/jackrabbit-core/src/main/java/org/apache/jackrabbit/core/query/QueryHandlerContext.java?rev=1214329&r1=1214328&r2=1214329&view=diff
==============================================================================
--- jackrabbit/trunk/jackrabbit-core/src/main/java/org/apache/jackrabbit/core/query/QueryHandlerContext.java (original)
+++ jackrabbit/trunk/jackrabbit-core/src/main/java/org/apache/jackrabbit/core/query/QueryHandlerContext.java Wed Dec 14 16:43:24 2011
@@ -22,6 +22,7 @@ import org.apache.jackrabbit.core.Cachin
 import org.apache.jackrabbit.core.HierarchyManager;
 import org.apache.jackrabbit.core.NamespaceRegistryImpl;
 import org.apache.jackrabbit.core.RepositoryContext;
+import org.apache.jackrabbit.core.cluster.ClusterNode;
 import org.apache.jackrabbit.core.id.NodeId;
 import org.apache.jackrabbit.core.nodetype.NodeTypeRegistry;
 import org.apache.jackrabbit.core.persistence.PersistenceManager;
@@ -36,6 +37,11 @@ import org.apache.jackrabbit.core.state.
 public class QueryHandlerContext {
 
     /**
+     * The workspace
+     */
+    private final String workspace;
+
+    /**
      * Repository context.
      */
     private final RepositoryContext repositoryContext;
@@ -78,22 +84,26 @@ public class QueryHandlerContext {
     /**
      * Creates a new context instance.
      *
-     * @param stateMgr         provides persistent item states.
-     * @param pm               the underlying persistence manager.
-     * @param rootId           the id of the root node.
-     * @param parentHandler    the parent query handler or <code>null</code> it
-     *                         there is no parent handler.
-     * @param excludedNodeId   id of the node that should be excluded from
-     *                         indexing. Any descendant of that node is also
-     *                         excluded from indexing.
+     * @param workspace          the workspace name.
+     * @param repositoryContext  the repository context.
+     * @param stateMgr           provides persistent item states.
+     * @param pm                 the underlying persistence manager.
+     * @param rootId             the id of the root node.
+     * @param parentHandler      the parent query handler or <code>null</code> if
+     *                           there is no parent handler.
+     * @param excludedNodeId     id of the node that should be excluded from
+     *                           indexing. Any descendant of that node is also
+     *                           excluded from indexing.
      */
     public QueryHandlerContext(
+            String workspace,
             RepositoryContext repositoryContext,
             SharedItemStateManager stateMgr,
             PersistenceManager pm,
             NodeId rootId,
             QueryHandler parentHandler,
             NodeId excludedNodeId) {
+        this.workspace = workspace;
         this.repositoryContext = repositoryContext;
         this.stateMgr = stateMgr;
         this.hmgr = new CachingHierarchyManager(rootId, stateMgr);
@@ -201,4 +211,17 @@ public class QueryHandlerContext {
         return repositoryContext.getExecutor();
     }
 
+    /**
+     * Returns the cluster node instance of this repository, or
+     * <code>null</code> if clustering is not enabled.
+     * 
+     * @return cluster node
+     */
+    public ClusterNode getClusterNode() {
+        return repositoryContext.getClusterNode();
+    }
+
+    public String getWorkspace() {
+        return workspace;
+    }
 }
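
The two accessors added above let a query handler tell whether clustering is enabled and which workspace it indexes. A small usage sketch (hypothetical class, not part of this commit; ClusterNode#getRevision() is the same call used further down in SearchIndex):

    import org.apache.jackrabbit.core.cluster.ClusterNode;
    import org.apache.jackrabbit.core.query.QueryHandlerContext;

    public final class ClusterIndexInfo {

        private ClusterIndexInfo() {
        }

        /**
         * Describes the clustering state of the given context, e.g. for
         * logging during index startup.
         */
        public static String describe(QueryHandlerContext context) {
            ClusterNode cn = context.getClusterNode();
            if (cn == null) {
                return "workspace '" + context.getWorkspace()
                        + "': clustering not enabled";
            }
            return "workspace '" + context.getWorkspace()
                    + "': clustered, local revision " + cn.getRevision();
        }
    }

Note that the system search manager created in RepositoryImpl passes null as the workspace name, so getWorkspace() can return null for that context.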

Modified: jackrabbit/trunk/jackrabbit-core/src/main/java/org/apache/jackrabbit/core/query/lucene/MultiIndex.java
URL: http://svn.apache.org/viewvc/jackrabbit/trunk/jackrabbit-core/src/main/java/org/apache/jackrabbit/core/query/lucene/MultiIndex.java?rev=1214329&r1=1214328&r2=1214329&view=diff
==============================================================================
--- jackrabbit/trunk/jackrabbit-core/src/main/java/org/apache/jackrabbit/core/query/lucene/MultiIndex.java (original)
+++ jackrabbit/trunk/jackrabbit-core/src/main/java/org/apache/jackrabbit/core/query/lucene/MultiIndex.java Wed Dec 14 16:43:24 2011
@@ -266,14 +266,13 @@ public class MultiIndex {
         merger.setMergeFactor(handler.getMergeFactor());
         merger.setMinMergeDocs(handler.getMinMergeDocs());
 
-        IndexingQueueStore store = new IndexingQueueStore(indexDir);
-
         // initialize indexing queue
-        this.indexingQueue = new IndexingQueue(store);
+        this.indexingQueue = new IndexingQueue(new IndexingQueueStore(indexDir));
 
         // open persistent indexes
-        for (Iterator<?> it = indexNames.iterator(); it.hasNext(); ) {
-            IndexInfo info = (IndexInfo) it.next();
+        Iterator<IndexInfo> iterator = indexNames.iterator();
+        while (iterator.hasNext()) {
+            IndexInfo info = iterator.next();
             String name = info.getName();
             // only open if it still exists
             // it is possible that indexNames still contains a name for
@@ -386,10 +385,11 @@ public class MultiIndex {
                 executeAndLog(new Start(Action.INTERNAL_TRANSACTION));
                 NodeState rootState = (NodeState) stateMgr.getItemState(rootId);
                 count = createIndex(rootState, rootPath, stateMgr, count);
+                checkIndexingQueue(true);
                 executeAndLog(new Commit(getTransactionId()));
                 log.debug("Created initial index for {} nodes", count);
                 releaseMultiReader();
-                scheduleFlushTask();
+                safeFlush();
             } catch (Exception e) {
                 String msg = "Error indexing workspace";
                 IOException ex = new IOException(msg);
@@ -397,6 +397,7 @@ public class MultiIndex {
                 throw ex;
             } finally {
                 reindexing = false;
+                scheduleFlushTask();
             }
         } else {
             throw new IllegalStateException("Index already present");
@@ -1167,7 +1168,8 @@ public class MultiIndex {
     private void commitVolatileIndex() throws IOException {
 
         // check if volatile index contains documents at all
-        if (volatileIndex.getNumDocuments() > 0) {
+        int volatileIndexDocuments = volatileIndex.getNumDocuments();
+        if (volatileIndexDocuments > 0) {
 
             long time = System.currentTimeMillis();
             // create index
@@ -1185,7 +1187,7 @@ public class MultiIndex {
             resetVolatileIndex();
 
             time = System.currentTimeMillis() - time;
-            log.debug("Committed in-memory index in " + time + "ms.");
+            log.debug("Committed in-memory index containing {} documents in {}ms.", volatileIndexDocuments, time);
         }
     }
 
@@ -1298,7 +1300,7 @@ public class MultiIndex {
             }
         }
     }
-    
+
     void safeFlush() throws IOException{
         synchronized (updateMonitor) {
             updateInProgress = true;

Modified: jackrabbit/trunk/jackrabbit-core/src/main/java/org/apache/jackrabbit/core/query/lucene/SearchIndex.java
URL: http://svn.apache.org/viewvc/jackrabbit/trunk/jackrabbit-core/src/main/java/org/apache/jackrabbit/core/query/lucene/SearchIndex.java?rev=1214329&r1=1214328&r2=1214329&view=diff
==============================================================================
--- jackrabbit/trunk/jackrabbit-core/src/main/java/org/apache/jackrabbit/core/query/lucene/SearchIndex.java (original)
+++ jackrabbit/trunk/jackrabbit-core/src/main/java/org/apache/jackrabbit/core/query/lucene/SearchIndex.java Wed Dec 14 16:43:24 2011
@@ -41,11 +41,25 @@ import javax.xml.parsers.ParserConfigura
 
 import org.apache.jackrabbit.core.HierarchyManager;
 import org.apache.jackrabbit.core.SessionImpl;
+import org.apache.jackrabbit.core.cluster.ChangeLogRecord;
+import org.apache.jackrabbit.core.cluster.ClusterNode;
+import org.apache.jackrabbit.core.cluster.ClusterRecord;
+import org.apache.jackrabbit.core.cluster.ClusterRecordDeserializer;
+import org.apache.jackrabbit.core.cluster.ClusterRecordProcessor;
+import org.apache.jackrabbit.core.cluster.LockRecord;
+import org.apache.jackrabbit.core.cluster.NamespaceRecord;
+import org.apache.jackrabbit.core.cluster.NodeTypeRecord;
+import org.apache.jackrabbit.core.cluster.PrivilegeRecord;
+import org.apache.jackrabbit.core.cluster.WorkspaceRecord;
 import org.apache.jackrabbit.core.fs.FileSystem;
 import org.apache.jackrabbit.core.fs.FileSystemException;
 import org.apache.jackrabbit.core.fs.FileSystemResource;
 import org.apache.jackrabbit.core.fs.local.LocalFileSystem;
 import org.apache.jackrabbit.core.id.NodeId;
+import org.apache.jackrabbit.core.journal.Journal;
+import org.apache.jackrabbit.core.journal.JournalException;
+import org.apache.jackrabbit.core.journal.Record;
+import org.apache.jackrabbit.core.journal.RecordIterator;
 import org.apache.jackrabbit.core.query.AbstractQueryHandler;
 import org.apache.jackrabbit.core.query.ExecutableQuery;
 import org.apache.jackrabbit.core.query.QueryHandler;
@@ -54,6 +68,7 @@ import org.apache.jackrabbit.core.query.
 import org.apache.jackrabbit.core.query.lucene.directory.FSDirectoryManager;
 import org.apache.jackrabbit.core.query.lucene.hits.AbstractHitCollector;
 import org.apache.jackrabbit.core.session.SessionContext;
+import org.apache.jackrabbit.core.state.ItemState;
 import org.apache.jackrabbit.core.state.ItemStateException;
 import org.apache.jackrabbit.core.state.ItemStateManager;
 import org.apache.jackrabbit.core.state.NoSuchItemStateException;
@@ -547,6 +562,7 @@ public class SearchIndex extends Abstrac
             }
             index.createInitialIndex(context.getItemStateManager(),
                     context.getRootId(), rootPath);
+            checkPendingJournalChanges(context);
         }
         if (consistencyCheckEnabled
                 && (index.getRedoLogApplied() || forceConsistencyCheck)) {
@@ -2477,6 +2493,46 @@ public class SearchIndex extends Abstrac
         this.redoLogFactoryClass = className;
     }
 
+    /**
+     * In the case of an initial index build operation, this checks if there are
+     * some new nodes pending in the journal and tries to preemptively delete
+     * them, to keep the index consistent.
+     * 
+     * See JCR-3162
+     * 
+     * @param context the query handler context, used to access the cluster
+     *            node and the workspace name
+     */
+    private void checkPendingJournalChanges(QueryHandlerContext context) {
+        ClusterNode cn = context.getClusterNode();
+        if (cn == null) {
+            return;
+        }
+
+        List<NodeId> addedIds = new ArrayList<NodeId>();
+        long rev = cn.getRevision();
+
+        List<ChangeLogRecord> changes = getChangeLogRecords(rev, context.getWorkspace());
+        Iterator<ChangeLogRecord> iterator = changes.iterator();
+        while (iterator.hasNext()) {
+            ChangeLogRecord record = iterator.next();
+            for (ItemState state : record.getChanges().addedStates()) {
+                if (!state.isNode()) {
+                    continue;
+                }
+                addedIds.add((NodeId) state.getId());
+            }
+        }
+        if (!addedIds.isEmpty()) {
+            Collection<NodeState> empty = Collections.emptyList();
+            try {
+                updateNodes(addedIds.iterator(), empty.iterator());
+            } catch (Exception e) {
+                log.error(e.getMessage(), e);
+            }
+        }
+    }
+
     //----------------------------< internal >----------------------------------
 
     /**
@@ -2490,4 +2546,79 @@ public class SearchIndex extends Abstrac
             throw new IOException("query handler closed and cannot be used anymore.");
         }
     }
+
+    /**
+     * Polls the underlying journal for events of the type ChangeLogRecord that
+     * happened after a given revision, on a given workspace.
+     *
+     * @param revision
+     *            starting revision
+     * @param workspace
+     *            the workspace name
+     * @return the matching change log records, possibly empty
+     */
+    private List<ChangeLogRecord> getChangeLogRecords(long revision,
+            final String workspace) {
+        log.debug(
+                "Get changes from the Journal for revision {} and workspace {}.",
+                revision, workspace);
+        ClusterNode cn = getContext().getClusterNode();
+        if (cn == null) {
+            return Collections.emptyList();
+        }
+        Journal journal = cn.getJournal();
+        final List<ChangeLogRecord> events = new ArrayList<ChangeLogRecord>();
+        ClusterRecordDeserializer deserializer = new ClusterRecordDeserializer();
+        RecordIterator records = null;
+        try {
+            records = journal.getRecords(revision);
+            while (records.hasNext()) {
+                Record record = records.nextRecord();
+                if (!record.getProducerId().equals(cn.getId())) {
+                    continue;
+                }
+                ClusterRecord r = null;
+                try {
+                    r = deserializer.deserialize(record);
+                } catch (JournalException e) {
+                    log.error(
+                            "Unable to read revision '" + record.getRevision()
+                                    + "'.", e);
+                }
+                if (r == null) {
+                    continue;
+                }
+                r.process(new ClusterRecordProcessor() {
+                    public void process(ChangeLogRecord record) {
+                        String eventW = record.getWorkspace();
+                        if (eventW != null ? eventW.equals(workspace) : workspace == null) {
+                            events.add(record);
+                        }
+                    }
+
+                    public void process(LockRecord record) {
+                    }
+
+                    public void process(NamespaceRecord record) {
+                    }
+
+                    public void process(NodeTypeRecord record) {
+                    }
+
+                    public void process(PrivilegeRecord record) {
+                    }
+
+                    public void process(WorkspaceRecord record) {
+                    }
+                });
+            }
+        } catch (JournalException e1) {
+            log.error(e1.getMessage(), e1);
+        } finally {
+            if (records != null) {
+                records.close();
+            }
+        }
+        return events;
+    }
 }
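
Taken together, the two new private methods implement a one-time catch-up after the initial index build on a clustered node: getChangeLogRecords() reads the journal from the cluster node's local revision, and checkPendingJournalChanges() passes the node ids added in those records to updateNodes() as removals (with an empty add iterator), preemptively deleting them from the freshly built index, presumably so that the subsequent journal replay can bring them back in without leaving inconsistent entries. A small diagnostic sketch built only on the Journal calls that appear above (hypothetical utility, not part of this commit):

    import org.apache.jackrabbit.core.cluster.ClusterNode;
    import org.apache.jackrabbit.core.journal.Journal;
    import org.apache.jackrabbit.core.journal.JournalException;
    import org.apache.jackrabbit.core.journal.RecordIterator;

    public final class JournalBacklog {

        private JournalBacklog() {
        }

        /**
         * Counts the journal records written after the given cluster node's
         * local revision, i.e. the backlog a sync would still replay.
         */
        public static long count(ClusterNode cn) throws JournalException {
            Journal journal = cn.getJournal();
            RecordIterator records = journal.getRecords(cn.getRevision());
            long backlog = 0;
            try {
                while (records.hasNext()) {
                    records.nextRecord();
                    backlog++;
                }
            } finally {
                records.close();
            }
            return backlog;
        }
    }

With clustering disabled, context.getClusterNode() returns null and both new methods are no-ops (checkPendingJournalChanges returns early, getChangeLogRecords returns an empty list).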

Added: jackrabbit/trunk/jackrabbit-core/src/test/java/org/apache/jackrabbit/core/cluster/DbClusterTestJCR3162.java
URL: http://svn.apache.org/viewvc/jackrabbit/trunk/jackrabbit-core/src/test/java/org/apache/jackrabbit/core/cluster/DbClusterTestJCR3162.java?rev=1214329&view=auto
==============================================================================
--- jackrabbit/trunk/jackrabbit-core/src/test/java/org/apache/jackrabbit/core/cluster/DbClusterTestJCR3162.java (added)
+++ jackrabbit/trunk/jackrabbit-core/src/test/java/org/apache/jackrabbit/core/cluster/DbClusterTestJCR3162.java Wed Dec 14 16:43:24 2011
@@ -0,0 +1,188 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.jackrabbit.core.cluster;
+
+import java.io.File;
+import java.io.IOException;
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.PreparedStatement;
+import java.util.UUID;
+
+import javax.jcr.Node;
+import javax.jcr.RepositoryException;
+import javax.jcr.Session;
+import javax.jcr.SimpleCredentials;
+import javax.jcr.query.Query;
+import javax.jcr.query.RowIterator;
+
+import junit.framework.Assert;
+
+import org.apache.commons.io.FileUtils;
+import org.apache.jackrabbit.JcrConstants;
+import org.apache.jackrabbit.commons.JcrUtils;
+import org.apache.jackrabbit.core.RepositoryImpl;
+import org.apache.jackrabbit.core.config.RepositoryConfig;
+import org.h2.tools.Server;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+/**
+ * Test for JCR-3162
+ */
+public class DbClusterTestJCR3162 {
+
+    private static final SimpleCredentials ADMIN = new SimpleCredentials(
+            "admin", "admin".toCharArray());
+
+    private Server server1;
+    private Server server2;
+
+    private RepositoryImpl rep1;
+    private RepositoryImpl rep2;
+
+    private String clusterId1 = UUID.randomUUID().toString();
+    private String clusterId2 = UUID.randomUUID().toString();
+
+    @Before
+    public void setUp() throws Exception {
+        deleteAll();
+        server1 = Server.createTcpServer("-tcpPort", "9001", "-baseDir",
+                "./target/dbClusterTest/db1", "-tcpAllowOthers").start();
+        server2 = Server.createTcpServer("-tcpPort", "9002", "-baseDir",
+                "./target/dbClusterTest/db2", "-tcpAllowOthers").start();
+        FileUtils
+                .copyFile(
+                        new File(
+                                "./src/test/resources/org/apache/jackrabbit/core/cluster/repository-h2.xml"),
+                        new File("./target/dbClusterTest/node1/repository.xml"));
+        FileUtils
+                .copyFile(
+                        new File(
+                                "./src/test/resources/org/apache/jackrabbit/core/cluster/repository-h2.xml"),
+                        new File("./target/dbClusterTest/node2/repository.xml"));
+
+        System.setProperty(ClusterNode.SYSTEM_PROPERTY_NODE_ID, clusterId1);
+        rep1 = RepositoryImpl.create(RepositoryConfig.create(new File(
+                "./target/dbClusterTest/node1")));
+
+        System.setProperty(ClusterNode.SYSTEM_PROPERTY_NODE_ID, clusterId2);
+
+    }
+
+    @After
+    public void tearDown() throws Exception {
+        try {
+            rep1.shutdown();
+            if (rep2 != null) {
+                rep2.shutdown();
+            }
+        } finally {
+            server1.stop();
+            server2.stop();
+            deleteAll();
+        }
+    }
+
+    private void deleteAll() throws IOException {
+        FileUtils.deleteDirectory(new File("./target/dbClusterTest"));
+    }
+
+    @Test
+    public void test() throws RepositoryException {
+        int count = 5;
+
+        // 1. create
+        Session s1 = rep1.login(ADMIN);
+        Node n = s1.getRootNode().addNode(
+                "test-cluster-" + System.currentTimeMillis(),
+                JcrConstants.NT_UNSTRUCTURED);
+        for (int i = 0; i < count; i++) {
+            n.addNode("child_" + i);
+        }
+        s1.save();
+
+        // 2. rollback journal revision
+        resetJournalRev();
+
+        // 3. sync & verify
+        // rep1.shutdown();
+
+        // start #2 with an empty search index
+        rep2 = RepositoryImpl.create(RepositoryConfig.create(new File(
+                "./target/dbClusterTest/node2")));
+
+        // verify
+        Session s2 = rep2.login(ADMIN);
+        checkConsistency(s2, n.getPath(), count);
+    }
+
+    private void resetJournalRev() {
+        Connection con = null;
+        try {
+            con = DriverManager.getConnection(
+                    "jdbc:h2:tcp://localhost:9001,localhost:9002/db", "sa",
+                    "sa");
+            PreparedStatement prep = con
+                    .prepareStatement("update JOURNAL_LOCAL_REVISIONS set REVISION_ID=0 where JOURNAL_ID=?");
+            prep.setString(1, clusterId2);
+            prep.executeUpdate();
+            prep.close();
+        } catch (Exception e) {
+            e.printStackTrace();
+            Assert.fail("Unable to reset revision to 0. " + e.getMessage());
+        } finally {
+            if (con != null) {
+                try {
+                    con.close();
+                } catch (Exception e) {
+                    // e.printStackTrace();
+                }
+            }
+        }
+    }
+
+    private void checkConsistency(Session s, String path, int nodes)
+            throws RepositoryException {
+
+        s.refresh(true);
+        Node n = s.getNode(path);
+        Assert.assertNotNull(n);
+
+        int found = 0;
+        for (Node c : JcrUtils.getChildNodes(n)) {
+            found++;
+        }
+        Assert.assertEquals(nodes, found);
+
+        RowIterator result = s
+                .getWorkspace()
+                .getQueryManager()
+                .createQuery(
+                        "SELECT * FROM [" + JcrConstants.NT_UNSTRUCTURED
+                                + "] as NODE WHERE ischildnode(NODE, [" + path
+                                + "])", Query.JCR_SQL2).execute().getRows();
+
+        int foundViaQuery = 0;
+        while (result.hasNext()) {
+            result.next();
+            foundViaQuery++;
+        }
+        Assert.assertEquals(nodes, foundViaQuery);
+    }
+}

Propchange: jackrabbit/trunk/jackrabbit-core/src/test/java/org/apache/jackrabbit/core/cluster/DbClusterTestJCR3162.java
------------------------------------------------------------------------------
    svn:mime-type = text/plain