Posted to commits@lucene.apache.org by ma...@apache.org on 2012/01/26 03:22:32 UTC

svn commit: r1236024 - /lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/ChaosMonkeyNothingIsSafeTest.java

Author: markrmiller
Date: Thu Jan 26 02:22:31 2012
New Revision: 1236024

URL: http://svn.apache.org/viewvc?rev=1236024&view=rev
Log:
print zk state if test is not successful
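
For quick reference, the change wraps the existing test body in a try/finally, flips a success flag as the last step, and prints the ZooKeeper layout only when that flag was never set. A minimal sketch of that idiom follows; the class and helper names are illustrative stand-ins, not the actual test code shown in the diff below.

    // Illustrative sketch only (hypothetical names): run the test body, set a
    // flag as the very last step, and dump diagnostic state in the finally
    // block when the flag was never set.
    public class FailureDumpSketch {

      public void doTest() throws Exception {
        boolean successful = false;
        try {
          runTestBody();        // stands in for the indexing/chaos-monkey steps
          successful = true;    // reached only if nothing above threw
        } finally {
          if (!successful) {
            printDiagnostics(); // stands in for printLayout(), which dumps ZK state
          }
        }
      }

      private void runTestBody() throws Exception {
        // real test steps would go here
      }

      private void printDiagnostics() {
        System.out.println("dumping cluster layout for post-mortem debugging");
      }
    }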

Modified:
    lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/ChaosMonkeyNothingIsSafeTest.java

Modified: lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/ChaosMonkeyNothingIsSafeTest.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/ChaosMonkeyNothingIsSafeTest.java?rev=1236024&r1=1236023&r2=1236024&view=diff
==============================================================================
--- lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/ChaosMonkeyNothingIsSafeTest.java (original)
+++ lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/ChaosMonkeyNothingIsSafeTest.java Thu Jan 26 02:22:31 2012
@@ -77,78 +77,88 @@ public class ChaosMonkeyNothingIsSafeTes
   
   @Override
   public void doTest() throws Exception {
-    
-    handle.clear();
-    handle.put("QTime", SKIPVAL);
-    handle.put("timestamp", SKIPVAL);
-    
-    // we cannot do delete by query
-    // as it's not supported for recovery
-    //del("*:*");
-    
-    List<StopableIndexingThread> threads = new ArrayList<StopableIndexingThread>();
-    int threadCount = 1;
-    int i = 0;
-    for (i = 0; i < threadCount; i++) {
-      StopableIndexingThread indexThread = new StopableIndexingThread(i * 50000, true);
-      threads.add(indexThread);
-      indexThread.start();
-    }
-    
-    FullThrottleStopableIndexingThread ftIndexThread = new FullThrottleStopableIndexingThread(
-        clients, i * 50000, true);
-    threads.add(ftIndexThread);
-    ftIndexThread.start();
-    
-    chaosMonkey.startTheMonkey(true, 1500);
+    boolean testsSuccesful = false;
     try {
-      Thread.sleep(atLeast(6000));
+      handle.clear();
+      handle.put("QTime", SKIPVAL);
+      handle.put("timestamp", SKIPVAL);
+      
+      // we cannot do delete by query
+      // as it's not supported for recovery
+      // del("*:*");
+      
+      List<StopableIndexingThread> threads = new ArrayList<StopableIndexingThread>();
+      int threadCount = 1;
+      int i = 0;
+      for (i = 0; i < threadCount; i++) {
+        StopableIndexingThread indexThread = new StopableIndexingThread(
+            i * 50000, true);
+        threads.add(indexThread);
+        indexThread.start();
+      }
+      
+      FullThrottleStopableIndexingThread ftIndexThread = new FullThrottleStopableIndexingThread(
+          clients, i * 50000, true);
+      threads.add(ftIndexThread);
+      ftIndexThread.start();
+      
+      chaosMonkey.startTheMonkey(true, 1500);
+      try {
+        Thread.sleep(atLeast(6000));
+      } finally {
+        chaosMonkey.stopTheMonkey();
+      }
+      
+      for (StopableIndexingThread indexThread : threads) {
+        indexThread.safeStop();
+      }
+      
+      // wait for stop...
+      for (StopableIndexingThread indexThread : threads) {
+        indexThread.join();
+      }
+      
+      // fails will happen...
+      // for (StopableIndexingThread indexThread : threads) {
+      // assertEquals(0, indexThread.getFails());
+      // }
+      
+      // try and wait for any replications and what not to finish...
+      
+      Thread.sleep(2000);
+      
+      // wait until there are no recoveries...
+      waitForThingsToLevelOut();
+      
+      // make sure we again have leaders for each shard
+      for (int j = 1; j < sliceCount; j++) {
+        zkStateReader.getLeaderProps(DEFAULT_COLLECTION, "shard" + j, 10000);
+      }
+      
+      commit();
+      
+      // TODO: assert we didnt kill everyone
+      
+      zkStateReader.updateCloudState(true);
+      assertTrue(zkStateReader.getCloudState().getLiveNodes().size() > 0);
+      
+      checkShardConsistency(false, true);
+      
+      // ensure we have added more than 0 docs
+      long cloudClientDocs = cloudClient.query(new SolrQuery("*:*"))
+          .getResults().getNumFound();
+      
+      assertTrue(cloudClientDocs > 0);
+      
+      if (VERBOSE) System.out.println("control docs:"
+          + controlClient.query(new SolrQuery("*:*")).getResults()
+              .getNumFound() + "\n\n");
+      testsSuccesful = true;
     } finally {
-      chaosMonkey.stopTheMonkey();
-    }
-    
-    for (StopableIndexingThread indexThread : threads) {
-      indexThread.safeStop();
-    }
-    
-    // wait for stop...
-    for (StopableIndexingThread indexThread : threads) {
-      indexThread.join();
-    }
-    
-    
-    // fails will happen...
-//    for (StopableIndexingThread indexThread : threads) {
-//      assertEquals(0, indexThread.getFails());
-//    }
-    
-    // try and wait for any replications and what not to finish...
-    
-    Thread.sleep(2000);
-    
-    // wait until there are no recoveries...
-    waitForThingsToLevelOut();
-    
-    // make sure we again have leaders for each shard
-    for (int j = 1; j < sliceCount; j++) {
-      zkStateReader.getLeaderProps(DEFAULT_COLLECTION, "shard" + j, 10000);
+      if (!testsSuccesful) {
+        printLayout();
+      }
     }
-
-    commit();
-    
-    // TODO: assert we didnt kill everyone
-    
-    zkStateReader.updateCloudState(true);
-    assertTrue(zkStateReader.getCloudState().getLiveNodes().size() > 0);
-    
-    checkShardConsistency(false, false);
-    
-    // ensure we have added more than 0 docs
-    long cloudClientDocs = cloudClient.query(new SolrQuery("*:*")).getResults().getNumFound();
-
-    assertTrue(cloudClientDocs > 0);
-    
-    if (VERBOSE) System.out.println("control docs:" + controlClient.query(new SolrQuery("*:*")).getResults().getNumFound() + "\n\n");
   }
 
   private void waitForThingsToLevelOut() throws KeeperException,