Posted to commits@lucene.apache.org by ma...@apache.org on 2013/02/28 21:04:20 UTC

svn commit: r1451322 - /lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/BasicDistributedZk2Test.java

Author: markrmiller
Date: Thu Feb 28 20:04:20 2013
New Revision: 1451322

URL: http://svn.apache.org/r1451322
Log:
tests: speed up test a bit

Modified:
    lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/BasicDistributedZk2Test.java

Modified: lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/BasicDistributedZk2Test.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/BasicDistributedZk2Test.java?rev=1451322&r1=1451321&r2=1451322&view=diff
==============================================================================
--- lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/BasicDistributedZk2Test.java (original)
+++ lucene/dev/trunk/solr/core/src/test/org/apache/solr/cloud/BasicDistributedZk2Test.java Thu Feb 28 20:04:20 2013
@@ -60,13 +60,17 @@ public class BasicDistributedZk2Test ext
 
   @BeforeClass
   public static void beforeThisClass2() throws Exception {
-    // TODO: we use an fs based dir because something
-    // like a ram dir will not recover correctly right now
-    // because tran log will still exist on restart and ram
-    // dir will not persist - perhaps translog can empty on
-    // start if using an EphemeralDirectoryFactory 
-    useFactory(null);
+
+  }
+  
+  public BasicDistributedZk2Test() {
+    super();
+    fixShardCount = true;
+    
+    sliceCount = 2;
+    shardCount = 3;
   }
+  
   /*
    * (non-Javadoc)
    * 
@@ -253,17 +257,17 @@ public class BasicDistributedZk2Test ext
     
     int oldLiveNodes = cloudClient.getZkStateReader().getZkClient().getChildren(ZkStateReader.LIVE_NODES_ZKNODE, null, true).size();
     
-    assertEquals(5, oldLiveNodes);
+    assertEquals(4, oldLiveNodes);
     
     // kill a shard
-    CloudJettyRunner deadShard = chaosMonkey.stopShard(SHARD2, 0);
+    CloudJettyRunner deadShard = chaosMonkey.stopShard(SHARD1, 0);
 
 
     // we are careful to make sure the downed node is no longer in the state,
     // because on some systems (especially freebsd w/ blackhole enabled), trying
     // to talk to a downed node causes grief
     Set<CloudJettyRunner> jetties = new HashSet<CloudJettyRunner>();
-    jetties.addAll(shardToJetty.get(SHARD2));
+    jetties.addAll(shardToJetty.get(SHARD1));
     jetties.remove(deadShard);
     
     // ensure shard is dead
@@ -277,8 +281,6 @@ public class BasicDistributedZk2Test ext
     
     commit();
     
-    printLayout();
-    
     query("q", "*:*", "sort", "n_tl1 desc");
     
     // long cloudClientDocs = cloudClient.query(new
@@ -290,7 +292,8 @@ public class BasicDistributedZk2Test ext
   
     long numFound1 = cloudClient.query(new SolrQuery("*:*")).getResults().getNumFound();
     
-    index_specific(shardToJetty.get(SHARD2).get(1).client.solrClient, id, 1000, i1, 108, t1,
+    cloudClient.getZkStateReader().getLeaderRetry(DEFAULT_COLLECTION, SHARD1, 15000);
+    index_specific(shardToJetty.get(SHARD1).get(1).client.solrClient, id, 1000, i1, 108, t1,
         "specific doc!");
     
     commit();
@@ -363,7 +366,7 @@ public class BasicDistributedZk2Test ext
     
     waitForRecoveriesToFinish(false);
     
-    deadShardCount = shardToJetty.get(SHARD2).get(0).client.solrClient
+    deadShardCount = shardToJetty.get(SHARD1).get(0).client.solrClient
         .query(query).getResults().getNumFound();
     // if we properly recovered, we should now have the couple missing docs that
     // came in while shard was down
@@ -401,15 +404,12 @@ public class BasicDistributedZk2Test ext
     
     // try a backup command
     final HttpSolrServer client = (HttpSolrServer) shardToJetty.get(SHARD2).get(0).client.solrClient;
-    System.out.println("base url: "+ client.getBaseURL());
     ModifiableSolrParams params = new ModifiableSolrParams();
     params.set("qt", "/replication");
     params.set("command", "backup");
 
     QueryRequest request = new QueryRequest(params);
     NamedList<Object> results = client.request(request );
-    System.out.println("results:" + results);
-    
     
     checkForBackupSuccess(client);
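
For context, the key functional change in the second hunk is that the test now waits for SHARD1's leader to be elected (via getLeaderRetry) before indexing to a specific replica, instead of racing the election that follows chaosMonkey.stopShard. Below is a minimal standalone sketch of that pattern using SolrJ classes of this era (HttpSolrServer, ZkStateReader); the helper name, the "collection1"/"shard1" literals, and the document fields are illustrative and not part of the commit:

    import org.apache.solr.client.solrj.impl.HttpSolrServer;
    import org.apache.solr.common.SolrInputDocument;
    import org.apache.solr.common.cloud.ZkStateReader;

    class LeaderWaitSketch {
      // Hypothetical helper: block until the shard has an elected leader,
      // then index one document against a specific replica's HTTP endpoint.
      static void indexAfterLeaderElected(ZkStateReader zkStateReader,
          HttpSolrServer replicaClient) throws Exception {
        // Same call the commit adds: retry until a leader is registered in
        // ZooKeeper or the 15 second timeout expires (throws if none appears).
        zkStateReader.getLeaderRetry("collection1", "shard1", 15000);

        SolrInputDocument doc = new SolrInputDocument();
        doc.addField("id", "1000");
        doc.addField("t1", "specific doc!");
        replicaClient.add(doc);   // a non-leader replica forwards the update to the leader
        replicaClient.commit();
      }
    }

Waiting on getLeaderRetry before the index_specific call is what keeps the update from being sent while the shard killed earlier in the test is still between leaders.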