You are viewing a plain text version of this content. The canonical version is available at the commit URL given below (http://svn.apache.org/r1559615).
Posted to commits@lucene.apache.org by ma...@apache.org on 2014/01/20 01:37:36 UTC
svn commit: r1559615 - in /lucene/dev/branches/branch_4x: ./ solr/ solr/core/
solr/core/src/test/org/apache/solr/cloud/hdfs/StressHdfsTest.java
Author: markrmiller
Date: Mon Jan 20 00:37:35 2014
New Revision: 1559615
URL: http://svn.apache.org/r1559615
Log:
tests: harden this stress test
Modified:
lucene/dev/branches/branch_4x/ (props changed)
lucene/dev/branches/branch_4x/solr/ (props changed)
lucene/dev/branches/branch_4x/solr/core/ (props changed)
lucene/dev/branches/branch_4x/solr/core/src/test/org/apache/solr/cloud/hdfs/StressHdfsTest.java
Modified: lucene/dev/branches/branch_4x/solr/core/src/test/org/apache/solr/cloud/hdfs/StressHdfsTest.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/solr/core/src/test/org/apache/solr/cloud/hdfs/StressHdfsTest.java?rev=1559615&r1=1559614&r2=1559615&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/solr/core/src/test/org/apache/solr/cloud/hdfs/StressHdfsTest.java (original)
+++ lucene/dev/branches/branch_4x/solr/core/src/test/org/apache/solr/cloud/hdfs/StressHdfsTest.java Mon Jan 20 00:37:35 2014
@@ -97,18 +97,29 @@ public class StressHdfsTest extends Basi
URISyntaxException {
boolean overshard = random().nextBoolean();
+ int rep;
+ int nShards;
+ int maxReplicasPerNode;
if (overshard) {
- createCollection(DELETE_DATA_DIR_COLLECTION, shardCount * 2, 1, 2);
+ nShards = shardCount * 2;
+ maxReplicasPerNode = 8;
+ rep = 2;
} else {
- int rep = shardCount / 2;
- if (rep == 0) rep = 1;
- createCollection(DELETE_DATA_DIR_COLLECTION, rep, 2, 1);
+ nShards = shardCount / 2;
+ maxReplicasPerNode = 1;
+ rep = 2;
+ if (nShards == 0) nShards = 1;
}
+
+ createCollection(DELETE_DATA_DIR_COLLECTION, nShards, rep, maxReplicasPerNode);
waitForRecoveriesToFinish(DELETE_DATA_DIR_COLLECTION, false);
cloudClient.setDefaultCollection(DELETE_DATA_DIR_COLLECTION);
cloudClient.getZkStateReader().updateClusterState(true);
+ for (int i = 1; i < nShards + 1; i++) {
+ cloudClient.getZkStateReader().getLeaderRetry(DELETE_DATA_DIR_COLLECTION, "shard" + i, 15000);
+ }
// collect the data dirs
List<String> dataDirs = new ArrayList<String>();