Posted to commits@lucene.apache.org by rm...@apache.org on 2013/02/18 03:37:45 UTC

svn commit: r1447125 [2/2] - in /lucene/dev/branches/lucene4765: ./ dev-tools/ dev-tools/maven/ dev-tools/maven/lucene/ dev-tools/maven/lucene/analysis/common/ dev-tools/maven/lucene/analysis/icu/ dev-tools/maven/lucene/analysis/kuromoji/ dev-tools/mav...

Modified: lucene/dev/branches/lucene4765/dev-tools/maven/solr/webapp/pom.xml.template
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene4765/dev-tools/maven/solr/webapp/pom.xml.template?rev=1447125&r1=1447124&r2=1447125&view=diff
==============================================================================
--- lucene/dev/branches/lucene4765/dev-tools/maven/solr/webapp/pom.xml.template (original)
+++ lucene/dev/branches/lucene4765/dev-tools/maven/solr/webapp/pom.xml.template Mon Feb 18 02:37:43 2013
@@ -33,8 +33,8 @@
   <description>Apache Solr Search Server</description>
   <properties>
     <module-directory>solr/webapp</module-directory>
-    <top-level>../../..</top-level>
-    <module-path>${top-level}/${module-directory}</module-path>
+    <relative-top-level>../../..</relative-top-level>
+    <module-path>${relative-top-level}/${module-directory}</module-path>
   </properties>
   <scm>
     <connection>scm:svn:${vc-anonymous-base-url}/${module-directory}</connection>
@@ -114,6 +114,20 @@
           </systemProperties>
         </configuration>
       </plugin>
+      <plugin>
+        <groupId>de.thetaphi</groupId>
+        <artifactId>forbiddenapis</artifactId>
+        <executions>
+          <execution>
+            <id>solr-check-forbidden-executors-and-jdk-unsafe-and-deprecated</id>
+            <phase>none</phase> <!-- Block inherited execution -->
+          </execution>
+          <execution>
+            <id>solr-test-check-forbidden-test-apis-and-jdk-unsafe-and-deprecated</id>
+            <phase>none</phase> <!-- Block inherited execution -->
+          </execution>
+        </executions>
+      </plugin>
     </plugins>
   </build>
 </project>

Modified: lucene/dev/branches/lucene4765/lucene/core/src/test/org/apache/lucene/index/TestConcurrentMergeScheduler.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene4765/lucene/core/src/test/org/apache/lucene/index/TestConcurrentMergeScheduler.java?rev=1447125&r1=1447124&r2=1447125&view=diff
==============================================================================
--- lucene/dev/branches/lucene4765/lucene/core/src/test/org/apache/lucene/index/TestConcurrentMergeScheduler.java (original)
+++ lucene/dev/branches/lucene4765/lucene/core/src/test/org/apache/lucene/index/TestConcurrentMergeScheduler.java Mon Feb 18 02:37:43 2013
@@ -24,8 +24,10 @@ import java.util.concurrent.atomic.Atomi
 import java.util.concurrent.atomic.AtomicInteger;
 
 import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.codecs.lucene41.Lucene41PostingsFormat;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
+import org.apache.lucene.document.StringField;
 import org.apache.lucene.document.TextField;
 import org.apache.lucene.index.IndexWriterConfig.OpenMode;
 import org.apache.lucene.store.Directory;
@@ -343,14 +345,20 @@ public class TestConcurrentMergeSchedule
 
   public void testTotalBytesSize() throws Exception {
     Directory d = newDirectory();
+    if (d instanceof MockDirectoryWrapper) {
+      ((MockDirectoryWrapper)d).setThrottling(MockDirectoryWrapper.Throttling.NEVER);
+    }
     IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()));
     iwc.setMaxBufferedDocs(5);
     iwc.setMergeScheduler(new TrackingCMS());
-    RandomIndexWriter w = new RandomIndexWriter(random(), d);
-    for(int i=0;i<100000;i++) {
+    if (_TestUtil.getPostingsFormat("id").equals("SimpleText")) {
+      // no
+      iwc.setCodec(_TestUtil.alwaysPostingsFormat(new Lucene41PostingsFormat()));
+    }
+    RandomIndexWriter w = new RandomIndexWriter(random(), d, iwc);
+    for(int i=0;i<1000;i++) {
       Document doc = new Document();
-      doc.add(newStringField("id", ""+i, Field.Store.NO));
-      doc.add(newTextField("field", "here is some text", Field.Store.NO));
+      doc.add(new StringField("id", ""+i, Field.Store.NO));
       w.addDocument(doc);
 
       if (random().nextBoolean()) {
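
Pulled together, the test-setup pattern the hunk above introduces looks like the
following (a consolidated sketch of the committed change, inside a LuceneTestCase
subclass; it uses no API beyond what the diff itself already uses):

    Directory d = newDirectory();
    if (d instanceof MockDirectoryWrapper) {
      // never simulate slow I/O: this test adds many documents in a tight loop
      ((MockDirectoryWrapper) d).setThrottling(MockDirectoryWrapper.Throttling.NEVER);
    }
    IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()));
    iwc.setMaxBufferedDocs(5);
    iwc.setMergeScheduler(new TrackingCMS());
    if (_TestUtil.getPostingsFormat("id").equals("SimpleText")) {
      // avoid the (slow, debugging-oriented) SimpleText postings format for this
      // many unique ids; pin the Lucene41 postings format instead
      iwc.setCodec(_TestUtil.alwaysPostingsFormat(new Lucene41PostingsFormat()));
    }
    // the config must actually be handed to the writer, otherwise the settings above are ignored
    RandomIndexWriter w = new RandomIndexWriter(random(), d, iwc);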

Modified: lucene/dev/branches/lucene4765/lucene/test-framework/src/java/org/apache/lucene/util/LuceneTestCase.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene4765/lucene/test-framework/src/java/org/apache/lucene/util/LuceneTestCase.java?rev=1447125&r1=1447124&r2=1447125&view=diff
==============================================================================
--- lucene/dev/branches/lucene4765/lucene/test-framework/src/java/org/apache/lucene/util/LuceneTestCase.java (original)
+++ lucene/dev/branches/lucene4765/lucene/test-framework/src/java/org/apache/lucene/util/LuceneTestCase.java Mon Feb 18 02:37:43 2013
@@ -1247,8 +1247,10 @@ public abstract class LuceneTestCase ext
       if (maybeWrap) {
         r = maybeWrapReader(r);
       }
-      if (r instanceof AtomicReader) {
-        _TestUtil.checkReader((AtomicReader)r);
+      if (rarely() && r instanceof AtomicReader) {
+        // TODO: not useful to check DirectoryReader (redundant with checkindex)
+        // but maybe sometimes run this on the other crazy readers maybeWrapReader creates?
+        _TestUtil.checkReader(r);
       }
       IndexSearcher ret = random.nextBoolean() ? new AssertingIndexSearcher(random, r) : new AssertingIndexSearcher(random, r.getContext());
       ret.setSimilarity(classEnvRule.similarity);
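
The rarely() gate added above is the test framework's idiom for expensive checks:
it returns true only for a small random fraction of calls (more often under
nightly runs or a raised tests.multiplier), so the check keeps getting coverage
across a randomized run without slowing every test. A minimal sketch of the
idiom, inside any LuceneTestCase subclass, given an already-open reader:

    if (rarely()) {
      // deep consistency checks on the reader; too costly to run every time
      _TestUtil.checkReader(reader);
    }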

Modified: lucene/dev/branches/lucene4765/solr/CHANGES.txt
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene4765/solr/CHANGES.txt?rev=1447125&r1=1447124&r2=1447125&view=diff
==============================================================================
--- lucene/dev/branches/lucene4765/solr/CHANGES.txt (original)
+++ lucene/dev/branches/lucene4765/solr/CHANGES.txt Mon Feb 18 02:37:43 2013
@@ -79,6 +79,8 @@ New Features
 
 * SOLR-3855: Doc values support. (Adrien Grand)
 
+* SOLR-4417: Reopen the IndexWriter on SolrCore reload. (Mark Miller)
+
 Bug Fixes
 ----------------------
 
@@ -137,6 +139,13 @@ Bug Fixes
 * SOLR-4467: Ephemeral directory implementations may not recover correctly 
   because the code to clear the tlog files on startup is off. (Mark Miller)
 
+* SOLR-4413: Fix SolrCore#getIndexDir() to return the current index directory.
+  (Gregg Donovan, Mark Miller)
+  
+* SOLR-4469: A new IndexWriter must be opened on SolrCore reload when the index
+  directory has changed and the previous SolrCore's state should not be 
+  propagated. (Mark Miller, Gregg Donovan)
+
 Optimizations
 ----------------------
 

Modified: lucene/dev/branches/lucene4765/solr/core/src/java/org/apache/solr/core/SolrCore.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene4765/solr/core/src/java/org/apache/solr/core/SolrCore.java?rev=1447125&r1=1447124&r2=1447125&view=diff
==============================================================================
--- lucene/dev/branches/lucene4765/solr/core/src/java/org/apache/solr/core/SolrCore.java (original)
+++ lucene/dev/branches/lucene4765/solr/core/src/java/org/apache/solr/core/SolrCore.java Mon Feb 18 02:37:43 2013
@@ -218,8 +218,13 @@ public final class SolrCore implements S
     return dataDir;
   }
 
-  public String getIndexDir() {  
-    return dataDir + "index/";
+  public String getIndexDir() {
+    synchronized (searcherLock) {
+      if (_searcher == null) return getNewIndexDir();
+      SolrIndexSearcher searcher = _searcher.get();
+      return searcher.getPath() == null ? dataDir + "index/" : searcher
+          .getPath();
+    }
   }
 
 
@@ -386,7 +391,6 @@ public final class SolrCore implements S
   
   public SolrCore reload(SolrResourceLoader resourceLoader, SolrCore prev) throws IOException,
       ParserConfigurationException, SAXException {
-    // TODO - what if indexwriter settings have changed
     
     SolrConfig config = new SolrConfig(resourceLoader,
         getSolrConfig().getName(), null);
@@ -396,9 +400,19 @@ public final class SolrCore implements S
     
     solrCoreState.increfSolrCoreState();
     
+    if (!getNewIndexDir().equals(getIndexDir())) {
+      // the directory is changing, don't pass on state
+      prev = null;
+    }
+    
     SolrCore core = new SolrCore(getName(), getDataDir(), config,
         schema, coreDescriptor, updateHandler, prev);
     core.solrDelPolicy = this.solrDelPolicy;
+    
+    core.getUpdateHandler().getSolrCoreState().newIndexWriter(core, false, false);
+    
+    core.getSearcher(true, false, null, true);
+    
     return core;
   }
 
@@ -1360,7 +1374,7 @@ public final class SolrCore implements S
         }
 
        // for now, turn off caches if this is for a realtime reader (caches take a little while to instantiate)
-        tmp = new SolrIndexSearcher(this, schema, (realtime ? "realtime":"main"), newReader, true, !realtime, true, directoryFactory);
+        tmp = new SolrIndexSearcher(this, newIndexDir, schema, (realtime ? "realtime":"main"), newReader, true, !realtime, true, directoryFactory);
 
       } else {
         // newestSearcher == null at this point
@@ -1370,7 +1384,7 @@ public final class SolrCore implements S
           // so that we pick up any uncommitted changes and so we don't go backwards
           // in time on a core reload
           DirectoryReader newReader = newReaderCreator.call();
-          tmp = new SolrIndexSearcher(this, schema, (realtime ? "realtime":"main"), newReader, true, !realtime, true, directoryFactory);
+          tmp = new SolrIndexSearcher(this, newIndexDir, schema, (realtime ? "realtime":"main"), newReader, true, !realtime, true, directoryFactory);
         } else {
          // normal open that happens at startup
         // verbose("non-reopen START:");
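
Condensed, the reload path added to SolrCore above makes the following decision
(a simplified sketch of the committed change, not the full method):

    // If the index directory on disk no longer matches the one the current
    // searcher has open, the previous core's searcher/writer state must not be
    // carried into the reloaded core.
    if (!getNewIndexDir().equals(getIndexDir())) {
      prev = null;   // directory changed: start the new core from fresh state
    }

    SolrCore core = new SolrCore(getName(), getDataDir(), config, schema,
        coreDescriptor, updateHandler, prev);
    core.solrDelPolicy = this.solrDelPolicy;

    // open a new IndexWriter against the (possibly new) index directory ...
    core.getUpdateHandler().getSolrCoreState().newIndexWriter(core, false, false);
    // ... and trigger a new registered searcher so the reloaded core does not
    // serve readers from the old directory
    core.getSearcher(true, false, null, true);

    return core;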

Modified: lucene/dev/branches/lucene4765/solr/core/src/java/org/apache/solr/handler/SnapShooter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene4765/solr/core/src/java/org/apache/solr/handler/SnapShooter.java?rev=1447125&r1=1447124&r2=1447125&view=diff
==============================================================================
--- lucene/dev/branches/lucene4765/solr/core/src/java/org/apache/solr/handler/SnapShooter.java (original)
+++ lucene/dev/branches/lucene4765/solr/core/src/java/org/apache/solr/handler/SnapShooter.java Mon Feb 18 02:37:43 2013
@@ -103,7 +103,7 @@ public class SnapShooter {
       Collection<String> files = indexCommit.getFileNames();
       FileCopier fileCopier = new FileCopier();
       
-      Directory dir = solrCore.getDirectoryFactory().get(solrCore.getNewIndexDir(), DirContext.DEFAULT, solrCore.getSolrConfig().indexConfig.lockType);
+      Directory dir = solrCore.getDirectoryFactory().get(solrCore.getIndexDir(), DirContext.DEFAULT, solrCore.getSolrConfig().indexConfig.lockType);
       try {
         fileCopier.copyFiles(dir, files, snapShotDir);
       } finally {

Modified: lucene/dev/branches/lucene4765/solr/core/src/java/org/apache/solr/handler/admin/CoreAdminHandler.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene4765/solr/core/src/java/org/apache/solr/handler/admin/CoreAdminHandler.java?rev=1447125&r1=1447124&r2=1447125&view=diff
==============================================================================
--- lucene/dev/branches/lucene4765/solr/core/src/java/org/apache/solr/handler/admin/CoreAdminHandler.java (original)
+++ lucene/dev/branches/lucene4765/solr/core/src/java/org/apache/solr/handler/admin/CoreAdminHandler.java Mon Feb 18 02:37:43 2013
@@ -610,13 +610,17 @@ public class CoreAdminHandler extends Re
       }
       if (params.getBool(CoreAdminParams.DELETE_INDEX, false)) {
         core.addCloseHook(new CloseHook() {
+          private String indexDir;
+          
           @Override
-          public void preClose(SolrCore core) {}
+          public void preClose(SolrCore core) {
+            indexDir = core.getIndexDir();
+          }
           
           @Override
           public void postClose(SolrCore core) {
             try {
-              core.getDirectoryFactory().remove(core.getIndexDir());
+              core.getDirectoryFactory().remove(indexDir);
             } catch (IOException e) {
               throw new RuntimeException(e);
             }
@@ -1026,11 +1030,8 @@ public class CoreAdminHandler extends Re
     Directory dir;
     long size = 0;
     try {
-      if (!core.getDirectoryFactory().exists(core.getIndexDir())) {
-        dir = core.getDirectoryFactory().get(core.getNewIndexDir(), DirContext.DEFAULT, core.getSolrConfig().indexConfig.lockType);
-      } else {
-        dir = core.getDirectoryFactory().get(core.getIndexDir(), DirContext.DEFAULT, core.getSolrConfig().indexConfig.lockType); 
-      }
+
+      dir = core.getDirectoryFactory().get(core.getIndexDir(), DirContext.DEFAULT, core.getSolrConfig().indexConfig.lockType); 
 
       try {
         size = DirectoryFactory.sizeOfDirectory(dir);
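
The CloseHook change above is a small ordering fix: once the core is closed, the
new searcher-backed getIndexDir() can no longer report the directory the closed
core was actually using, so the value is captured in preClose() while the answer
is still reliable. The resulting pattern:

    core.addCloseHook(new CloseHook() {
      private String indexDir;

      @Override
      public void preClose(SolrCore core) {
        indexDir = core.getIndexDir();   // still safe: the core is not closed yet
      }

      @Override
      public void postClose(SolrCore core) {
        try {
          core.getDirectoryFactory().remove(indexDir);   // uses the captured value
        } catch (IOException e) {
          throw new RuntimeException(e);
        }
      }
    });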

Modified: lucene/dev/branches/lucene4765/solr/core/src/java/org/apache/solr/search/SolrIndexSearcher.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene4765/solr/core/src/java/org/apache/solr/search/SolrIndexSearcher.java?rev=1447125&r1=1447124&r2=1447125&view=diff
==============================================================================
--- lucene/dev/branches/lucene4765/solr/core/src/java/org/apache/solr/search/SolrIndexSearcher.java (original)
+++ lucene/dev/branches/lucene4765/solr/core/src/java/org/apache/solr/search/SolrIndexSearcher.java Mon Feb 18 02:37:43 2013
@@ -116,27 +116,17 @@ public class SolrIndexSearcher extends I
   private Collection<String> storedHighlightFieldNames;
   private DirectoryFactory directoryFactory;
   
-  private final AtomicReader atomicReader; 
+  private final AtomicReader atomicReader;
+  private String path; 
 
   public SolrIndexSearcher(SolrCore core, String path, IndexSchema schema, SolrIndexConfig config, String name, boolean enableCache, DirectoryFactory directoryFactory) throws IOException {
     // we don't need to reserve the directory because we get it from the factory
-    this(core, schema,name, core.getIndexReaderFactory().newReader(directoryFactory.get(path, DirContext.DEFAULT, config.lockType), core), true, enableCache, false, directoryFactory);
+    this(core, path, schema,name, core.getIndexReaderFactory().newReader(directoryFactory.get(path, DirContext.DEFAULT, config.lockType), core), true, enableCache, false, directoryFactory);
   }
 
-  private static String getIndexDir(Directory dir) {
-    if (dir instanceof FSDirectory) {
-      return ((FSDirectory)dir).getDirectory().getAbsolutePath();
-    } else if (dir instanceof NRTCachingDirectory) {
-      // recurse on the delegate
-      return getIndexDir(((NRTCachingDirectory) dir).getDelegate());
-    } else {
-      log.warn("WARNING: Directory impl does not support setting indexDir: " + dir.getClass().getName());
-      return null;
-    }
-  }
-
-  public SolrIndexSearcher(SolrCore core, IndexSchema schema, String name, DirectoryReader r, boolean closeReader, boolean enableCache, boolean reserveDirectory, DirectoryFactory directoryFactory) throws IOException {
+  public SolrIndexSearcher(SolrCore core, String path, IndexSchema schema, String name, DirectoryReader r, boolean closeReader, boolean enableCache, boolean reserveDirectory, DirectoryFactory directoryFactory) throws IOException {
     super(r);
+    this.path = path;
     this.directoryFactory = directoryFactory;
     this.reader = r;
     this.atomicReader = SlowCompositeReaderWrapper.wrap(r);
@@ -210,6 +200,10 @@ public class SolrIndexSearcher extends I
     // do this at the end since an exception in the constructor means we won't close    
     numOpens.incrementAndGet();
   }
+  
+  public String getPath() {
+    return path;
+  }
 
   @Override
   public String toString() {
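
With the constructor change above, the searcher carries its index path explicitly
and the removed getIndexDir(Directory) helper, which could only recover a path
from FSDirectory or NRTCachingDirectory, is no longer needed. A small usage
sketch of the new accessor, mirroring how SolrCore.getIndexDir() now uses it
(the surrounding variables are illustrative):

    // given a SolrIndexSearcher obtained from the core
    String indexPath = searcher.getPath();
    if (indexPath == null) {
      // the path can be unknown (the reader was not opened from a directory path);
      // SolrCore.getIndexDir() falls back to dataDir + "index/" in that case
      indexPath = core.getDataDir() + "index/";
    }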

Modified: lucene/dev/branches/lucene4765/solr/core/src/test/org/apache/solr/cloud/BasicDistributedZkTest.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene4765/solr/core/src/test/org/apache/solr/cloud/BasicDistributedZkTest.java?rev=1447125&r1=1447124&r2=1447125&view=diff
==============================================================================
--- lucene/dev/branches/lucene4765/solr/core/src/test/org/apache/solr/cloud/BasicDistributedZkTest.java (original)
+++ lucene/dev/branches/lucene4765/solr/core/src/test/org/apache/solr/cloud/BasicDistributedZkTest.java Mon Feb 18 02:37:43 2013
@@ -25,7 +25,6 @@ import java.util.HashMap;
 import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
-import java.util.Random;
 import java.util.Set;
 import java.util.concurrent.Callable;
 import java.util.concurrent.CompletionService;
@@ -54,8 +53,6 @@ import org.apache.solr.common.SolrDocume
 import org.apache.solr.common.SolrException;
 import org.apache.solr.common.SolrInputDocument;
 import org.apache.solr.common.cloud.ClusterState;
-import org.apache.solr.common.cloud.DocCollection;
-import org.apache.solr.common.cloud.Replica;
 import org.apache.solr.common.cloud.Slice;
 import org.apache.solr.common.cloud.ZkCoreNodeProps;
 import org.apache.solr.common.cloud.ZkNodeProps;
@@ -79,7 +76,7 @@ import org.junit.BeforeClass;
 public class BasicDistributedZkTest extends AbstractFullDistribZkTestBase {
   
   private static final String DEFAULT_COLLECTION = "collection1";
-  private static final boolean DEBUG = false;
+  protected static final boolean DEBUG = false;
   String t1="a_t";
   String i1="a_si";
   String nint = "n_i";
@@ -111,9 +108,6 @@ public class BasicDistributedZkTest exte
   
   @BeforeClass
   public static void beforeThisClass2() throws Exception {
-    // TODO: we use an fs based dir because something
-    // like a ram dir will not recover correctly right now
-    useFactory(null);
   }
   
   @Before
@@ -334,338 +328,12 @@ public class BasicDistributedZkTest exte
     testUpdateProcessorsRunOnlyOnce("distrib-dup-test-chain-explicit");
     testUpdateProcessorsRunOnlyOnce("distrib-dup-test-chain-implicit");
 
-    testCoreUnloadAndLeaders();
-    testUnloadLotsOfCores();
     testStopAndStartCoresInOneInstance();
-    testUnloadShardAndCollection();
     // Thread.sleep(10000000000L);
     if (DEBUG) {
       super.printLayout();
     }
   }
-
-  private void testUnloadShardAndCollection() throws Exception{
-    // create one leader and one replica
-    
-    Create createCmd = new Create();
-    createCmd.setCoreName("test_unload_shard_and_collection_1");
-    String collection = "test_unload_shard_and_collection";
-    createCmd.setCollection(collection);
-    String coreDataDir = dataDir.getAbsolutePath() + File.separator
-        + System.currentTimeMillis() + collection + "1";
-    createCmd.setDataDir(coreDataDir);
-    createCmd.setNumShards(2);
-    
-    SolrServer client = clients.get(0);
-    String url1 = getBaseUrl(client);
-    HttpSolrServer server = new HttpSolrServer(url1);
-    server.setConnectionTimeout(15000);
-    server.setSoTimeout(30000);
-    server.request(createCmd);
-    
-    createCmd = new Create();
-    createCmd.setCoreName("test_unload_shard_and_collection_2");
-    collection = "test_unload_shard_and_collection";
-    createCmd.setCollection(collection);
-    coreDataDir = dataDir.getAbsolutePath() + File.separator
-        + System.currentTimeMillis() + collection + "2";
-    createCmd.setDataDir(coreDataDir);
-    
-    server.request(createCmd);
-
-    // now unload one of the two
-    Unload unloadCmd = new Unload(false);
-    unloadCmd.setCoreName("test_unload_shard_and_collection_2");
-    server.request(unloadCmd);
-    
-    // there should be only one shard
-    Slice shard2 = getCommonCloudSolrServer().getZkStateReader().getClusterState().getSlice(collection, "shard2");
-    long timeoutAt = System.currentTimeMillis() + 30000;
-    while (shard2 != null) {
-      if (System.currentTimeMillis() > timeoutAt) {
-        printLayout();
-        fail("Still found shard");
-      }
-      
-      Thread.sleep(50);
-      shard2 = getCommonCloudSolrServer().getZkStateReader().getClusterState().getSlice(collection, "shard2");
-    }
-
-    Slice shard1 = getCommonCloudSolrServer().getZkStateReader().getClusterState().getSlice(collection, "shard1");
-    assertNotNull(shard1);
-    assertTrue(getCommonCloudSolrServer().getZkStateReader().getClusterState().getCollections().contains(collection));
-    
-    // now unload one of the other
-    unloadCmd = new Unload(false);
-    unloadCmd.setCoreName("test_unload_shard_and_collection_1");
-    server.request(unloadCmd);
-    
-    //printLayout();
-    // the collection should be gone
-    timeoutAt = System.currentTimeMillis() + 30000;
-    while (getCommonCloudSolrServer().getZkStateReader().getClusterState().getCollections().contains(collection)) {
-      if (System.currentTimeMillis() > timeoutAt) {
-        printLayout();
-        fail("Still found collection");
-      }
-      
-      Thread.sleep(50);
-    }
-    
-  }
-
-  /**
-   * @throws Exception on any problem
-   */
-  private void testCoreUnloadAndLeaders() throws Exception {
-    // create a new collection collection
-    SolrServer client = clients.get(0);
-    String url1 = getBaseUrl(client);
-    HttpSolrServer server = new HttpSolrServer(url1);
-    server.setConnectionTimeout(15000);
-    server.setSoTimeout(30000);
-    
-    Create createCmd = new Create();
-    createCmd.setCoreName("unloadcollection1");
-    createCmd.setCollection("unloadcollection");
-    createCmd.setNumShards(1);
-    String core1DataDir = dataDir.getAbsolutePath() + File.separator + System.currentTimeMillis() + "unloadcollection1" + "_1n";
-    createCmd.setDataDir(core1DataDir);
-    server.request(createCmd);
-    
-    ZkStateReader zkStateReader = getCommonCloudSolrServer().getZkStateReader();
-    
-    zkStateReader.updateClusterState(true);
-
-    int slices = zkStateReader.getClusterState().getCollectionStates().get("unloadcollection").getSlices().size();
-    assertEquals(1, slices);
-    
-    client = clients.get(1);
-    String url2 = getBaseUrl(client);
-    server = new HttpSolrServer(url2);
-    
-    createCmd = new Create();
-    createCmd.setCoreName("unloadcollection2");
-    createCmd.setCollection("unloadcollection");
-    String core2dataDir = dataDir.getAbsolutePath() + File.separator + System.currentTimeMillis() + "unloadcollection1" + "_2n";
-    createCmd.setDataDir(core2dataDir);
-    server.request(createCmd);
-    
-    zkStateReader.updateClusterState(true);
-    slices = zkStateReader.getClusterState().getCollectionStates().get("unloadcollection").getSlices().size();
-    assertEquals(1, slices);
-    
-    waitForRecoveriesToFinish("unloadcollection", zkStateReader, false);
-    
-    ZkCoreNodeProps leaderProps = getLeaderUrlFromZk("unloadcollection", "shard1");
-    
-    Random random = random();
-    HttpSolrServer collectionClient;
-    if (random.nextBoolean()) {
-      collectionClient = new HttpSolrServer(leaderProps.getCoreUrl());
-      // lets try and use the solrj client to index and retrieve a couple
-      // documents
-      SolrInputDocument doc1 = getDoc(id, 6, i1, -600, tlong, 600, t1,
-          "humpty dumpy sat on a wall");
-      SolrInputDocument doc2 = getDoc(id, 7, i1, -600, tlong, 600, t1,
-          "humpty dumpy3 sat on a walls");
-      SolrInputDocument doc3 = getDoc(id, 8, i1, -600, tlong, 600, t1,
-          "humpty dumpy2 sat on a walled");
-      collectionClient.add(doc1);
-      collectionClient.add(doc2);
-      collectionClient.add(doc3);
-      collectionClient.commit();
-    }
-
-    // create another replica for our collection
-    client = clients.get(2);
-    String url3 = getBaseUrl(client);
-    server = new HttpSolrServer(url3);
-    
-    createCmd = new Create();
-    createCmd.setCoreName("unloadcollection3");
-    createCmd.setCollection("unloadcollection");
-    String core3dataDir = dataDir.getAbsolutePath() + File.separator + System.currentTimeMillis() + "unloadcollection" + "_3n";
-    createCmd.setDataDir(core3dataDir);
-    server.request(createCmd);
-    
-    waitForRecoveriesToFinish("unloadcollection", zkStateReader, false);
-    
-    // so that we start with some versions when we reload...
-    DirectUpdateHandler2.commitOnClose = false;
-    
-    HttpSolrServer addClient = new HttpSolrServer(url3 + "/unloadcollection3");
-    addClient.setConnectionTimeout(15000);
-    addClient.setSoTimeout(30000);
-    // add a few docs
-    for (int x = 20; x < 100; x++) {
-      SolrInputDocument doc1 = getDoc(id, x, i1, -600, tlong, 600, t1,
-          "humpty dumpy sat on a wall");
-      addClient.add(doc1);
-    }
-
-    // don't commit so they remain in the tran log
-    //collectionClient.commit();
-    
-    // unload the leader
-    collectionClient = new HttpSolrServer(leaderProps.getBaseUrl());
-    collectionClient.setConnectionTimeout(15000);
-    collectionClient.setSoTimeout(30000);
-    
-    Unload unloadCmd = new Unload(false);
-    unloadCmd.setCoreName(leaderProps.getCoreName());
-    ModifiableSolrParams p = (ModifiableSolrParams) unloadCmd.getParams();
-
-    collectionClient.request(unloadCmd);
-
-//    Thread.currentThread().sleep(500);
-//    printLayout();
-    
-    int tries = 50;
-    while (leaderProps.getCoreUrl().equals(zkStateReader.getLeaderUrl("unloadcollection", "shard1", 15000))) {
-      Thread.sleep(100);
-      if (tries-- == 0) {
-        fail("Leader never changed");
-      }
-    }
-    
-    // ensure there is a leader
-    zkStateReader.getLeaderRetry("unloadcollection", "shard1", 15000);
-    
-    addClient = new HttpSolrServer(url2 + "/unloadcollection2");
-    addClient.setConnectionTimeout(15000);
-    addClient.setSoTimeout(30000);
-    
-    // add a few docs while the leader is down
-    for (int x = 101; x < 200; x++) {
-      SolrInputDocument doc1 = getDoc(id, x, i1, -600, tlong, 600, t1,
-          "humpty dumpy sat on a wall");
-      addClient.add(doc1);
-    }
-    
-    
-    // create another replica for our collection
-    client = clients.get(3);
-    String url4 = getBaseUrl(client);
-    server = new HttpSolrServer(url4);
-    server.setConnectionTimeout(15000);
-    server.setSoTimeout(30000);
-    
-    createCmd = new Create();
-    createCmd.setCoreName("unloadcollection4");
-    createCmd.setCollection("unloadcollection");
-    String core4dataDir = dataDir.getAbsolutePath() + File.separator + System.currentTimeMillis() + "unloadcollection" + "_4n";
-    createCmd.setDataDir(core4dataDir);
-    server.request(createCmd);
-    
-    waitForRecoveriesToFinish("unloadcollection", zkStateReader, false);
-    
-    // unload the leader again
-    leaderProps = getLeaderUrlFromZk("unloadcollection", "shard1");
-    collectionClient = new HttpSolrServer(leaderProps.getBaseUrl());
-    collectionClient.setConnectionTimeout(15000);
-    collectionClient.setSoTimeout(30000);
-    
-    unloadCmd = new Unload(false);
-    unloadCmd.setCoreName(leaderProps.getCoreName());
-    p = (ModifiableSolrParams) unloadCmd.getParams();
-    collectionClient.request(unloadCmd);
-    
-    tries = 50;
-    while (leaderProps.getCoreUrl().equals(zkStateReader.getLeaderUrl("unloadcollection", "shard1", 15000))) {
-      Thread.sleep(100);
-      if (tries-- == 0) {
-        fail("Leader never changed");
-      }
-    }
-    
-    zkStateReader.getLeaderRetry("unloadcollection", "shard1", 15000);
-    
-    
-    // set this back
-    DirectUpdateHandler2.commitOnClose = true;
-    
-    // bring the downed leader back as replica
-    server = new HttpSolrServer(leaderProps.getBaseUrl());
-    server.setConnectionTimeout(15000);
-    server.setSoTimeout(30000);
-    
-    createCmd = new Create();
-    createCmd.setCoreName(leaderProps.getCoreName());
-    createCmd.setCollection("unloadcollection");
-    createCmd.setDataDir(core1DataDir);
-    server.request(createCmd);
-
-    waitForRecoveriesToFinish("unloadcollection", zkStateReader, false);
-    
-    server = new HttpSolrServer(url2 + "/unloadcollection");
-    server.setConnectionTimeout(15000);
-    server.setSoTimeout(30000);
-    server.commit();
-    SolrQuery q = new SolrQuery("*:*");
-    q.set("distrib", false);
-    long found1 = server.query(q).getResults().getNumFound();
-    server = new HttpSolrServer(url3 + "/unloadcollection");
-    server.setConnectionTimeout(15000);
-    server.setSoTimeout(30000);
-    server.commit();
-    q = new SolrQuery("*:*");
-    q.set("distrib", false);
-    long found3 = server.query(q).getResults().getNumFound();
-    server = new HttpSolrServer(url4 + "/unloadcollection");
-    server.setConnectionTimeout(15000);
-    server.setSoTimeout(30000);
-    server.commit();
-    q = new SolrQuery("*:*");
-    q.set("distrib", false);
-    long found4 = server.query(q).getResults().getNumFound();
-    
-    // all 3 shards should now have the same number of docs
-    assertEquals(found1, found3);
-    assertEquals(found3, found4);
-    
-  }
-  
-  private void testUnloadLotsOfCores() throws Exception {
-    SolrServer client = clients.get(2);
-    String url3 = getBaseUrl(client);
-    final HttpSolrServer server = new HttpSolrServer(url3);
-    server.setConnectionTimeout(15000);
-    server.setSoTimeout(60000);
-    ThreadPoolExecutor executor = new ThreadPoolExecutor(0, Integer.MAX_VALUE,
-        5, TimeUnit.SECONDS, new SynchronousQueue<Runnable>(),
-        new DefaultSolrThreadFactory("testExecutor"));
-    int cnt = random().nextInt(12) + 1;
-    
-    // create the cores
-    createCores(server, executor, "multiunload", 2, cnt);
-    
-    executor.shutdown();
-    executor.awaitTermination(120, TimeUnit.SECONDS);
-    executor = new ThreadPoolExecutor(0, Integer.MAX_VALUE, 5,
-        TimeUnit.SECONDS, new SynchronousQueue<Runnable>(),
-        new DefaultSolrThreadFactory("testExecutor"));
-    for (int j = 0; j < cnt; j++) {
-      final int freezeJ = j;
-      executor.execute(new Runnable() {
-        @Override
-        public void run() {
-          Unload unloadCmd = new Unload(true);
-          unloadCmd.setCoreName("multiunload" + freezeJ);
-          try {
-            server.request(unloadCmd);
-          } catch (SolrServerException e) {
-            throw new RuntimeException(e);
-          } catch (IOException e) {
-            throw new RuntimeException(e);
-          }
-        }
-      });
-      Thread.sleep(random().nextInt(50));
-    }
-    executor.shutdown();
-    executor.awaitTermination(120, TimeUnit.SECONDS);
-  }
   
   private void testStopAndStartCoresInOneInstance() throws Exception {
     SolrServer client = clients.get(0);
@@ -701,7 +369,7 @@ public class BasicDistributedZkTest exte
 
   }
 
-  private void createCores(final HttpSolrServer server,
+  protected void createCores(final HttpSolrServer server,
       ThreadPoolExecutor executor, final String collection, final int numShards, int cnt) {
     for (int i = 0; i < cnt; i++) {
       final int freezeI = i;
@@ -729,7 +397,7 @@ public class BasicDistributedZkTest exte
     }
   }
 
-  private String getBaseUrl(SolrServer client) {
+  protected String getBaseUrl(SolrServer client) {
     String url2 = ((HttpSolrServer) client).getBaseURL()
         .substring(
             0,
@@ -769,7 +437,7 @@ public class BasicDistributedZkTest exte
     }
   }
   
-  private ZkCoreNodeProps getLeaderUrlFromZk(String collection, String slice) {
+  protected ZkCoreNodeProps getLeaderUrlFromZk(String collection, String slice) {
     ClusterState clusterState = getCommonCloudSolrServer().getZkStateReader().getClusterState();
     ZkNodeProps leader = clusterState.getLeader(collection, slice);
     if (leader == null) {
@@ -777,89 +445,7 @@ public class BasicDistributedZkTest exte
     }
     return new ZkCoreNodeProps(leader);
   }
-
-  private String checkCollectionExpectations(String collectionName, List<Integer> numShardsNumReplicaList, List<String> nodesAllowedToRunShards) {
-    ClusterState clusterState = getCommonCloudSolrServer().getZkStateReader().getClusterState();
-    
-    int expectedSlices = numShardsNumReplicaList.get(0);
-    // The Math.min thing is here, because we expect replication-factor to be reduced to if there are not enough live nodes to spread all shards of a collection over different nodes
-    int expectedShardsPerSlice = numShardsNumReplicaList.get(1);
-    int expectedTotalShards = expectedSlices * expectedShardsPerSlice;
-    
-      Map<String,DocCollection> collections = clusterState
-          .getCollectionStates();
-      if (collections.containsKey(collectionName)) {
-        Map<String,Slice> slices = collections.get(collectionName).getSlicesMap();
-        // did we find expectedSlices slices/shards?
-      if (slices.size() != expectedSlices) {
-        return "Found new collection " + collectionName + ", but mismatch on number of slices. Expected: " + expectedSlices + ", actual: " + slices.size();
-      }
-      int totalShards = 0;
-      for (String sliceName : slices.keySet()) {
-        for (Replica replica : slices.get(sliceName).getReplicas()) {
-          if (nodesAllowedToRunShards != null && !nodesAllowedToRunShards.contains(replica.getStr(ZkStateReader.NODE_NAME_PROP))) {
-            return "Shard " + replica.getName() + " created on node " + replica.getStr(ZkStateReader.NODE_NAME_PROP) + " not allowed to run shards for the created collection " + collectionName;
-          }
-        }
-        totalShards += slices.get(sliceName).getReplicas().size();
-      }
-      if (totalShards != expectedTotalShards) {
-        return "Found new collection " + collectionName + " with correct number of slices, but mismatch on number of shards. Expected: " + expectedTotalShards + ", actual: " + totalShards; 
-        }
-      return null;
-    } else {
-      return "Could not find new collection " + collectionName;
-    }
-  }
   
-  private void checkForCollection(String collectionName, List<Integer> numShardsNumReplicaList, List<String> nodesAllowedToRunShards)
-      throws Exception {
-    // check for an expectedSlices new collection - we poll the state
-    long timeoutAt = System.currentTimeMillis() + 120000;
-    boolean success = false;
-    String checkResult = "Didnt get to perform a single check";
-    while (System.currentTimeMillis() < timeoutAt) {
-      checkResult = checkCollectionExpectations(collectionName, numShardsNumReplicaList, nodesAllowedToRunShards);
-      if (checkResult == null) {
-        success = true;
-        break;
-      }
-      Thread.sleep(500);
-    }
-    if (!success) {
-      super.printLayout();
-      fail(checkResult);
-      }
-    }
-
-  private void checkCollectionIsNotCreated(String collectionName)
-    throws Exception {
-    // TODO: REMOVE THIS SLEEP WHEN WE HAVE COLLECTION API RESPONSES
-    Thread.sleep(10000);
-    assertFalse(collectionName + " not supposed to exist", getCommonCloudSolrServer().getZkStateReader().getClusterState().getCollections().contains(collectionName));
-  }
-  
-  private void checkForMissingCollection(String collectionName)
-      throws Exception {
-    // check for a  collection - we poll the state
-    long timeoutAt = System.currentTimeMillis() + 30000;
-    boolean found = true;
-    while (System.currentTimeMillis() < timeoutAt) {
-      getCommonCloudSolrServer().getZkStateReader().updateClusterState(true);
-      ClusterState clusterState = getCommonCloudSolrServer().getZkStateReader().getClusterState();
-      Map<String,DocCollection> collections = clusterState
-          .getCollectionStates();
-      if (!collections.containsKey(collectionName)) {
-        found = false;
-        break;
-      }
-      Thread.sleep(100);
-    }
-    if (found) {
-      fail("Found collection that should be gone " + collectionName);
-    }
-  }
-
   /**
    * Expects a RegexReplaceProcessorFactories in the chain which will
    * "double up" the values in two (stored) string fields.
@@ -1335,7 +921,7 @@ public class BasicDistributedZkTest exte
   }
 
   volatile CloudSolrServer commondCloudSolrServer;
-  private CloudSolrServer getCommonCloudSolrServer() {
+  protected CloudSolrServer getCommonCloudSolrServer() {
     if (commondCloudSolrServer == null) {
       synchronized(this) {
         try {
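
Most of the removed code above is the unload test coverage leaving this class; the
remaining edits widen a handful of helpers from private to protected so other test
classes can share them. A hypothetical sketch of that reuse (class and method
names invented here; the actual destination of the unload tests is not part of
this diff):

    public class UnloadDistributedZkTest extends BasicDistributedZkTest {
      private void unloadLeaderOnce() throws Exception {
        SolrServer client = clients.get(0);
        HttpSolrServer server = new HttpSolrServer(getBaseUrl(client));   // protected helper
        ZkCoreNodeProps leader = getLeaderUrlFromZk("unloadcollection", "shard1");   // protected helper

        Unload unloadCmd = new Unload(false);
        unloadCmd.setCoreName(leader.getCoreName());
        server.request(unloadCmd);
      }
    }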

Modified: lucene/dev/branches/lucene4765/solr/core/src/test/org/apache/solr/handler/TestReplicationHandler.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene4765/solr/core/src/test/org/apache/solr/handler/TestReplicationHandler.java?rev=1447125&r1=1447124&r2=1447125&view=diff
==============================================================================
--- lucene/dev/branches/lucene4765/solr/core/src/test/org/apache/solr/handler/TestReplicationHandler.java (original)
+++ lucene/dev/branches/lucene4765/solr/core/src/test/org/apache/solr/handler/TestReplicationHandler.java Mon Feb 18 02:37:43 2013
@@ -84,7 +84,7 @@ public class TestReplicationHandler exte
 
 
   @Before
-  public void setup() throws Exception {
+  public void setUp() throws Exception {
     super.setUp();
     // For manual testing only
     // useFactory(null); // force an FS factory
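
The rename above is not cosmetic: with the old name, the method was an additional
@Before method that invoked super.setUp() on top of the base class's own setUp()
hook, which can run the framework's per-test setup twice or out of order. The
convention the rename restores (a sketch, assuming the LuceneTestCase and
SolrTestCaseJ4 conventions at this revision):

    @Before
    @Override
    public void setUp() throws Exception {
      super.setUp();   // run the framework's per-test setup exactly once
      // test-specific initialization follows
    }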

Modified: lucene/dev/branches/lucene4765/solr/test-framework/src/java/org/apache/solr/BaseDistributedSearchTestCase.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene4765/solr/test-framework/src/java/org/apache/solr/BaseDistributedSearchTestCase.java?rev=1447125&r1=1447124&r2=1447125&view=diff
==============================================================================
--- lucene/dev/branches/lucene4765/solr/test-framework/src/java/org/apache/solr/BaseDistributedSearchTestCase.java (original)
+++ lucene/dev/branches/lucene4765/solr/test-framework/src/java/org/apache/solr/BaseDistributedSearchTestCase.java Mon Feb 18 02:37:43 2013
@@ -100,7 +100,7 @@ public abstract class BaseDistributedSea
         hostContext.append("_");
       }
       hostContext.append(_TestUtil.randomSimpleString(random(), 3));
-      if ( ! "/".equals(hostContext)) {
+      if ( ! "/".equals(hostContext.toString())) {
         // if our random string is empty, this might add a trailing slash, 
         // but our code should be ok with that
         hostContext.append("/").append(_TestUtil.randomSimpleString(random(), 2));
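
The one-character-looking fix above addresses a real bug: hostContext is a
StringBuilder, and String.equals(Object) is never true for a non-String argument,
so the old guard could never fire. A self-contained illustration of the pitfall:

    StringBuilder hostContext = new StringBuilder("/");
    System.out.println("/".equals(hostContext));             // false, always
    System.out.println("/".equals(hostContext.toString()));  // true, the intended comparison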