Posted to commits@lucene.apache.org by mi...@apache.org on 2011/11/11 20:50:56 UTC

svn commit: r1201036 [1/4] - in /lucene/dev/trunk: lucene/contrib/demo/src/java/org/apache/lucene/demo/ lucene/contrib/highlighter/src/test/org/apache/lucene/search/highlight/ lucene/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/ l...

Author: mikemccand
Date: Fri Nov 11 19:50:51 2011
New Revision: 1201036

URL: http://svn.apache.org/viewvc?rev=1201036&view=rev
Log:
LUCENE-3454: rename IW.optimize -> IW.forceMerge
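
For readers following the API change, a minimal before/after sketch of caller code (hypothetical application code, not part of this commit; dir, conf, and doc are assumed to be an existing Directory, IndexWriterConfig, and Document):

    // Before LUCENE-3454:
    IndexWriter writer = new IndexWriter(dir, conf);
    writer.addDocument(doc);
    writer.optimize();        // method removed by this commit
    writer.close();

    // After LUCENE-3454, the same intent stated explicitly:
    IndexWriter writer2 = new IndexWriter(dir, conf);
    writer2.addDocument(doc);
    writer2.forceMerge(1);    // merge down to at most 1 segment
    writer2.close();

As the IndexWriter diffs below show, the no-argument optimize() maps to forceMerge(1), and optimize(maxNumSegments) maps to forceMerge(maxNumSegments).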

Added:
    lucene/dev/trunk/lucene/contrib/instantiated/src/test/org/apache/lucene/store/instantiated/TestMultiSegmentReaderOnConstructor.java
      - copied, changed from r1200273, lucene/dev/trunk/lucene/contrib/instantiated/src/test/org/apache/lucene/store/instantiated/TestUnoptimizedReaderOnConstructor.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestForceMergeForever.java
      - copied, changed from r1200273, lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestOptimizeForever.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestIndexWriterForceMerge.java
      - copied, changed from r1200273, lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestIndexWriterOptimize.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestSizeBoundedForceMerge.java
      - copied, changed from r1200273, lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestSizeBoundedOptimize.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestThreadedForceMerge.java
      - copied, changed from r1200273, lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestThreadedOptimize.java
    lucene/dev/trunk/modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/ForceMergeTask.java
      - copied, changed from r1200273, lucene/dev/trunk/modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/OptimizeTask.java
Removed:
    lucene/dev/trunk/lucene/contrib/instantiated/src/test/org/apache/lucene/store/instantiated/TestUnoptimizedReaderOnConstructor.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestIndexWriterOptimize.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestOptimizeForever.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestSizeBoundedOptimize.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestThreadedOptimize.java
    lucene/dev/trunk/modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/OptimizeTask.java
Modified:
    lucene/dev/trunk/lucene/contrib/demo/src/java/org/apache/lucene/demo/IndexFiles.java
    lucene/dev/trunk/lucene/contrib/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterTest.java
    lucene/dev/trunk/lucene/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedIndex.java
    lucene/dev/trunk/lucene/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedIndexReader.java
    lucene/dev/trunk/lucene/contrib/misc/src/java/org/apache/lucene/index/BalancedSegmentMergePolicy.java
    lucene/dev/trunk/lucene/contrib/misc/src/java/org/apache/lucene/index/IndexSplitter.java
    lucene/dev/trunk/lucene/contrib/misc/src/java/org/apache/lucene/misc/IndexMergeTool.java
    lucene/dev/trunk/lucene/contrib/misc/src/test/org/apache/lucene/index/TestIndexSplitter.java
    lucene/dev/trunk/lucene/contrib/misc/src/test/org/apache/lucene/index/codecs/appending/TestAppendingCodec.java
    lucene/dev/trunk/lucene/contrib/sandbox/src/test/org/apache/lucene/sandbox/queries/DuplicateFilterTest.java
    lucene/dev/trunk/lucene/contrib/sandbox/src/test/org/apache/lucene/sandbox/queries/regex/TestSpanRegexQuery.java
    lucene/dev/trunk/lucene/contrib/spatial/src/test/org/apache/lucene/spatial/tier/TestCartesian.java
    lucene/dev/trunk/lucene/src/java/org/apache/lucene/index/DirectoryReader.java
    lucene/dev/trunk/lucene/src/java/org/apache/lucene/index/FilterIndexReader.java
    lucene/dev/trunk/lucene/src/java/org/apache/lucene/index/IndexCommit.java
    lucene/dev/trunk/lucene/src/java/org/apache/lucene/index/IndexFileDeleter.java
    lucene/dev/trunk/lucene/src/java/org/apache/lucene/index/IndexReader.java
    lucene/dev/trunk/lucene/src/java/org/apache/lucene/index/IndexUpgrader.java
    lucene/dev/trunk/lucene/src/java/org/apache/lucene/index/IndexWriter.java
    lucene/dev/trunk/lucene/src/java/org/apache/lucene/index/IndexWriterConfig.java
    lucene/dev/trunk/lucene/src/java/org/apache/lucene/index/LogByteSizeMergePolicy.java
    lucene/dev/trunk/lucene/src/java/org/apache/lucene/index/LogDocMergePolicy.java
    lucene/dev/trunk/lucene/src/java/org/apache/lucene/index/LogMergePolicy.java
    lucene/dev/trunk/lucene/src/java/org/apache/lucene/index/MergePolicy.java
    lucene/dev/trunk/lucene/src/java/org/apache/lucene/index/MultiReader.java
    lucene/dev/trunk/lucene/src/java/org/apache/lucene/index/NoMergePolicy.java
    lucene/dev/trunk/lucene/src/java/org/apache/lucene/index/ParallelReader.java
    lucene/dev/trunk/lucene/src/java/org/apache/lucene/index/SnapshotDeletionPolicy.java
    lucene/dev/trunk/lucene/src/java/org/apache/lucene/index/TieredMergePolicy.java
    lucene/dev/trunk/lucene/src/java/org/apache/lucene/index/UpgradeIndexMergePolicy.java
    lucene/dev/trunk/lucene/src/java/org/apache/lucene/search/MultiTermQuery.java
    lucene/dev/trunk/lucene/src/java/org/apache/lucene/store/FlushInfo.java
    lucene/dev/trunk/lucene/src/java/org/apache/lucene/store/MergeInfo.java
    lucene/dev/trunk/lucene/src/test-framework/java/org/apache/lucene/analysis/CollationTestBase.java
    lucene/dev/trunk/lucene/src/test-framework/java/org/apache/lucene/index/MockRandomMergePolicy.java
    lucene/dev/trunk/lucene/src/test-framework/java/org/apache/lucene/index/RandomIndexWriter.java
    lucene/dev/trunk/lucene/src/test-framework/java/org/apache/lucene/index/ThreadedIndexingAndSearchingTestCase.java
    lucene/dev/trunk/lucene/src/test-framework/java/org/apache/lucene/util/LuceneTestCase.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/TestExternalCodecs.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/TestMergeSchedulerExternal.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/Test2BPostings.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/Test2BTerms.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestAddIndexes.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestCheckIndex.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestCodecs.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestConsistentFieldNumbers.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestDeletionPolicy.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestDirectoryReader.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestDocCount.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestDocumentWriter.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestFieldsReader.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestFlex.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestGlobalFieldNumbers.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestIndexCommit.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestIndexReader.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestIndexReaderClone.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestIndexReaderCloneNorms.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestIndexReaderDelete.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestIndexReaderReopen.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestIndexWriter.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestIndexWriterCommit.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestIndexWriterDelete.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestIndexWriterExceptions.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestIndexWriterMerging.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestIndexWriterOnDiskFull.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestIndexWriterReader.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestIndexWriterUnicode.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestLazyProxSkipping.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestMultiLevelSkipList.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestNRTThreads.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestNoMergePolicy.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestNorms.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestOmitNorms.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestOmitPositions.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestOmitTf.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestParallelReader.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestParallelReaderEmptyIndex.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestPayloadProcessorProvider.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestPayloads.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestPerSegmentDeletes.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestSegmentTermDocs.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestSegmentTermEnum.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestStressAdvance.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestStressIndexing2.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestStressNRT.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestSumDocFreq.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestTermVectorsWriter.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestTermdocPerf.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestTermsEnum.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestTieredMergePolicy.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/codecs/lucene3x/TestTermInfosReaderIndex.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/codecs/perfield/TestPerFieldPostingsFormat.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/values/TestDocValuesIndexing.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/values/TestTypePromotion.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/FieldCacheRewriteMethod.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/TestBooleanQuery.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/TestCachingWrapperFilter.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/TestFieldCacheRangeFilter.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/TestFilteredQuery.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/TestFilteredSearch.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/TestFuzzyQuery2.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/TestMultiTermQueryRewrites.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/TestScorerPerf.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/TestSort.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/TestTermVectors.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/TestTopDocsMerge.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/store/TestRAMDirectory.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/util/fst/TestFSTs.java
    lucene/dev/trunk/modules/benchmark/conf/analyzer.alg
    lucene/dev/trunk/modules/benchmark/conf/collector-small.alg
    lucene/dev/trunk/modules/benchmark/conf/collector.alg
    lucene/dev/trunk/modules/benchmark/conf/deletes.alg
    lucene/dev/trunk/modules/benchmark/conf/facets.alg
    lucene/dev/trunk/modules/benchmark/conf/highlight-profile.alg
    lucene/dev/trunk/modules/benchmark/conf/highlight-vs-vector-highlight.alg
    lucene/dev/trunk/modules/benchmark/conf/indexing-flush-by-RAM-multithreaded.alg
    lucene/dev/trunk/modules/benchmark/conf/indexing-flush-by-RAM.alg
    lucene/dev/trunk/modules/benchmark/conf/indexing-multithreaded.alg
    lucene/dev/trunk/modules/benchmark/conf/indexing.alg
    lucene/dev/trunk/modules/benchmark/conf/micro-standard-flush-by-ram.alg
    lucene/dev/trunk/modules/benchmark/conf/micro-standard.alg
    lucene/dev/trunk/modules/benchmark/conf/sample.alg
    lucene/dev/trunk/modules/benchmark/conf/sloppy-phrase.alg
    lucene/dev/trunk/modules/benchmark/conf/sort-standard.alg
    lucene/dev/trunk/modules/benchmark/conf/standard-flush-by-RAM.alg
    lucene/dev/trunk/modules/benchmark/conf/standard-highlights-notv.alg
    lucene/dev/trunk/modules/benchmark/conf/standard-highlights-tv.alg
    lucene/dev/trunk/modules/benchmark/conf/standard.alg
    lucene/dev/trunk/modules/benchmark/conf/vector-highlight-profile.alg
    lucene/dev/trunk/modules/benchmark/src/test/org/apache/lucene/benchmark/byTask/TestPerfTasksLogic.java
    lucene/dev/trunk/modules/facet/src/java/org/apache/lucene/facet/index/FacetsPayloadProcessorProvider.java
    lucene/dev/trunk/modules/queries/src/test/org/apache/lucene/queries/function/FunctionTestSetup.java
    lucene/dev/trunk/modules/queryparser/src/test/org/apache/lucene/queryparser/xml/TestQueryTemplateManager.java
    lucene/dev/trunk/modules/suggest/src/java/org/apache/lucene/search/spell/SpellChecker.java
    lucene/dev/trunk/modules/suggest/src/test/org/apache/lucene/search/spell/TestLuceneDictionary.java
    lucene/dev/trunk/solr/core/src/java/org/apache/solr/core/IndexDeletionPolicyWrapper.java
    lucene/dev/trunk/solr/core/src/java/org/apache/solr/core/SolrDeletionPolicy.java
    lucene/dev/trunk/solr/core/src/java/org/apache/solr/handler/ReplicationHandler.java
    lucene/dev/trunk/solr/core/src/java/org/apache/solr/handler/admin/LukeRequestHandler.java
    lucene/dev/trunk/solr/core/src/java/org/apache/solr/handler/component/SpellCheckComponent.java
    lucene/dev/trunk/solr/core/src/java/org/apache/solr/spelling/FileBasedSpellChecker.java
    lucene/dev/trunk/solr/core/src/java/org/apache/solr/update/DirectUpdateHandler2.java
    lucene/dev/trunk/solr/core/src/test/org/apache/solr/core/TestSolrDeletionPolicy1.java
    lucene/dev/trunk/solr/core/src/test/org/apache/solr/search/TestRealTimeGet.java
    lucene/dev/trunk/solr/core/src/test/org/apache/solr/spelling/IndexBasedSpellCheckerTest.java

Modified: lucene/dev/trunk/lucene/contrib/demo/src/java/org/apache/lucene/demo/IndexFiles.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/contrib/demo/src/java/org/apache/lucene/demo/IndexFiles.java?rev=1201036&r1=1201035&r2=1201036&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/contrib/demo/src/java/org/apache/lucene/demo/IndexFiles.java (original)
+++ lucene/dev/trunk/lucene/contrib/demo/src/java/org/apache/lucene/demo/IndexFiles.java Fri Nov 11 19:50:51 2011
@@ -109,12 +109,12 @@ public class IndexFiles {
       indexDocs(writer, docDir);
 
       // NOTE: if you want to maximize search performance,
-      // you can optionally call optimize here.  This can be
-      // a costly operation, so generally it's only worth
-      // it when your index is relatively static (ie you're
-      // done adding documents to it):
+      // you can optionally call forceMerge here.  This can be
+      // a terribly costly operation, so generally it's only
+      // worth it when your index is relatively static (ie
+      // you're done adding documents to it):
       //
-      // writer.optimize();
+      // writer.forceMerge(1);
 
       writer.close();
 

Modified: lucene/dev/trunk/lucene/contrib/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterTest.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/contrib/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterTest.java?rev=1201036&r1=1201035&r2=1201036&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/contrib/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterTest.java (original)
+++ lucene/dev/trunk/lucene/contrib/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterTest.java Fri Nov 11 19:50:51 2011
@@ -1635,7 +1635,7 @@ public class HighlighterTest extends Bas
     writer.addDocument( doc( "t_text1", "more random words for second field del" ) );
     writer.addDocument( doc( "t_text1", "random words for highlighting tests del" ) );
     writer.addDocument( doc( "t_text1", "more random words for second field" ) );
-    writer.optimize();
+    writer.forceMerge(1);
     writer.close();
   }
   
@@ -1643,7 +1643,7 @@ public class HighlighterTest extends Bas
     IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)).setOpenMode(OpenMode.APPEND));
     writer.deleteDocuments( new Term( "t_text1", "del" ) );
     // To see negative idf, keep comment the following line
-    //writer.optimize();
+    //writer.forceMerge(1);
     writer.close();
   }
   
@@ -1759,7 +1759,7 @@ public class HighlighterTest extends Bas
     doc = new Document();
     doc.add(nfield);
     writer.addDocument(doc, analyzer);
-    writer.optimize();
+    writer.forceMerge(1);
     writer.close();
     reader = IndexReader.open(ramDir, true);
     numHighlights = 0;

Modified: lucene/dev/trunk/lucene/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedIndex.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedIndex.java?rev=1201036&r1=1201035&r2=1201036&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedIndex.java (original)
+++ lucene/dev/trunk/lucene/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedIndex.java Fri Nov 11 19:50:51 2011
@@ -96,7 +96,7 @@ public class InstantiatedIndex
    * Creates a new instantiated index that looks just like the index in a specific state as represented by a reader.
    *
    * @param sourceIndexReader the source index this new instantiated index will be copied from.
-   * @throws IOException if the source index is not optimized, or when accessing the source.
+   * @throws IOException if the source index is not single-segment, or when accessing the source.
    */
   public InstantiatedIndex(IndexReader sourceIndexReader) throws IOException {
     this(sourceIndexReader, null);
@@ -109,13 +109,13 @@ public class InstantiatedIndex
    *
    * @param sourceIndexReader the source index this new instantiated index will be copied from.
    * @param fields fields to be added, or null for all
-   * @throws IOException if the source index is not optimized, or when accessing the source.
+   * @throws IOException if the source index is not single-segment, or when accessing the source.
    */
   public InstantiatedIndex(IndexReader sourceIndexReader, Set<String> fields) throws IOException {
 
-    if (!sourceIndexReader.isOptimized()) {
-      System.out.println(("Source index is not optimized."));      
-      //throw new IOException("Source index is not optimized.");
+    if (sourceIndexReader.getSequentialSubReaders().length != 1) {
+      System.out.println(("Source index has more than one segment."));      
+      //throw new IOException("Source index has more than one segment.");
     }
 
 

Modified: lucene/dev/trunk/lucene/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedIndexReader.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedIndexReader.java?rev=1201036&r1=1201035&r2=1201036&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedIndexReader.java (original)
+++ lucene/dev/trunk/lucene/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedIndexReader.java Fri Nov 11 19:50:51 2011
@@ -56,14 +56,6 @@ public class InstantiatedIndexReader ext
   }
 
   /**
-   * @return always true.
-   */
-  @Override
-  public boolean isOptimized() {
-    return true;
-  }
-
-  /**
    * An InstantiatedIndexReader is not a snapshot in time, it is completely in
    * sync with the latest commit to the store!
    * 

Copied: lucene/dev/trunk/lucene/contrib/instantiated/src/test/org/apache/lucene/store/instantiated/TestMultiSegmentReaderOnConstructor.java (from r1200273, lucene/dev/trunk/lucene/contrib/instantiated/src/test/org/apache/lucene/store/instantiated/TestUnoptimizedReaderOnConstructor.java)
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/contrib/instantiated/src/test/org/apache/lucene/store/instantiated/TestMultiSegmentReaderOnConstructor.java?p2=lucene/dev/trunk/lucene/contrib/instantiated/src/test/org/apache/lucene/store/instantiated/TestMultiSegmentReaderOnConstructor.java&p1=lucene/dev/trunk/lucene/contrib/instantiated/src/test/org/apache/lucene/store/instantiated/TestUnoptimizedReaderOnConstructor.java&r1=1200273&r2=1201036&rev=1201036&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/contrib/instantiated/src/test/org/apache/lucene/store/instantiated/TestUnoptimizedReaderOnConstructor.java (original)
+++ lucene/dev/trunk/lucene/contrib/instantiated/src/test/org/apache/lucene/store/instantiated/TestMultiSegmentReaderOnConstructor.java Fri Nov 11 19:50:51 2011
@@ -30,7 +30,7 @@ import org.apache.lucene.document.TextFi
 /**
  * @since 2009-mar-30 13:15:49
  */
-public class TestUnoptimizedReaderOnConstructor extends LuceneTestCase {
+public class TestMultiSegmentReaderOnConstructor extends LuceneTestCase {
 
   public void test() throws Exception {
     Directory dir = newDirectory();
@@ -49,18 +49,18 @@ public class TestUnoptimizedReaderOnCons
     addDocument(iw, "All work and no play makes wendy a dull girl");
     iw.close();
 
-    IndexReader unoptimizedReader = IndexReader.open(dir, false);
-    unoptimizedReader.deleteDocument(2);
+    IndexReader multiSegReader = IndexReader.open(dir, false);
+    multiSegReader.deleteDocument(2);
 
     try {
-      new InstantiatedIndex(unoptimizedReader);
+      new InstantiatedIndex(multiSegReader);
     } catch (Exception e) {
       e.printStackTrace(System.out);
-      fail("No exceptions when loading an unoptimized reader!");
+      fail("No exceptions when loading a multi-seg reader!");
     }
 
     // todo some assertations.
-    unoptimizedReader.close();
+    multiSegReader.close();
     dir.close();
   }
 

Modified: lucene/dev/trunk/lucene/contrib/misc/src/java/org/apache/lucene/index/BalancedSegmentMergePolicy.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/contrib/misc/src/java/org/apache/lucene/index/BalancedSegmentMergePolicy.java?rev=1201036&r1=1201035&r2=1201036&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/contrib/misc/src/java/org/apache/lucene/index/BalancedSegmentMergePolicy.java (original)
+++ lucene/dev/trunk/lucene/contrib/misc/src/java/org/apache/lucene/index/BalancedSegmentMergePolicy.java Fri Nov 11 19:50:51 2011
@@ -105,22 +105,22 @@ public class BalancedSegmentMergePolicy 
   }
   
   @Override
-  public MergeSpecification findMergesForOptimize(SegmentInfos infos, int maxNumSegments, Map<SegmentInfo,Boolean> segmentsToOptimize) throws IOException {
+  public MergeSpecification findForcedMerges(SegmentInfos infos, int maxNumSegments, Map<SegmentInfo,Boolean> segmentsToMerge) throws IOException {
     
     assert maxNumSegments > 0;
 
     MergeSpecification spec = null;
 
-    if (!isOptimized(infos, maxNumSegments, segmentsToOptimize)) {
+    if (!isMerged(infos, maxNumSegments, segmentsToMerge)) {
 
       // Find the newest (rightmost) segment that needs to
-      // be optimized (other segments may have been flushed
-      // since optimize started):
+      // be merged (other segments may have been flushed
+      // since the merge started):
       int last = infos.size();
       while(last > 0) {
 
         final SegmentInfo info = infos.info(--last);
-        if (segmentsToOptimize.containsKey(info)) {
+        if (segmentsToMerge.containsKey(info)) {
           last++;
           break;
         }
@@ -130,9 +130,9 @@ public class BalancedSegmentMergePolicy 
 
         if (maxNumSegments == 1) {
 
-          // Since we must optimize down to 1 segment, the
+          // Since we must merge down to 1 segment, the
           // choice is simple:
-          if (last > 1 || !isOptimized(infos.info(0))) {
+          if (last > 1 || !isMerged(infos.info(0))) {
 
             spec = new MergeSpecification();
             spec.add(new OneMerge(infos.asList().subList(0, last)));

Modified: lucene/dev/trunk/lucene/contrib/misc/src/java/org/apache/lucene/index/IndexSplitter.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/contrib/misc/src/java/org/apache/lucene/index/IndexSplitter.java?rev=1201036&r1=1201035&r2=1201036&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/contrib/misc/src/java/org/apache/lucene/index/IndexSplitter.java (original)
+++ lucene/dev/trunk/lucene/contrib/misc/src/java/org/apache/lucene/index/IndexSplitter.java Fri Nov 11 19:50:51 2011
@@ -36,8 +36,8 @@ import org.apache.lucene.store.FSDirecto
  *
  * <p>This tool does file-level copying of segments files.
  * This means it's unable to split apart a single segment
- * into multiple segments.  For example if your index is
- * optimized, this tool won't help.  Also, it does basic
+ * into multiple segments.  For example if your index is a
+ * single segment, this tool won't help.  Also, it does basic
  * file-level copying (using simple
  * File{In,Out}putStream) so it will not work with non
  * FSDirectory Directory impls.</p>

Modified: lucene/dev/trunk/lucene/contrib/misc/src/java/org/apache/lucene/misc/IndexMergeTool.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/contrib/misc/src/java/org/apache/lucene/misc/IndexMergeTool.java?rev=1201036&r1=1201035&r2=1201036&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/contrib/misc/src/java/org/apache/lucene/misc/IndexMergeTool.java (original)
+++ lucene/dev/trunk/lucene/contrib/misc/src/java/org/apache/lucene/misc/IndexMergeTool.java Fri Nov 11 19:50:51 2011
@@ -50,8 +50,8 @@ public class IndexMergeTool {
     System.out.println("Merging...");
     writer.addIndexes(indexes);
 
-    System.out.println("Optimizing...");
-    writer.optimize();
+    System.out.println("Full merge...");
+    writer.forceMerge(1);
     writer.close();
     System.out.println("Done.");
   }

Modified: lucene/dev/trunk/lucene/contrib/misc/src/test/org/apache/lucene/index/TestIndexSplitter.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/contrib/misc/src/test/org/apache/lucene/index/TestIndexSplitter.java?rev=1201036&r1=1201035&r2=1201036&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/contrib/misc/src/test/org/apache/lucene/index/TestIndexSplitter.java (original)
+++ lucene/dev/trunk/lucene/contrib/misc/src/test/org/apache/lucene/index/TestIndexSplitter.java Fri Nov 11 19:50:51 2011
@@ -94,7 +94,7 @@ public class TestIndexSplitter extends L
     fsDir.close();
   }
 
-  public void testDeleteThenOptimize() throws Exception {
+  public void testDeleteThenFullMerge() throws Exception {
     // Create directories where the indexes will reside
     File indexPath = new File(TEMP_DIR, "testfilesplitter");
     _TestUtil.rmDir(indexPath);
@@ -134,7 +134,7 @@ public class TestIndexSplitter extends L
     indexReader.close();
     fsDirDest.close();
 
-    // Optimize the split index
+    // Fully merge the split index
     mergePolicy = new LogByteSizeMergePolicy();
     mergePolicy.setNoCFSRatio(1);
     iwConfig = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))
@@ -142,7 +142,7 @@ public class TestIndexSplitter extends L
                    .setMergePolicy(mergePolicy);
     fsDirDest = newFSDirectory(indexSplitPath);
     indexWriter = new IndexWriter(fsDirDest, iwConfig);
-    indexWriter.optimize();
+    indexWriter.forceMerge(1);
     indexWriter.close();
     fsDirDest.close();
 

Modified: lucene/dev/trunk/lucene/contrib/misc/src/test/org/apache/lucene/index/codecs/appending/TestAppendingCodec.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/contrib/misc/src/test/org/apache/lucene/index/codecs/appending/TestAppendingCodec.java?rev=1201036&r1=1201035&r2=1201036&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/contrib/misc/src/test/org/apache/lucene/index/codecs/appending/TestAppendingCodec.java (original)
+++ lucene/dev/trunk/lucene/contrib/misc/src/test/org/apache/lucene/index/codecs/appending/TestAppendingCodec.java Fri Nov 11 19:50:51 2011
@@ -121,7 +121,7 @@ public class TestAppendingCodec extends 
     writer.addDocument(doc);
     writer.commit();
     writer.addDocument(doc);
-    writer.optimize();
+    writer.forceMerge(1);
     writer.close();
     IndexReader reader = IndexReader.open(dir, null, true, 1);
     assertEquals(2, reader.numDocs());

Modified: lucene/dev/trunk/lucene/contrib/sandbox/src/test/org/apache/lucene/sandbox/queries/DuplicateFilterTest.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/contrib/sandbox/src/test/org/apache/lucene/sandbox/queries/DuplicateFilterTest.java?rev=1201036&r1=1201035&r2=1201036&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/contrib/sandbox/src/test/org/apache/lucene/sandbox/queries/DuplicateFilterTest.java (original)
+++ lucene/dev/trunk/lucene/contrib/sandbox/src/test/org/apache/lucene/sandbox/queries/DuplicateFilterTest.java Fri Nov 11 19:50:51 2011
@@ -57,7 +57,7 @@ public class DuplicateFilterTest extends
 
     // Until we fix LUCENE-2348, the index must
     // have only 1 segment:
-    writer.optimize();
+    writer.forceMerge(1);
 
     reader = writer.getReader();
     writer.close();

Modified: lucene/dev/trunk/lucene/contrib/sandbox/src/test/org/apache/lucene/sandbox/queries/regex/TestSpanRegexQuery.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/contrib/sandbox/src/test/org/apache/lucene/sandbox/queries/regex/TestSpanRegexQuery.java?rev=1201036&r1=1201035&r2=1201036&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/contrib/sandbox/src/test/org/apache/lucene/sandbox/queries/regex/TestSpanRegexQuery.java (original)
+++ lucene/dev/trunk/lucene/contrib/sandbox/src/test/org/apache/lucene/sandbox/queries/regex/TestSpanRegexQuery.java Fri Nov 11 19:50:51 2011
@@ -68,7 +68,7 @@ public class TestSpanRegexQuery extends 
     doc = new Document();
     doc.add(newField("field", "first auto update", TextField.TYPE_UNSTORED));
     writer.addDocument(doc);
-    writer.optimize();
+    writer.forceMerge(1);
     writer.close();
 
     IndexSearcher searcher = new IndexSearcher(directory, true);
@@ -98,14 +98,14 @@ public class TestSpanRegexQuery extends 
     IndexWriter writerA = new IndexWriter(indexStoreA, newIndexWriterConfig(
         TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.CREATE));
     writerA.addDocument(lDoc);
-    writerA.optimize();
+    writerA.forceMerge(1);
     writerA.close();
 
     // creating second index writer
     IndexWriter writerB = new IndexWriter(indexStoreB, newIndexWriterConfig(
         TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.CREATE));
     writerB.addDocument(lDoc2);
-    writerB.optimize();
+    writerB.forceMerge(1);
     writerB.close();
   }
 }

Modified: lucene/dev/trunk/lucene/contrib/spatial/src/test/org/apache/lucene/spatial/tier/TestCartesian.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/contrib/spatial/src/test/org/apache/lucene/spatial/tier/TestCartesian.java?rev=1201036&r1=1201035&r2=1201036&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/contrib/spatial/src/test/org/apache/lucene/spatial/tier/TestCartesian.java (original)
+++ lucene/dev/trunk/lucene/contrib/spatial/src/test/org/apache/lucene/spatial/tier/TestCartesian.java Fri Nov 11 19:50:51 2011
@@ -141,7 +141,7 @@ public class TestCartesian extends Lucen
    
     writer.commit();
     // TODO: fix CustomScoreQuery usage in testRange/testGeoHashRange so we don't need this.
-    writer.optimize();
+    writer.forceMerge(1);
     writer.close();
   }
 

Modified: lucene/dev/trunk/lucene/src/java/org/apache/lucene/index/DirectoryReader.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/src/java/org/apache/lucene/index/DirectoryReader.java?rev=1201036&r1=1201035&r2=1201036&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/src/java/org/apache/lucene/index/DirectoryReader.java (original)
+++ lucene/dev/trunk/lucene/src/java/org/apache/lucene/index/DirectoryReader.java Fri Nov 11 19:50:51 2011
@@ -523,16 +523,6 @@ class DirectoryReader extends IndexReade
     subReaders[i].getTermFreqVector(docNumber - starts[i], mapper);
   }
 
-  /**
-   * Checks is the index is optimized (if it has a single segment and no deletions)
-   * @return <code>true</code> if the index is optimized; <code>false</code> otherwise
-   */
-  @Override
-  public boolean isOptimized() {
-    ensureOpen();
-    return segmentInfos.size() == 1 && !hasDeletions();
-  }
-
   @Override
   public int numDocs() {
     // Don't call ensureOpen() here (it could affect performance)
@@ -953,8 +943,8 @@ class DirectoryReader extends IndexReade
     Directory dir;
     long generation;
     long version;
-    final boolean isOptimized;
     final Map<String,String> userData;
+    private final int segmentCount;
 
     ReaderCommit(SegmentInfos infos, Directory dir) throws IOException {
       segmentsFileName = infos.getCurrentSegmentFileName();
@@ -963,7 +953,7 @@ class DirectoryReader extends IndexReade
       files = Collections.unmodifiableCollection(infos.files(dir, true));
       version = infos.getVersion();
       generation = infos.getGeneration();
-      isOptimized = infos.size() == 1 && !infos.info(0).hasDeletions();
+      segmentCount = infos.size();
     }
 
     @Override
@@ -972,8 +962,8 @@ class DirectoryReader extends IndexReade
     }
 
     @Override
-    public boolean isOptimized() {
-      return isOptimized;
+    public int getSegmentCount() {
+      return segmentCount;
     }
 
     @Override

Modified: lucene/dev/trunk/lucene/src/java/org/apache/lucene/index/FilterIndexReader.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/src/java/org/apache/lucene/index/FilterIndexReader.java?rev=1201036&r1=1201035&r2=1201036&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/src/java/org/apache/lucene/index/FilterIndexReader.java (original)
+++ lucene/dev/trunk/lucene/src/java/org/apache/lucene/index/FilterIndexReader.java Fri Nov 11 19:50:51 2011
@@ -431,12 +431,6 @@ public class FilterIndexReader extends I
   }
   
   @Override
-  public boolean isOptimized() {
-    ensureOpen();
-    return in.isOptimized();
-  }
-  
-  @Override
   public IndexReader[] getSequentialSubReaders() {
     return in.getSequentialSubReaders();
   }

Modified: lucene/dev/trunk/lucene/src/java/org/apache/lucene/index/IndexCommit.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/src/java/org/apache/lucene/index/IndexCommit.java?rev=1201036&r1=1201035&r2=1201036&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/src/java/org/apache/lucene/index/IndexCommit.java (original)
+++ lucene/dev/trunk/lucene/src/java/org/apache/lucene/index/IndexCommit.java Fri Nov 11 19:50:51 2011
@@ -75,8 +75,8 @@ public abstract class IndexCommit implem
 
   public abstract boolean isDeleted();
 
-  /** Returns true if this commit is an optimized index. */
-  public abstract boolean isOptimized();
+  /** Returns number of segments referenced by this commit. */
+  public abstract int getSegmentCount();
 
   /** Two IndexCommits are equal if both their Directory and versions are equal. */
   @Override

Modified: lucene/dev/trunk/lucene/src/java/org/apache/lucene/index/IndexFileDeleter.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/src/java/org/apache/lucene/index/IndexFileDeleter.java?rev=1201036&r1=1201035&r2=1201036&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/src/java/org/apache/lucene/index/IndexFileDeleter.java (original)
+++ lucene/dev/trunk/lucene/src/java/org/apache/lucene/index/IndexFileDeleter.java Fri Nov 11 19:50:51 2011
@@ -19,11 +19,9 @@ package org.apache.lucene.index;
 
 import java.io.FileNotFoundException;
 import java.io.IOException;
-import java.io.PrintStream;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.Collections;
-import java.util.Date;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
@@ -653,8 +651,8 @@ final class IndexFileDeleter {
     Collection<CommitPoint> commitsToDelete;
     long version;
     long generation;
-    final boolean isOptimized;
     final Map<String,String> userData;
+    private final int segmentCount;
 
     public CommitPoint(Collection<CommitPoint> commitsToDelete, Directory directory, SegmentInfos segmentInfos) throws IOException {
       this.directory = directory;
@@ -664,7 +662,7 @@ final class IndexFileDeleter {
       version = segmentInfos.getVersion();
       generation = segmentInfos.getGeneration();
       files = Collections.unmodifiableCollection(segmentInfos.files(directory, true));
-      isOptimized = segmentInfos.size() == 1 && !segmentInfos.info(0).hasDeletions();
+      segmentCount = segmentInfos.size();
     }
 
     @Override
@@ -673,8 +671,8 @@ final class IndexFileDeleter {
     }
 
     @Override
-    public boolean isOptimized() {
-      return isOptimized;
+    public int getSegmentCount() {
+      return segmentCount;
     }
 
     @Override

Modified: lucene/dev/trunk/lucene/src/java/org/apache/lucene/index/IndexReader.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/src/java/org/apache/lucene/index/IndexReader.java?rev=1201036&r1=1201035&r2=1201036&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/src/java/org/apache/lucene/index/IndexReader.java (original)
+++ lucene/dev/trunk/lucene/src/java/org/apache/lucene/index/IndexReader.java Fri Nov 11 19:50:51 2011
@@ -28,7 +28,6 @@ import java.util.concurrent.atomic.Atomi
 
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.DocumentStoredFieldVisitor;
-import org.apache.lucene.index.codecs.PostingsFormat;
 import org.apache.lucene.index.codecs.PerDocValues;
 import org.apache.lucene.index.values.IndexDocValues;
 import org.apache.lucene.search.FieldCache; // javadocs
@@ -805,16 +804,6 @@ public abstract class IndexReader implem
   }
 
   /**
-   * Checks is the index is optimized (if it has a single segment and 
-   * no deletions).  Not implemented in the IndexReader base class.
-   * @return <code>true</code> if the index is optimized; <code>false</code> otherwise
-   * @throws UnsupportedOperationException unless overridden in subclass
-   */
-  public boolean isOptimized() {
-    throw new UnsupportedOperationException("This reader does not support this method.");
-  }
-  
-  /**
    * Return an array of term frequency vectors for the specified document.
    * The array contains a vector for each vectorized field in the document.
    * Each vector contains terms and frequencies for all terms in a given vectorized field.

Modified: lucene/dev/trunk/lucene/src/java/org/apache/lucene/index/IndexUpgrader.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/src/java/org/apache/lucene/index/IndexUpgrader.java?rev=1201036&r1=1201035&r2=1201036&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/src/java/org/apache/lucene/index/IndexUpgrader.java (original)
+++ lucene/dev/trunk/lucene/src/java/org/apache/lucene/index/IndexUpgrader.java Fri Nov 11 19:50:51 2011
@@ -35,7 +35,7 @@ import java.util.Collection;
   *  java -cp lucene-core.jar org.apache.lucene.index.IndexUpgrader [-delete-prior-commits] [-verbose] indexDir
   * </pre>
   * Alternatively this class can be instantiated and {@link #upgrade} invoked. It uses {@link UpgradeIndexMergePolicy}
-  * and triggers the upgrade via an optimize request to {@link IndexWriter}.
+  * and triggers the upgrade via an forceMerge request to {@link IndexWriter}.
   * <p>This tool keeps only the last commit in an index; for this
   * reason, if the incoming index has more than one commit, the tool
   * refuses to run by default. Specify {@code -delete-prior-commits}
@@ -45,7 +45,7 @@ import java.util.Collection;
   * <p><b>Warning:</b> This tool may reorder documents if the index was partially
   * upgraded before execution (e.g., documents were added). If your application relies
   * on &quot;monotonicity&quot; of doc IDs (which means that the order in which the documents
-  * were added to the index is preserved), do a full optimize instead.
+  * were added to the index is preserved), do a full forceMerge instead.
   * The {@link MergePolicy} set by {@link IndexWriterConfig} may also reorder
   * documents.
   */
@@ -134,7 +134,7 @@ public final class IndexUpgrader {
       if (infoStream != null) {
         infoStream.message("IndexUpgrader", "Upgrading all pre-" + Constants.LUCENE_MAIN_VERSION + " segments of index directory '" + dir + "' to version " + Constants.LUCENE_MAIN_VERSION + "...");
       }
-      w.optimize();
+      w.forceMerge(1);
       if (infoStream != null) {
         infoStream.message("IndexUpgrader", "All segments upgraded to version " + Constants.LUCENE_MAIN_VERSION);
       }

Modified: lucene/dev/trunk/lucene/src/java/org/apache/lucene/index/IndexWriter.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/src/java/org/apache/lucene/index/IndexWriter.java?rev=1201036&r1=1201035&r2=1201036&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/src/java/org/apache/lucene/index/IndexWriter.java (original)
+++ lucene/dev/trunk/lucene/src/java/org/apache/lucene/index/IndexWriter.java Fri Nov 11 19:50:51 2011
@@ -101,11 +101,6 @@ import org.apache.lucene.util.TwoPhaseCo
   addDocument calls (see <a href="#mergePolicy">below</a>
   for changing the {@link MergeScheduler}).</p>
 
-  <p>If an index will not have more documents added for a while and optimal search
-  performance is desired, then either the full {@link #optimize() optimize}
-  method or partial {@link #optimize(int)} method should be
-  called before the index is closed.</p>
-
   <p>Opening an <code>IndexWriter</code> creates a lock file for the directory in use. Trying to open
   another <code>IndexWriter</code> on the same directory will lead to a
   {@link LockObtainFailedException}. The {@link LockObtainFailedException}
@@ -134,9 +129,8 @@ import org.apache.lucene.util.TwoPhaseCo
   The {@link MergePolicy} is invoked whenever there are
   changes to the segments in the index.  Its role is to
   select which merges to do, if any, and return a {@link
-  MergePolicy.MergeSpecification} describing the merges.  It
-  also selects merges to do for optimize().  (The default is
-  {@link LogByteSizeMergePolicy}.  Then, the {@link
+  MergePolicy.MergeSpecification} describing the merges.
+  The default is {@link LogByteSizeMergePolicy}.  Then, the {@link
   MergeScheduler} is invoked with the requested merges and
   it decides when and how to run the merges.  The default is
   {@link ConcurrentMergeScheduler}. </p>
@@ -223,8 +217,9 @@ public class IndexWriter implements Clos
   private DocumentsWriter docWriter;
   final IndexFileDeleter deleter;
 
-  private Map<SegmentInfo,Boolean> segmentsToOptimize = new HashMap<SegmentInfo,Boolean>();           // used by optimize to note those needing optimization
-  private int optimizeMaxNumSegments;
+  // used by forceMerge to note those needing merging
+  private Map<SegmentInfo,Boolean> segmentsToMerge = new HashMap<SegmentInfo,Boolean>();
+  private int mergeMaxNumSegments;
 
   private Lock writeLock;
 
@@ -1215,7 +1210,7 @@ public class IndexWriter implements Clos
    * readers/searchers are open against the index, and up to
    * 2X the size of all segments being merged when
    * readers/searchers are open against the index (see
-   * {@link #optimize()} for details). The sequence of
+   * {@link #forceMerge(int)} for details). The sequence of
    * primitive merge operations performed is governed by the
    * merge policy.
    *
@@ -1565,55 +1560,52 @@ public class IndexWriter implements Clos
   final InfoStream infoStream;
 
   /**
-   * Requests an "optimize" operation on an index, priming the index
-   * for the fastest available search. Traditionally this has meant
-   * merging all segments into a single segment as is done in the
-   * default merge policy, but individual merge policies may implement
-   * optimize in different ways.
-   *
-   * <p> Optimize is a very costly operation, so you
-   * should only do it if your search performance really
-   * requires it.  Many search applications do fine never
-   * calling optimize. </p>
+   * Forces merge policy to merge segments until there's <=
+   * maxNumSegments.  The actual merges to be
+   * executed are determined by the {@link MergePolicy}.
+   *
+   * <p>This is a horribly costly operation, especially when
+   * you pass a small {@code maxNumSegments}; usually you
+   * should only call this if the index is static (will no
+   * longer be changed).</p>
    *
-   * <p>Note that optimize requires 2X the index size free
+   * <p>Note that this requires up to 2X the index size free
    * space in your Directory (3X if you're using compound
    * file format).  For example, if your index size is 10 MB
-   * then you need 20 MB free for optimize to complete (30
+   * then you need up to 20 MB free for this to complete (30
    * MB if you're using compound file format).  Also,
-   * it's best to call {@link #commit()} after the optimize
-   * completes to allow IndexWriter to free up disk space.</p>
+   * it's best to call {@link #commit()} afterwards,
+   * to allow IndexWriter to free up disk space.</p>
    *
-   * <p>If some but not all readers re-open while an
-   * optimize is underway, this will cause > 2X temporary
+   * <p>If some but not all readers re-open while merging
+   * is underway, this will cause > 2X temporary
    * space to be consumed as those new readers will then
-   * hold open the partially optimized segments at that
-   * time.  It is best not to re-open readers while optimize
-   * is running.</p>
+   * hold open the temporary segments at that time.  It is
+   * best not to re-open readers while merging is running.</p>
    *
    * <p>The actual temporary usage could be much less than
    * these figures (it depends on many factors).</p>
    *
-   * <p>In general, once the optimize completes, the total size of the
+   * <p>In general, once the this completes, the total size of the
    * index will be less than the size of the starting index.
    * It could be quite a bit smaller (if there were many
    * pending deletes) or just slightly smaller.</p>
    *
-   * <p>If an Exception is hit during optimize(), for example
+   * <p>If an Exception is hit, for example
    * due to disk full, the index will not be corrupt and no
    * documents will have been lost.  However, it may have
-   * been partially optimized (some segments were merged but
+   * been partially merged (some segments were merged but
    * not all), and it's possible that one of the segments in
    * the index will be in non-compound format even when
    * using compound file format.  This will occur when the
    * Exception is hit during conversion of the segment into
    * compound format.</p>
    *
-   * <p>This call will optimize those segments present in
+   * <p>This call will merge those segments present in
    * the index when the call started.  If other threads are
    * still adding documents and flushing segments, those
-   * newly created segments will not be optimized unless you
-   * call optimize again.</p>
+   * newly created segments will not be merged unless you
+   * call forceMerge again.</p>
    *
    * <p><b>NOTE</b>: if this method hits an OutOfMemoryError
    * you should immediately close the writer.  See <a
@@ -1626,96 +1618,66 @@ public class IndexWriter implements Clos
    *
    * @throws CorruptIndexException if the index is corrupt
    * @throws IOException if there is a low-level IO error
-   * @see MergePolicy#findMergesForOptimize
-  */
-  public void optimize() throws CorruptIndexException, IOException {
-    optimize(true);
-  }
-
-  /**
-   * Optimize the index down to <= maxNumSegments.  If
-   * maxNumSegments==1 then this is the same as {@link
-   * #optimize()}.
-   *
-   * <p><b>NOTE</b>: if this method hits an OutOfMemoryError
-   * you should immediately close the writer.  See <a
-   * href="#OOME">above</a> for details.</p>
+   * @see MergePolicy#findMerges
    *
    * @param maxNumSegments maximum number of segments left
-   * in the index after optimization finishes
-   */
-  public void optimize(int maxNumSegments) throws CorruptIndexException, IOException {
-    optimize(maxNumSegments, true);
-  }
-
-  /** Just like {@link #optimize()}, except you can specify
-   *  whether the call should block until the optimize
-   *  completes.  This is only meaningful with a
-   *  {@link MergeScheduler} that is able to run merges in
-   *  background threads.
-   *
-   * <p><b>NOTE</b>: if this method hits an OutOfMemoryError
-   * you should immediately close the writer.  See <a
-   * href="#OOME">above</a> for details.</p>
-   */
-  public void optimize(boolean doWait) throws CorruptIndexException, IOException {
-    optimize(1, doWait);
+   * in the index after merging finishes
+  */
+  public void forceMerge(int maxNumSegments) throws CorruptIndexException, IOException {
+    forceMerge(maxNumSegments, true);
   }
 
-  /** Just like {@link #optimize(int)}, except you can
-   *  specify whether the call should block until the
-   *  optimize completes.  This is only meaningful with a
+  /** Just like {@link #forceMerge(int)}, except you can
+   *  specify whether the call should block until
+   *  all merging completes.  This is only meaningful with a
    *  {@link MergeScheduler} that is able to run merges in
    *  background threads.
    *
-   * <p><b>NOTE</b>: if this method hits an OutOfMemoryError
-   * you should immediately close the writer.  See <a
-   * href="#OOME">above</a> for details.</p>
+   *  <p><b>NOTE</b>: if this method hits an OutOfMemoryError
+   *  you should immediately close the writer.  See <a
+   *  href="#OOME">above</a> for details.</p>
    */
-  public void optimize(int maxNumSegments, boolean doWait) throws CorruptIndexException, IOException {
+  public void forceMerge(int maxNumSegments, boolean doWait) throws CorruptIndexException, IOException {
     ensureOpen();
 
     if (maxNumSegments < 1)
       throw new IllegalArgumentException("maxNumSegments must be >= 1; got " + maxNumSegments);
 
     if (infoStream != null) {
-      infoStream.message("IW", "optimize: index now " + segString());
-      infoStream.message("IW", "now flush at optimize");
+      infoStream.message("IW", "forceMerge: index now " + segString());
+      infoStream.message("IW", "now flush at forceMerge");
     }
 
     flush(true, true);
 
     synchronized(this) {
       resetMergeExceptions();
-      segmentsToOptimize.clear();
+      segmentsToMerge.clear();
       for(SegmentInfo info : segmentInfos) {
-        segmentsToOptimize.put(info, Boolean.TRUE);
+        segmentsToMerge.put(info, Boolean.TRUE);
       }
-      optimizeMaxNumSegments = maxNumSegments;
+      mergeMaxNumSegments = maxNumSegments;
 
-      // Now mark all pending & running merges as optimize
-      // merge:
+      // Now mark all pending & running merges as isMaxNumSegments:
       for(final MergePolicy.OneMerge merge  : pendingMerges) {
-        merge.optimize = true;
-        merge.maxNumSegmentsOptimize = maxNumSegments;
-        segmentsToOptimize.put(merge.info, Boolean.TRUE);
+        merge.maxNumSegments = maxNumSegments;
+        segmentsToMerge.put(merge.info, Boolean.TRUE);
       }
 
       for ( final MergePolicy.OneMerge merge: runningMerges ) {
-        merge.optimize = true;
-        merge.maxNumSegmentsOptimize = maxNumSegments;
-        segmentsToOptimize.put(merge.info, Boolean.TRUE);
+        merge.maxNumSegments = maxNumSegments;
+        segmentsToMerge.put(merge.info, Boolean.TRUE);
       }
     }
 
-    maybeMerge(maxNumSegments, true);
+    maybeMerge(maxNumSegments);
 
     if (doWait) {
       synchronized(this) {
         while(true) {
 
           if (hitOOM) {
-            throw new IllegalStateException("this writer hit an OutOfMemoryError; cannot complete optimize");
+            throw new IllegalStateException("this writer hit an OutOfMemoryError; cannot complete forceMerge");
           }
 
           if (mergeExceptions.size() > 0) {
@@ -1724,7 +1686,7 @@ public class IndexWriter implements Clos
             final int size = mergeExceptions.size();
             for(int i=0;i<size;i++) {
               final MergePolicy.OneMerge merge = mergeExceptions.get(i);
-              if (merge.optimize) {
+              if (merge.maxNumSegments != -1) {
                 IOException err = new IOException("background merge hit exception: " + merge.segString(directory));
                 final Throwable t = merge.getException();
                 if (t != null)
@@ -1734,7 +1696,7 @@ public class IndexWriter implements Clos
             }
           }
 
-          if (optimizeMergesPending())
+          if (maxNumSegmentsMergesPending())
             doWait();
           else
             break;
@@ -1743,26 +1705,26 @@ public class IndexWriter implements Clos
 
       // If close is called while we are still
       // running, throw an exception so the calling
-      // thread will know the optimize did not
+      // thread will know merging did not
       // complete
       ensureOpen();
     }
 
     // NOTE: in the ConcurrentMergeScheduler case, when
     // doWait is false, we can return immediately while
-    // background threads accomplish the optimization
+    // background threads accomplish the merging
   }
 
   /** Returns true if any merges in pendingMerges or
-   *  runningMerges are optimization merges. */
-  private synchronized boolean optimizeMergesPending() {
+   *  runningMerges are maxNumSegments merges. */
+  private synchronized boolean maxNumSegmentsMergesPending() {
     for (final MergePolicy.OneMerge merge : pendingMerges) {
-      if (merge.optimize)
+      if (merge.maxNumSegments != -1)
         return true;
     }
 
     for (final MergePolicy.OneMerge merge : runningMerges) {
-      if (merge.optimize)
+      if (merge.maxNumSegments != -1)
         return true;
     }
 
@@ -1841,7 +1803,7 @@ public class IndexWriter implements Clos
 
     // NOTE: in the ConcurrentMergeScheduler case, when
     // doWait is false, we can return immediately while
-    // background threads accomplish the optimization
+    // background threads accomplish the merging
   }
 
 
@@ -1854,14 +1816,14 @@ public class IndexWriter implements Clos
    *
    *  <p>When an index
    *  has many document deletions (or updates to existing
-   *  documents), it's best to either call optimize or
+   *  documents), it's best to either call forceMerge or
    *  expungeDeletes to remove all unused data in the index
    *  associated with the deleted documents.  To see how
    *  many deletions you have pending in your index, call
    *  {@link IndexReader#numDeletedDocs}
    *  This saves disk space and memory usage while
    *  searching.  expungeDeletes should be somewhat faster
-   *  than optimize since it does not insist on reducing the
+   *  than forceMerge since it does not insist on reducing the
    *  index to a single segment (though, this depends on the
    *  {@link MergePolicy}; see {@link
    *  MergePolicy#findMergesToExpungeDeletes}.). Note that
@@ -1896,22 +1858,18 @@ public class IndexWriter implements Clos
    * href="#OOME">above</a> for details.</p>
    */
   public final void maybeMerge() throws CorruptIndexException, IOException {
-    maybeMerge(false);
-  }
-
-  private final void maybeMerge(boolean optimize) throws CorruptIndexException, IOException {
-    maybeMerge(1, optimize);
+    maybeMerge(-1);
   }
 
-  private final void maybeMerge(int maxNumSegmentsOptimize, boolean optimize) throws CorruptIndexException, IOException {
+  private final void maybeMerge(int maxNumSegments) throws CorruptIndexException, IOException {
     ensureOpen(false);
-    updatePendingMerges(maxNumSegmentsOptimize, optimize);
+    updatePendingMerges(maxNumSegments);
     mergeScheduler.merge(this);
   }
 
-  private synchronized void updatePendingMerges(int maxNumSegmentsOptimize, boolean optimize)
+  private synchronized void updatePendingMerges(int maxNumSegments)
     throws CorruptIndexException, IOException {
-    assert !optimize || maxNumSegmentsOptimize > 0;
+    assert maxNumSegments == -1 || maxNumSegments > 0;
 
     if (stopMerges) {
       return;
@@ -1923,14 +1881,13 @@ public class IndexWriter implements Clos
     }
 
     final MergePolicy.MergeSpecification spec;
-    if (optimize) {
-      spec = mergePolicy.findMergesForOptimize(segmentInfos, maxNumSegmentsOptimize, Collections.unmodifiableMap(segmentsToOptimize));
+    if (maxNumSegments != -1) {
+      spec = mergePolicy.findForcedMerges(segmentInfos, maxNumSegments, Collections.unmodifiableMap(segmentsToMerge));
       if (spec != null) {
         final int numMerges = spec.merges.size();
         for(int i=0;i<numMerges;i++) {
           final MergePolicy.OneMerge merge = spec.merges.get(i);
-          merge.optimize = true;
-          merge.maxNumSegmentsOptimize = maxNumSegmentsOptimize;
+          merge.maxNumSegments = maxNumSegments;
         }
       }
 
@@ -2083,7 +2040,7 @@ public class IndexWriter implements Clos
    *
    * <p>NOTE: this method will forcefully abort all merges
    *    in progress.  If other threads are running {@link
-   *    #optimize()}, {@link #addIndexes(IndexReader[])} or
+   *    #forceMerge}, {@link #addIndexes(IndexReader[])} or
    *    {@link #expungeDeletes} methods, they may receive
    *    {@link MergePolicy.MergeAbortedException}s.
    */
@@ -2390,7 +2347,7 @@ public class IndexWriter implements Clos
    * (including the starting index). If readers/searchers
    * are open against the starting index, then temporary
    * free space required will be higher by the size of the
-   * starting index (see {@link #optimize()} for details).
+   * starting index (see {@link #forceMerge(int)} for details).
    *
    * <p>
    * <b>NOTE:</b> this method only copies the segments of the incoming indexes
@@ -2452,7 +2409,7 @@ public class IndexWriter implements Clos
                 && versionComparator.compare(info.getVersion(), "3.1") >= 0;
           }
           
-          IOContext context = new IOContext(new MergeInfo(info.docCount, info.sizeInBytes(true), true, false));
+          IOContext context = new IOContext(new MergeInfo(info.docCount, info.sizeInBytes(true), true, -1));
           
           if (createCFS) {
             copySegmentIntoCFS(info, newSegName, context);
@@ -2476,7 +2433,6 @@ public class IndexWriter implements Clos
   }
 
   /** Merges the provided indexes into this index.
-   * <p>After this completes, the index is optimized. </p>
    * <p>The provided IndexReaders are not closed.</p>
    *
    * <p><b>NOTE:</b> while this is running, any attempts to
@@ -2512,7 +2468,7 @@ public class IndexWriter implements Clos
       for (IndexReader indexReader : readers) {
         numDocs += indexReader.numDocs();
        }
-       final IOContext context = new IOContext(new MergeInfo(numDocs, -1, true, false));      
+       final IOContext context = new IOContext(new MergeInfo(numDocs, -1, true, -1));
 
       // TODO: somehow we should fix this merge so it's
       // abortable so that IW.close(false) is able to stop it
@@ -2789,7 +2745,7 @@ public class IndexWriter implements Clos
 
   /**
    * <p>Commits all pending changes (added & deleted
-   * documents, optimizations, segment merges, added
+   * documents, segment merges, added
    * indexes, etc.) to the index, and syncs all referenced
    * index files, such that a reader will see the changes
    * and the index updates will survive an OS or machine
@@ -3199,10 +3155,10 @@ public class IndexWriter implements Clos
     // disk, updating SegmentInfo, etc.:
     readerPool.clear(merge.segments);
 
-    if (merge.optimize) {
-      // cascade the optimize:
-      if (!segmentsToOptimize.containsKey(merge.info)) {
-        segmentsToOptimize.put(merge.info, Boolean.FALSE);
+    if (merge.maxNumSegments != -1) {
+      // cascade the forceMerge:
+      if (!segmentsToMerge.containsKey(merge.info)) {
+        segmentsToMerge.put(merge.info, Boolean.FALSE);
       }
     }
 
@@ -3216,7 +3172,7 @@ public class IndexWriter implements Clos
     }
 
     // Set the exception on the merge, so if
-    // optimize() is waiting on us it sees the root
+    // forceMerge is waiting on us it sees the root
     // cause exception:
     merge.setException(t);
     addMergeException(merge);
@@ -3283,8 +3239,8 @@ public class IndexWriter implements Clos
           // This merge (and, generally, any change to the
           // segments) may now enable new merges, so we call
           // merge policy & update pending merges.
-          if (success && !merge.isAborted() && (merge.optimize || (!closed && !closing))) {
-            updatePendingMerges(merge.maxNumSegmentsOptimize, merge.optimize);
+          if (success && !merge.isAborted() && (merge.maxNumSegments != -1 || (!closed && !closing))) {
+            updatePendingMerges(merge.maxNumSegments);
           }
         }
       }
@@ -3328,9 +3284,8 @@ public class IndexWriter implements Clos
       if (info.dir != directory) {
         isExternal = true;
       }
-      if (segmentsToOptimize.containsKey(info)) {
-        merge.optimize = true;
-        merge.maxNumSegmentsOptimize = optimizeMaxNumSegments;
+      if (segmentsToMerge.containsKey(info)) {
+        merge.maxNumSegments = mergeMaxNumSegments;
       }
     }
 
@@ -3393,7 +3348,7 @@ public class IndexWriter implements Clos
     assert testPoint("startMergeInit");
 
     assert merge.registerDone;
-    assert !merge.optimize || merge.maxNumSegmentsOptimize > 0;
+    assert merge.maxNumSegments == -1 || merge.maxNumSegments > 0;
 
     if (hitOOM) {
       throw new IllegalStateException("this writer hit an OutOfMemoryError; cannot merge");
@@ -3443,7 +3398,7 @@ public class IndexWriter implements Clos
     // Lock order: IW -> BD
     bufferedDeletesStream.prune(segmentInfos);
     Map<String,String> details = new HashMap<String,String>();
-    details.put("optimize", Boolean.toString(merge.optimize));
+    details.put("mergeMaxNumSegments", ""+merge.maxNumSegments);
     details.put("mergeFactor", Integer.toString(merge.segments.size()));
     setDiagnostics(merge.info, "merge", details);
 
@@ -3495,7 +3450,7 @@ public class IndexWriter implements Clos
    *  the synchronized lock on IndexWriter instance. */
   final synchronized void mergeFinish(MergePolicy.OneMerge merge) throws IOException {
 
-    // Optimize, addIndexes or finishMerges may be waiting
+    // forceMerge, addIndexes or finishMerges may be waiting
     // on merges to finish.
     notifyAll();
 
@@ -4090,7 +4045,7 @@ public class IndexWriter implements Clos
    * <b>NOTE:</b> the set {@link PayloadProcessorProvider} will be in effect
    * immediately, potentially for already running merges too. If you want to be
    * sure it is used for further operations only, such as {@link #addIndexes} or
-   * {@link #optimize}, you can call {@link #waitForMerges()} before.
+   * {@link #forceMerge}, you can call {@link #waitForMerges()} before.
    */
   public void setPayloadProcessorProvider(PayloadProcessorProvider pcp) {
     ensureOpen();

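For callers of the removed optimize() methods, here is a minimal migration sketch against the renamed API; the Version constant, analyzer and RAMDirectory are illustrative only and not part of this change:

    import org.apache.lucene.analysis.standard.StandardAnalyzer;
    import org.apache.lucene.index.IndexWriter;
    import org.apache.lucene.index.IndexWriterConfig;
    import org.apache.lucene.store.Directory;
    import org.apache.lucene.store.RAMDirectory;
    import org.apache.lucene.util.Version;

    public class ForceMergeMigration {
      public static void main(String[] args) throws Exception {
        Directory dir = new RAMDirectory();
        IndexWriter writer = new IndexWriter(dir,
            new IndexWriterConfig(Version.LUCENE_40, new StandardAnalyzer(Version.LUCENE_40)));
        // ... add / update / delete documents ...

        // Before this change: writer.optimize();
        // Now the caller states the target segment count explicitly:
        writer.forceMerge(1);           // waits for the forced merge to finish
        // writer.forceMerge(1, false); // or return immediately and let background merges finish

        writer.close();
        dir.close();
      }
    }
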
Modified: lucene/dev/trunk/lucene/src/java/org/apache/lucene/index/IndexWriterConfig.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/src/java/org/apache/lucene/index/IndexWriterConfig.java?rev=1201036&r1=1201035&r2=1201036&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/src/java/org/apache/lucene/index/IndexWriterConfig.java (original)
+++ lucene/dev/trunk/lucene/src/java/org/apache/lucene/index/IndexWriterConfig.java Fri Nov 11 19:50:51 2011
@@ -518,7 +518,7 @@ public final class IndexWriterConfig imp
    * Expert: {@link MergePolicy} is invoked whenever there are changes to the
    * segments in the index. Its role is to select which merges to do, if any,
    * and return a {@link MergePolicy.MergeSpecification} describing the merges.
-   * It also selects merges to do for optimize(). (The default is
+   * It also selects merges to do for forceMerge. (The default is
    * {@link LogByteSizeMergePolicy}.
    *
    * <p>Only takes effect when IndexWriter is first created. */

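Because the MergePolicy now also selects the merges run by forceMerge, a short sketch of wiring a policy through IndexWriterConfig; everything except the setMergePolicy call and the policy class is illustrative:

    import org.apache.lucene.analysis.standard.StandardAnalyzer;
    import org.apache.lucene.index.IndexWriterConfig;
    import org.apache.lucene.index.LogByteSizeMergePolicy;
    import org.apache.lucene.util.Version;

    public class MergePolicyConfigSketch {
      static IndexWriterConfig newConfig() {
        IndexWriterConfig conf = new IndexWriterConfig(Version.LUCENE_40,
            new StandardAnalyzer(Version.LUCENE_40));
        // The policy chooses merges for both natural merging and forceMerge:
        conf.setMergePolicy(new LogByteSizeMergePolicy());
        return conf; // only takes effect when the IndexWriter is first created
      }
    }
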
Modified: lucene/dev/trunk/lucene/src/java/org/apache/lucene/index/LogByteSizeMergePolicy.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/src/java/org/apache/lucene/index/LogByteSizeMergePolicy.java?rev=1201036&r1=1201035&r2=1201036&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/src/java/org/apache/lucene/index/LogByteSizeMergePolicy.java (original)
+++ lucene/dev/trunk/lucene/src/java/org/apache/lucene/index/LogByteSizeMergePolicy.java Fri Nov 11 19:50:51 2011
@@ -31,13 +31,13 @@ public class LogByteSizeMergePolicy exte
   public static final double DEFAULT_MAX_MERGE_MB = 2048;
 
   /** Default maximum segment size.  A segment of this size
-   *  or larger will never be merged during optimize.  @see setMaxMergeMBForOptimize */
-  public static final double DEFAULT_MAX_MERGE_MB_FOR_OPTIMIZE = Long.MAX_VALUE;
+   *  or larger will never be merged during forceMerge.  @see setMaxMergeMBForForcedMerge */
+  public static final double DEFAULT_MAX_MERGE_MB_FOR_MERGE_IF_NEEDED = Long.MAX_VALUE;
 
   public LogByteSizeMergePolicy() {
     minMergeSize = (long) (DEFAULT_MIN_MERGE_MB*1024*1024);
     maxMergeSize = (long) (DEFAULT_MAX_MERGE_MB*1024*1024);
-    maxMergeSizeForOptimize = (long) (DEFAULT_MAX_MERGE_MB_FOR_OPTIMIZE*1024*1024);
+    maxMergeSizeForForcedMerge = (long) (DEFAULT_MAX_MERGE_MB_FOR_MERGE_IF_NEEDED*1024*1024);
   }
   
   @Override
@@ -70,19 +70,19 @@ public class LogByteSizeMergePolicy exte
 
   /** <p>Determines the largest segment (measured by total
    *  byte size of the segment's files, in MB) that may be
-   *  merged with other segments during optimize. Setting
+   *  merged with other segments during forceMerge. Setting
    *  it low will leave the index with more than 1 segment,
-   *  even if {@link IndexWriter#optimize()} is called.*/
-  public void setMaxMergeMBForOptimize(double mb) {
-    maxMergeSizeForOptimize = (long) (mb*1024*1024);
+   *  even if {@link IndexWriter#forceMerge} is called.*/
+  public void setMaxMergeMBForForcedMerge(double mb) {
+    maxMergeSizeForForcedMerge = (long) (mb*1024*1024);
   }
 
   /** Returns the largest segment (measured by total byte
    *  size of the segment's files, in MB) that may be merged
-   *  with other segments during optimize.
-   *  @see #setMaxMergeMBForOptimize */
-  public double getMaxMergeMBForOptimize() {
-    return ((double) maxMergeSizeForOptimize)/1024/1024;
+   *  with other segments during forceMerge.
+   *  @see #setMaxMergeMBForForcedMerge */
+  public double getMaxMergeMBForForcedMerge() {
+    return ((double) maxMergeSizeForForcedMerge)/1024/1024;
   }
 
   /** Sets the minimum size for the lowest level segments.

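A small sketch of the renamed size-limit knob on LogByteSizeMergePolicy; the 5 GB figure is an arbitrary example:

    import org.apache.lucene.index.LogByteSizeMergePolicy;

    public class ForcedMergeSizeLimitSketch {
      static LogByteSizeMergePolicy newPolicy() {
        LogByteSizeMergePolicy mp = new LogByteSizeMergePolicy();
        // Was setMaxMergeMBForOptimize: segments larger than this are left
        // alone even when forceMerge(1) is requested.
        mp.setMaxMergeMBForForcedMerge(5 * 1024);
        return mp;
      }
    }
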
Modified: lucene/dev/trunk/lucene/src/java/org/apache/lucene/index/LogDocMergePolicy.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/src/java/org/apache/lucene/index/LogDocMergePolicy.java?rev=1201036&r1=1201035&r2=1201036&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/src/java/org/apache/lucene/index/LogDocMergePolicy.java (original)
+++ lucene/dev/trunk/lucene/src/java/org/apache/lucene/index/LogDocMergePolicy.java Fri Nov 11 19:50:51 2011
@@ -31,10 +31,10 @@ public class LogDocMergePolicy extends L
   public LogDocMergePolicy() {
     minMergeSize = DEFAULT_MIN_MERGE_DOCS;
     
-    // maxMergeSize(ForOptimize) are never used by LogDocMergePolicy; set
+    // maxMergeSize(ForForcedMerge) are never used by LogDocMergePolicy; set
     // it to Long.MAX_VALUE to disable it
     maxMergeSize = Long.MAX_VALUE;
-    maxMergeSizeForOptimize = Long.MAX_VALUE;
+    maxMergeSizeForForcedMerge = Long.MAX_VALUE;
   }
 
   @Override

Modified: lucene/dev/trunk/lucene/src/java/org/apache/lucene/index/LogMergePolicy.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/src/java/org/apache/lucene/index/LogMergePolicy.java?rev=1201036&r1=1201035&r2=1201036&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/src/java/org/apache/lucene/index/LogMergePolicy.java (original)
+++ lucene/dev/trunk/lucene/src/java/org/apache/lucene/index/LogMergePolicy.java Fri Nov 11 19:50:51 2011
@@ -70,7 +70,7 @@ public abstract class LogMergePolicy ext
   protected long maxMergeSize;
   // Although the core MPs set it explicitly, we must default in case someone
   // out there wrote his own LMP ...
-  protected long maxMergeSizeForOptimize = Long.MAX_VALUE;
+  protected long maxMergeSizeForForcedMerge = Long.MAX_VALUE;
   protected int maxMergeDocs = DEFAULT_MAX_MERGE_DOCS;
 
   protected double noCFSRatio = DEFAULT_NO_CFS_RATIO;
@@ -123,10 +123,10 @@ public abstract class LogMergePolicy ext
 
   /** Determines how often segment indices are merged by
    * addDocument().  With smaller values, less RAM is used
-   * while indexing, and searches on unoptimized indices are
+   * while indexing, and searches are
    * faster, but indexing speed is slower.  With larger
    * values, more RAM is used during indexing, and while
-   * searches on unoptimized indices are slower, indexing is
+   * searches are slower, indexing is
    * faster.  Thus larger values (> 10) are best for batch
    * index creation, and smaller values (< 10) for indices
    * that are interactively maintained. */
@@ -207,29 +207,29 @@ public abstract class LogMergePolicy ext
     }
   }
   
-  protected boolean isOptimized(SegmentInfos infos, int maxNumSegments, Map<SegmentInfo,Boolean> segmentsToOptimize) throws IOException {
+  protected boolean isMerged(SegmentInfos infos, int maxNumSegments, Map<SegmentInfo,Boolean> segmentsToMerge) throws IOException {
     final int numSegments = infos.size();
-    int numToOptimize = 0;
-    SegmentInfo optimizeInfo = null;
+    int numToMerge = 0;
+    SegmentInfo mergeInfo = null;
     boolean segmentIsOriginal = false;
-    for(int i=0;i<numSegments && numToOptimize <= maxNumSegments;i++) {
+    for(int i=0;i<numSegments && numToMerge <= maxNumSegments;i++) {
       final SegmentInfo info = infos.info(i);
-      final Boolean isOriginal = segmentsToOptimize.get(info);
+      final Boolean isOriginal = segmentsToMerge.get(info);
       if (isOriginal != null) {
         segmentIsOriginal = isOriginal;
-        numToOptimize++;
-        optimizeInfo = info;
+        numToMerge++;
+        mergeInfo = info;
       }
     }
 
-    return numToOptimize <= maxNumSegments &&
-      (numToOptimize != 1 || !segmentIsOriginal || isOptimized(optimizeInfo));
+    return numToMerge <= maxNumSegments &&
+      (numToMerge != 1 || !segmentIsOriginal || isMerged(mergeInfo));
   }
 
-  /** Returns true if this single info is optimized (has no
+  /** Returns true if this single info is already fully merged (has no
    *  pending norms or deletes, is in the same dir as the
    *  writer, and matches the current compound file setting */
-  protected boolean isOptimized(SegmentInfo info)
+  protected boolean isMerged(SegmentInfo info)
     throws IOException {
     IndexWriter w = writer.get();
     assert w != null;
@@ -241,14 +241,14 @@ public abstract class LogMergePolicy ext
   }
 
   /**
-   * Returns the merges necessary to optimize the index, taking the max merge
+   * Returns the merges necessary to merge the index down to the requested segment count, taking the max merge
    * size or max merge docs into consideration. This method attempts to respect
    * the {@code maxNumSegments} parameter, however it might be, due to size
    * constraints, that more than that number of segments will remain in the
    * index. Also, this method does not guarantee that exactly {@code
    * maxNumSegments} will remain, but &lt;= that number.
    */
-  private MergeSpecification findMergesForOptimizeSizeLimit(
+  private MergeSpecification findForcedMergesSizeLimit(
       SegmentInfos infos, int maxNumSegments, int last) throws IOException {
     MergeSpecification spec = new MergeSpecification();
     final List<SegmentInfo> segments = infos.asList();
@@ -256,14 +256,15 @@ public abstract class LogMergePolicy ext
     int start = last - 1;
     while (start >= 0) {
       SegmentInfo info = infos.info(start);
-      if (size(info) > maxMergeSizeForOptimize || sizeDocs(info) > maxMergeDocs) {
+      if (size(info) > maxMergeSizeForForcedMerge || sizeDocs(info) > maxMergeDocs) {
         if (verbose()) {
-          message("optimize: skip segment=" + info + ": size is > maxMergeSize (" + maxMergeSizeForOptimize + ") or sizeDocs is > maxMergeDocs (" + maxMergeDocs + ")");
+          message("findForcedMergesSizeLimit: skip segment=" + info + ": size is > maxMergeSize (" + maxMergeSizeForForcedMerge + ") or sizeDocs is > maxMergeDocs (" + maxMergeDocs + ")");
         }
         // need to skip that segment + add a merge for the 'right' segments,
-        // unless there is only 1 which is optimized.
-        if (last - start - 1 > 1 || (start != last - 1 && !isOptimized(infos.info(start + 1)))) {
-          // there is more than 1 segment to the right of this one, or an unoptimized single segment.
+        // unless there is only 1 which is merged.
+        if (last - start - 1 > 1 || (start != last - 1 && !isMerged(infos.info(start + 1)))) {
+          // there is more than 1 segment to the right of this
+          // one, or a single segment there that is not yet fully merged.
           spec.add(new OneMerge(segments.subList(start + 1, last)));
         }
         last = start;
@@ -275,8 +276,9 @@ public abstract class LogMergePolicy ext
       --start;
     }
 
-    // Add any left-over segments, unless there is just 1 already optimized.
-    if (last > 0 && (++start + 1 < last || !isOptimized(infos.info(start)))) {
+    // Add any left-over segments, unless there is just 1
+    // already fully merged
+    if (last > 0 && (++start + 1 < last || !isMerged(infos.info(start)))) {
       spec.add(new OneMerge(segments.subList(start, last)));
     }
 
@@ -284,11 +286,11 @@ public abstract class LogMergePolicy ext
   }
   
   /**
-   * Returns the merges necessary to optimize the index. This method constraints
+   * Returns the merges necessary to forceMerge the index. This method constrains
    * the returned merges only by the {@code maxNumSegments} parameter, and
   * guarantees that exactly that number of segments will remain in the index.
    */
-  private MergeSpecification findMergesForOptimizeMaxNumSegments(SegmentInfos infos, int maxNumSegments, int last) throws IOException {
+  private MergeSpecification findForcedMergesMaxNumSegments(SegmentInfos infos, int maxNumSegments, int last) throws IOException {
     MergeSpecification spec = new MergeSpecification();
     final List<SegmentInfo> segments = infos.asList();
 
@@ -304,9 +306,9 @@ public abstract class LogMergePolicy ext
     if (0 == spec.merges.size()) {
       if (maxNumSegments == 1) {
 
-        // Since we must optimize down to 1 segment, the
+        // Since we must merge down to 1 segment, the
         // choice is simple:
-        if (last > 1 || !isOptimized(infos.info(0))) {
+        if (last > 1 || !isMerged(infos.info(0))) {
           spec.add(new OneMerge(segments.subList(0, last)));
         }
       } else if (last > maxNumSegments) {
@@ -319,7 +321,7 @@ public abstract class LogMergePolicy ext
 
         // We must merge this many segments to leave
         // maxNumSegments in the index (from when
-        // optimize was first kicked off):
+        // forceMerge was first kicked off):
         final int finalMergeSize = last - maxNumSegments + 1;
 
         // Consider all possible starting points:
@@ -342,10 +344,9 @@ public abstract class LogMergePolicy ext
     return spec.merges.size() == 0 ? null : spec;
   }
   
-  /** Returns the merges necessary to optimize the index.
-   *  This merge policy defines "optimized" to mean only the
-   *  requested number of segments is left in the index, and
-   *  respects the {@link #maxMergeSizeForOptimize} setting.
+  /** Returns the merges necessary to merge the index down
+   *  to a specified number of segments.
+   *  This respects the {@link #maxMergeSizeForForcedMerge} setting.
    *  By default, and assuming {@code maxNumSegments=1}, only
    *  one segment will be left in the index, where that segment
    *  has no deletions pending nor separate norms, and it is in
@@ -354,30 +355,30 @@ public abstract class LogMergePolicy ext
    *  (mergeFactor at a time) so the {@link MergeScheduler}
    *  in use may make use of concurrency. */
   @Override
-  public MergeSpecification findMergesForOptimize(SegmentInfos infos,
-            int maxNumSegments, Map<SegmentInfo,Boolean> segmentsToOptimize) throws IOException {
+  public MergeSpecification findForcedMerges(SegmentInfos infos,
+            int maxNumSegments, Map<SegmentInfo,Boolean> segmentsToMerge) throws IOException {
 
     assert maxNumSegments > 0;
     if (verbose()) {
-      message("findMergesForOptimize: maxNumSegs=" + maxNumSegments + " segsToOptimize="+ segmentsToOptimize);
+      message("findForcedMerges: maxNumSegs=" + maxNumSegments + " segsToMerge="+ segmentsToMerge);
     }
 
-    // If the segments are already optimized (e.g. there's only 1 segment), or
-    // there are <maxNumSegements, all optimized, nothing to do.
-    if (isOptimized(infos, maxNumSegments, segmentsToOptimize)) {
+    // If the segments are already merged (e.g. there's only 1 segment), or
+    // there are <= maxNumSegments and they are all fully merged; nothing to do.
+    if (isMerged(infos, maxNumSegments, segmentsToMerge)) {
       if (verbose()) {
-        message("already optimized; skip");
+        message("already merged; skip");
       }
       return null;
     }
 
     // Find the newest (rightmost) segment that needs to
-    // be optimized (other segments may have been flushed
-    // since optimize started):
+    // be merged (other segments may have been flushed
+    // since merging started):
     int last = infos.size();
     while (last > 0) {
       final SegmentInfo info = infos.info(--last);
-      if (segmentsToOptimize.get(info) != null) {
+      if (segmentsToMerge.get(info) != null) {
         last++;
         break;
       }
@@ -390,8 +391,8 @@ public abstract class LogMergePolicy ext
       return null;
     }
     
-    // There is only one segment already, and it is optimized
-    if (maxNumSegments == 1 && last == 1 && isOptimized(infos.info(0))) {
+    // There is only one segment already, and it is merged
+    if (maxNumSegments == 1 && last == 1 && isMerged(infos.info(0))) {
       if (verbose()) {
         message("already 1 seg; skip");
       }
@@ -402,16 +403,16 @@ public abstract class LogMergePolicy ext
     boolean anyTooLarge = false;
     for (int i = 0; i < last; i++) {
       SegmentInfo info = infos.info(i);
-      if (size(info) > maxMergeSizeForOptimize || sizeDocs(info) > maxMergeDocs) {
+      if (size(info) > maxMergeSizeForForcedMerge || sizeDocs(info) > maxMergeDocs) {
         anyTooLarge = true;
         break;
       }
     }
 
     if (anyTooLarge) {
-      return findMergesForOptimizeSizeLimit(infos, maxNumSegments, last);
+      return findForcedMergesSizeLimit(infos, maxNumSegments, last);
     } else {
-      return findMergesForOptimizeMaxNumSegments(infos, maxNumSegments, last);
+      return findForcedMergesMaxNumSegments(infos, maxNumSegments, last);
     }
   }
 
@@ -661,7 +662,7 @@ public abstract class LogMergePolicy ext
     sb.append("minMergeSize=").append(minMergeSize).append(", ");
     sb.append("mergeFactor=").append(mergeFactor).append(", ");
     sb.append("maxMergeSize=").append(maxMergeSize).append(", ");
-    sb.append("maxMergeSizeForOptimize=").append(maxMergeSizeForOptimize).append(", ");
+    sb.append("maxMergeSizeForForcedMerge=").append(maxMergeSizeForForcedMerge).append(", ");
     sb.append("calibrateSizeByDeletes=").append(calibrateSizeByDeletes).append(", ");
     sb.append("maxMergeDocs=").append(maxMergeDocs).append(", ");
     sb.append("useCompoundFile=").append(useCompoundFile).append(", ");

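To illustrate the mergeFactor trade-off described in the javadoc above, a hedged sketch of a batch-indexing setup; the concrete values are arbitrary:

    import org.apache.lucene.index.LogDocMergePolicy;

    public class MergeFactorSketch {
      static LogDocMergePolicy forBatchIndexing() {
        LogDocMergePolicy mp = new LogDocMergePolicy();
        mp.setMergeFactor(30);        // larger factor: faster bulk indexing, more segments to search
        mp.setMaxMergeDocs(1000000);  // segments above this doc count are not merged further
        return mp;
      }
    }
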
Modified: lucene/dev/trunk/lucene/src/java/org/apache/lucene/index/MergePolicy.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/src/java/org/apache/lucene/index/MergePolicy.java?rev=1201036&r1=1201035&r2=1201036&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/src/java/org/apache/lucene/index/MergePolicy.java (original)
+++ lucene/dev/trunk/lucene/src/java/org/apache/lucene/index/MergePolicy.java Fri Nov 11 19:50:51 2011
@@ -30,8 +30,7 @@ import org.apache.lucene.util.SetOnce;
 
 /**
  * <p>Expert: a MergePolicy determines the sequence of
- * primitive merge operations to be used for overall merge
- * and optimize operations.</p>
+ * primitive merge operations.</p>
  * 
  * <p>Whenever the segments in an index have been altered by
  * {@link IndexWriter}, either the addition of a newly
@@ -42,8 +41,8 @@ import org.apache.lucene.util.SetOnce;
  * merges that are now required.  This method returns a
  * {@link MergeSpecification} instance describing the set of
  * merges that should be done, or null if no merges are
- * necessary.  When IndexWriter.optimize is called, it calls
- * {@link #findMergesForOptimize} and the MergePolicy should
+ * necessary.  When IndexWriter.forceMerge is called, it calls
+ * {@link #findForcedMerges(SegmentInfos,int,Map)} and the MergePolicy should
  * then return the necessary merges.</p>
  *
  * <p>Note that the policy can return more than one merge at
@@ -69,11 +68,10 @@ public abstract class MergePolicy implem
   public static class OneMerge {
 
     SegmentInfo info;               // used by IndexWriter
-    boolean optimize;               // used by IndexWriter
     boolean registerDone;           // used by IndexWriter
     long mergeGen;                  // used by IndexWriter
     boolean isExternal;             // used by IndexWriter
-    int maxNumSegmentsOptimize;     // used by IndexWriter
+    int maxNumSegments = -1;        // used by IndexWriter
     public long estimatedMergeBytes;       // used by IndexWriter
     List<SegmentReader> readers;        // used by IndexWriter
     List<BitVector> readerLiveDocs;   // used by IndexWriter
@@ -160,8 +158,8 @@ public abstract class MergePolicy implem
       }
       if (info != null)
         b.append(" into ").append(info.name);
-      if (optimize)
-        b.append(" [optimize]");
+      if (maxNumSegments != -1)
+        b.append(" [maxNumSegments=" + maxNumSegments + "]");
       if (aborted) {
         b.append(" [ABORTED]");
       }
@@ -193,7 +191,7 @@ public abstract class MergePolicy implem
     }
     
     public MergeInfo getMergeInfo() {
-      return new MergeInfo(totalDocCount, estimatedMergeBytes, isExternal, optimize);
+      return new MergeInfo(totalDocCount, estimatedMergeBytes, isExternal, maxNumSegments);
     }    
   }
 
@@ -290,9 +288,9 @@ public abstract class MergePolicy implem
       throws CorruptIndexException, IOException;
 
   /**
-   * Determine what set of merge operations is necessary in order to optimize
-   * the index. {@link IndexWriter} calls this when its
-   * {@link IndexWriter#optimize()} method is called. This call is always
+   * Determine what set of merge operations is necessary in
+   * order to merge to &lt;= the specified segment count. {@link IndexWriter} calls this when its
+   * {@link IndexWriter#forceMerge} method is called. This call is always
    * synchronized on the {@link IndexWriter} instance so only one thread at a
    * time will call this method.
    * 
@@ -301,17 +299,17 @@ public abstract class MergePolicy implem
    * @param maxSegmentCount
    *          requested maximum number of segments in the index (currently this
    *          is always 1)
-   * @param segmentsToOptimize
+   * @param segmentsToMerge
    *          contains the specific SegmentInfo instances that must be merged
    *          away. This may be a subset of all
    *          SegmentInfos.  If the value is True for a
    *          given SegmentInfo, that means this segment was
    *          an original segment present in the
-   *          to-be-optimized index; else, it was a segment
+   *          to-be-merged index; else, it was a segment
    *          produced by a cascaded merge.
    */
-  public abstract MergeSpecification findMergesForOptimize(
-          SegmentInfos segmentInfos, int maxSegmentCount, Map<SegmentInfo,Boolean> segmentsToOptimize)
+  public abstract MergeSpecification findForcedMerges(
+          SegmentInfos segmentInfos, int maxSegmentCount, Map<SegmentInfo,Boolean> segmentsToMerge)
       throws CorruptIndexException, IOException;
 
   /**

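For MergePolicy subclasses maintained outside the core, only the name of the forced-merge hook and its segmentsToMerge parameter change; a minimal, hypothetical stub (mirroring NoMergePolicy below, which simply returns null):

    import java.io.IOException;
    import java.util.Map;

    import org.apache.lucene.index.CorruptIndexException;
    import org.apache.lucene.index.MergePolicy;
    import org.apache.lucene.index.SegmentInfo;
    import org.apache.lucene.index.SegmentInfos;

    // Declared abstract so the unrelated MergePolicy hooks can be omitted here.
    public abstract class RenamedHookSketch extends MergePolicy {
      @Override
      public MergeSpecification findForcedMerges(SegmentInfos segmentInfos,
          int maxSegmentCount, Map<SegmentInfo,Boolean> segmentsToMerge)
          throws CorruptIndexException, IOException {
        // null still means "no merges necessary", exactly as before the rename.
        return null;
      }
    }
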
Modified: lucene/dev/trunk/lucene/src/java/org/apache/lucene/index/MultiReader.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/src/java/org/apache/lucene/index/MultiReader.java?rev=1201036&r1=1201035&r2=1201036&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/src/java/org/apache/lucene/index/MultiReader.java (original)
+++ lucene/dev/trunk/lucene/src/java/org/apache/lucene/index/MultiReader.java Fri Nov 11 19:50:51 2011
@@ -234,12 +234,6 @@ public class MultiReader extends IndexRe
   }
 
   @Override
-  public boolean isOptimized() {
-    ensureOpen();
-    return false;
-  }
-  
-  @Override
   public int numDocs() {
     // Don't call ensureOpen() here (it could affect performance)
     // NOTE: multiple threads may wind up init'ing

Modified: lucene/dev/trunk/lucene/src/java/org/apache/lucene/index/NoMergePolicy.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/src/java/org/apache/lucene/index/NoMergePolicy.java?rev=1201036&r1=1201035&r2=1201036&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/src/java/org/apache/lucene/index/NoMergePolicy.java (original)
+++ lucene/dev/trunk/lucene/src/java/org/apache/lucene/index/NoMergePolicy.java Fri Nov 11 19:50:51 2011
@@ -58,8 +58,8 @@ public final class NoMergePolicy extends
       throws CorruptIndexException, IOException { return null; }
 
   @Override
-  public MergeSpecification findMergesForOptimize(SegmentInfos segmentInfos,
-             int maxSegmentCount, Map<SegmentInfo,Boolean> segmentsToOptimize)
+  public MergeSpecification findForcedMerges(SegmentInfos segmentInfos,
+             int maxSegmentCount, Map<SegmentInfo,Boolean> segmentsToMerge)
       throws CorruptIndexException, IOException { return null; }
 
   @Override