Posted to commits@lucene.apache.org by sa...@apache.org on 2011/05/30 16:51:37 UTC

svn commit: r1129205 [2/7] - in /lucene/dev/branches/solr2452: ./ dev-tools/eclipse/ dev-tools/idea/.idea/ dev-tools/idea/lucene/contrib/spellchecker/ dev-tools/idea/modules/suggest/ dev-tools/maven/lucene/contrib/ dev-tools/maven/lucene/contrib/spellc...

Modified: lucene/dev/branches/solr2452/lucene/contrib/highlighter/src/java/org/apache/lucene/search/vectorhighlight/FieldTermStack.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solr2452/lucene/contrib/highlighter/src/java/org/apache/lucene/search/vectorhighlight/FieldTermStack.java?rev=1129205&r1=1129204&r2=1129205&view=diff
==============================================================================
--- lucene/dev/branches/solr2452/lucene/contrib/highlighter/src/java/org/apache/lucene/search/vectorhighlight/FieldTermStack.java (original)
+++ lucene/dev/branches/solr2452/lucene/contrib/highlighter/src/java/org/apache/lucene/search/vectorhighlight/FieldTermStack.java Mon May 30 14:51:25 2011
@@ -26,6 +26,7 @@ import org.apache.lucene.index.TermFreqV
 import org.apache.lucene.index.TermPositionVector;
 import org.apache.lucene.index.TermVectorOffsetInfo;
 import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.CharsRef;
 
 /**
  * <code>FieldTermStack</code> is a stack that keeps query terms in the specified field
@@ -80,16 +81,16 @@ public class FieldTermStack {
     Set<String> termSet = fieldQuery.getTermSet( fieldName );
     // just return to make null snippet if un-matched fieldName specified when fieldMatch == true
     if( termSet == null ) return;
-    
+    final CharsRef spare = new CharsRef();
     for( BytesRef term : tpv.getTerms() ){
-      if( !termSet.contains( term.utf8ToString() ) ) continue;
+      if( !termSet.contains( term.utf8ToChars(spare).toString() ) ) continue;
       int index = tpv.indexOf( term );
       TermVectorOffsetInfo[] tvois = tpv.getOffsets( index );
       if( tvois == null ) return; // just return to make null snippets
       int[] poss = tpv.getTermPositions( index );
       if( poss == null ) return; // just return to make null snippets
       for( int i = 0; i < tvois.length; i++ )
-        termList.add( new TermInfo( term.utf8ToString(), tvois[i].getStartOffset(), tvois[i].getEndOffset(), poss[i] ) );
+        termList.add( new TermInfo( term.utf8ToChars(spare).toString(), tvois[i].getStartOffset(), tvois[i].getEndOffset(), poss[i] ) );
     }
     
     // sort by position

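The hunk above swaps per-term BytesRef.utf8ToString() calls for a single reused CharsRef buffer. A minimal sketch of that reuse pattern, assuming this branch's BytesRef.utf8ToChars(CharsRef) helper (which, as the hunk shows, decodes into the supplied buffer and returns it); "terms" stands in for tpv.getTerms():

    import java.util.Set;
    import org.apache.lucene.util.BytesRef;
    import org.apache.lucene.util.CharsRef;

    // Illustrative only, not part of the patch.
    static void visitMatchingTerms(BytesRef[] terms, Set<String> termSet) {
      final CharsRef spare = new CharsRef(); // one shared UTF-16 buffer
      for (BytesRef term : terms) {
        // utf8ToChars decodes into the shared buffer instead of allocating
        // a fresh char[] per term; toString() still creates the String the
        // Set lookup needs, but the intermediate buffer is recycled.
        if (!termSet.contains(term.utf8ToChars(spare).toString())) continue;
        // ... process the matching term ...
      }
    }
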
Modified: lucene/dev/branches/solr2452/lucene/contrib/highlighter/src/test/org/apache/lucene/search/vectorhighlight/SimpleFragListBuilderTest.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solr2452/lucene/contrib/highlighter/src/test/org/apache/lucene/search/vectorhighlight/SimpleFragListBuilderTest.java?rev=1129205&r1=1129204&r2=1129205&view=diff
==============================================================================
--- lucene/dev/branches/solr2452/lucene/contrib/highlighter/src/test/org/apache/lucene/search/vectorhighlight/SimpleFragListBuilderTest.java (original)
+++ lucene/dev/branches/solr2452/lucene/contrib/highlighter/src/test/org/apache/lucene/search/vectorhighlight/SimpleFragListBuilderTest.java Mon May 30 14:51:25 2011
@@ -24,7 +24,7 @@ public class SimpleFragListBuilderTest e
   public void testNullFieldFragList() throws Exception {
     SimpleFragListBuilder sflb = new SimpleFragListBuilder();
     FieldFragList ffl = sflb.createFieldFragList( fpl( "a", "b c d" ), 100 );
-    assertEquals( 0, ffl.fragInfos.size() );
+    assertEquals( 0, ffl.getFragInfos().size() );
   }
   
   public void testTooSmallFragSize() throws Exception {
@@ -40,90 +40,90 @@ public class SimpleFragListBuilderTest e
   public void testSmallerFragSizeThanTermQuery() throws Exception {
     SimpleFragListBuilder sflb = new SimpleFragListBuilder();
     FieldFragList ffl = sflb.createFieldFragList( fpl( "abcdefghijklmnopqrs", "abcdefghijklmnopqrs" ), SimpleFragListBuilder.MIN_FRAG_CHAR_SIZE );
-    assertEquals( 1, ffl.fragInfos.size() );
-    assertEquals( "subInfos=(abcdefghijklmnopqrs((0,19)))/1.0(0,19)", ffl.fragInfos.get( 0 ).toString() );
+    assertEquals( 1, ffl.getFragInfos().size() );
+    assertEquals( "subInfos=(abcdefghijklmnopqrs((0,19)))/1.0(0,19)", ffl.getFragInfos().get( 0 ).toString() );
   }
   
   public void testSmallerFragSizeThanPhraseQuery() throws Exception {
     SimpleFragListBuilder sflb = new SimpleFragListBuilder();
     FieldFragList ffl = sflb.createFieldFragList( fpl( "\"abcdefgh jklmnopqrs\"", "abcdefgh   jklmnopqrs" ), SimpleFragListBuilder.MIN_FRAG_CHAR_SIZE );
-    assertEquals( 1, ffl.fragInfos.size() );
-    if (VERBOSE) System.out.println( ffl.fragInfos.get( 0 ).toString() );
-    assertEquals( "subInfos=(abcdefghjklmnopqrs((0,21)))/1.0(0,21)", ffl.fragInfos.get( 0 ).toString() );
+    assertEquals( 1, ffl.getFragInfos().size() );
+    if (VERBOSE) System.out.println( ffl.getFragInfos().get( 0 ).toString() );
+    assertEquals( "subInfos=(abcdefghjklmnopqrs((0,21)))/1.0(0,21)", ffl.getFragInfos().get( 0 ).toString() );
   }
   
   public void test1TermIndex() throws Exception {
     SimpleFragListBuilder sflb = new SimpleFragListBuilder();
     FieldFragList ffl = sflb.createFieldFragList( fpl( "a", "a" ), 100 );
-    assertEquals( 1, ffl.fragInfos.size() );
-    assertEquals( "subInfos=(a((0,1)))/1.0(0,100)", ffl.fragInfos.get( 0 ).toString() );
+    assertEquals( 1, ffl.getFragInfos().size() );
+    assertEquals( "subInfos=(a((0,1)))/1.0(0,100)", ffl.getFragInfos().get( 0 ).toString() );
   }
   
   public void test2TermsIndex1Frag() throws Exception {
     SimpleFragListBuilder sflb = new SimpleFragListBuilder();
     FieldFragList ffl = sflb.createFieldFragList( fpl( "a", "a a" ), 100 );
-    assertEquals( 1, ffl.fragInfos.size() );
-    assertEquals( "subInfos=(a((0,1))a((2,3)))/2.0(0,100)", ffl.fragInfos.get( 0 ).toString() );
+    assertEquals( 1, ffl.getFragInfos().size() );
+    assertEquals( "subInfos=(a((0,1))a((2,3)))/2.0(0,100)", ffl.getFragInfos().get( 0 ).toString() );
   
     ffl = sflb.createFieldFragList( fpl( "a", "a b b b b b b b b a" ), 20 );
-    assertEquals( 1, ffl.fragInfos.size() );
-    assertEquals( "subInfos=(a((0,1))a((18,19)))/2.0(0,20)", ffl.fragInfos.get( 0 ).toString() );
+    assertEquals( 1, ffl.getFragInfos().size() );
+    assertEquals( "subInfos=(a((0,1))a((18,19)))/2.0(0,20)", ffl.getFragInfos().get( 0 ).toString() );
 
     ffl = sflb.createFieldFragList( fpl( "a", "b b b b a b b b b a" ), 20 );
-    assertEquals( 1, ffl.fragInfos.size() );
-    assertEquals( "subInfos=(a((8,9))a((18,19)))/2.0(2,22)", ffl.fragInfos.get( 0 ).toString() );
+    assertEquals( 1, ffl.getFragInfos().size() );
+    assertEquals( "subInfos=(a((8,9))a((18,19)))/2.0(2,22)", ffl.getFragInfos().get( 0 ).toString() );
   }
   
   public void test2TermsIndex2Frags() throws Exception {
     SimpleFragListBuilder sflb = new SimpleFragListBuilder();
     FieldFragList ffl = sflb.createFieldFragList( fpl( "a", "a b b b b b b b b b b b b b a" ), 20 );
-    assertEquals( 2, ffl.fragInfos.size() );
-    assertEquals( "subInfos=(a((0,1)))/1.0(0,20)", ffl.fragInfos.get( 0 ).toString() );
-    assertEquals( "subInfos=(a((28,29)))/1.0(22,42)", ffl.fragInfos.get( 1 ).toString() );
+    assertEquals( 2, ffl.getFragInfos().size() );
+    assertEquals( "subInfos=(a((0,1)))/1.0(0,20)", ffl.getFragInfos().get( 0 ).toString() );
+    assertEquals( "subInfos=(a((28,29)))/1.0(22,42)", ffl.getFragInfos().get( 1 ).toString() );
 
     ffl = sflb.createFieldFragList( fpl( "a", "a b b b b b b b b b b b b a" ), 20 );
-    assertEquals( 2, ffl.fragInfos.size() );
-    assertEquals( "subInfos=(a((0,1)))/1.0(0,20)", ffl.fragInfos.get( 0 ).toString() );
-    assertEquals( "subInfos=(a((26,27)))/1.0(20,40)", ffl.fragInfos.get( 1 ).toString() );
+    assertEquals( 2, ffl.getFragInfos().size() );
+    assertEquals( "subInfos=(a((0,1)))/1.0(0,20)", ffl.getFragInfos().get( 0 ).toString() );
+    assertEquals( "subInfos=(a((26,27)))/1.0(20,40)", ffl.getFragInfos().get( 1 ).toString() );
 
     ffl = sflb.createFieldFragList( fpl( "a", "a b b b b b b b b b a" ), 20 );
-    assertEquals( 2, ffl.fragInfos.size() );
-    assertEquals( "subInfos=(a((0,1)))/1.0(0,20)", ffl.fragInfos.get( 0 ).toString() );
-    assertEquals( "subInfos=(a((20,21)))/1.0(20,40)", ffl.fragInfos.get( 1 ).toString() );
+    assertEquals( 2, ffl.getFragInfos().size() );
+    assertEquals( "subInfos=(a((0,1)))/1.0(0,20)", ffl.getFragInfos().get( 0 ).toString() );
+    assertEquals( "subInfos=(a((20,21)))/1.0(20,40)", ffl.getFragInfos().get( 1 ).toString() );
   }
   
   public void test2TermsQuery() throws Exception {
     SimpleFragListBuilder sflb = new SimpleFragListBuilder();
     FieldFragList ffl = sflb.createFieldFragList( fpl( "a b", "c d e" ), 20 );
-    assertEquals( 0, ffl.fragInfos.size() );
+    assertEquals( 0, ffl.getFragInfos().size() );
 
     ffl = sflb.createFieldFragList( fpl( "a b", "d b c" ), 20 );
-    assertEquals( 1, ffl.fragInfos.size() );
-    assertEquals( "subInfos=(b((2,3)))/1.0(0,20)", ffl.fragInfos.get( 0 ).toString() );
+    assertEquals( 1, ffl.getFragInfos().size() );
+    assertEquals( "subInfos=(b((2,3)))/1.0(0,20)", ffl.getFragInfos().get( 0 ).toString() );
 
     ffl = sflb.createFieldFragList( fpl( "a b", "a b c" ), 20 );
-    assertEquals( 1, ffl.fragInfos.size() );
-    assertEquals( "subInfos=(a((0,1))b((2,3)))/2.0(0,20)", ffl.fragInfos.get( 0 ).toString() );
+    assertEquals( 1, ffl.getFragInfos().size() );
+    assertEquals( "subInfos=(a((0,1))b((2,3)))/2.0(0,20)", ffl.getFragInfos().get( 0 ).toString() );
   }
   
   public void testPhraseQuery() throws Exception {
     SimpleFragListBuilder sflb = new SimpleFragListBuilder();
     FieldFragList ffl = sflb.createFieldFragList( fpl( "\"a b\"", "c d e" ), 20 );
-    assertEquals( 0, ffl.fragInfos.size() );
+    assertEquals( 0, ffl.getFragInfos().size() );
 
     ffl = sflb.createFieldFragList( fpl( "\"a b\"", "a c b" ), 20 );
-    assertEquals( 0, ffl.fragInfos.size() );
+    assertEquals( 0, ffl.getFragInfos().size() );
 
     ffl = sflb.createFieldFragList( fpl( "\"a b\"", "a b c" ), 20 );
-    assertEquals( 1, ffl.fragInfos.size() );
-    assertEquals( "subInfos=(ab((0,3)))/1.0(0,20)", ffl.fragInfos.get( 0 ).toString() );
+    assertEquals( 1, ffl.getFragInfos().size() );
+    assertEquals( "subInfos=(ab((0,3)))/1.0(0,20)", ffl.getFragInfos().get( 0 ).toString() );
   }
   
   public void testPhraseQuerySlop() throws Exception {
     SimpleFragListBuilder sflb = new SimpleFragListBuilder();
     FieldFragList ffl = sflb.createFieldFragList( fpl( "\"a b\"~1", "a c b" ), 20 );
-    assertEquals( 1, ffl.fragInfos.size() );
-    assertEquals( "subInfos=(ab((0,1)(4,5)))/1.0(0,20)", ffl.fragInfos.get( 0 ).toString() );
+    assertEquals( 1, ffl.getFragInfos().size() );
+    assertEquals( "subInfos=(ab((0,1)(4,5)))/1.0(0,20)", ffl.getFragInfos().get( 0 ).toString() );
   }
 
   private FieldPhraseList fpl( String queryValue, String indexValue ) throws Exception {
@@ -142,8 +142,8 @@ public class SimpleFragListBuilderTest e
     FieldPhraseList fpl = new FieldPhraseList( stack, fq );
     SimpleFragListBuilder sflb = new SimpleFragListBuilder();
     FieldFragList ffl = sflb.createFieldFragList( fpl, 100 );
-    assertEquals( 1, ffl.fragInfos.size() );
-    assertEquals( "subInfos=(d((9,10)))/1.0(3,103)", ffl.fragInfos.get( 0 ).toString() );
+    assertEquals( 1, ffl.getFragInfos().size() );
+    assertEquals( "subInfos=(d((9,10)))/1.0(3,103)", ffl.getFragInfos().get( 0 ).toString() );
   }
   
   public void test1PhraseLongMV() throws Exception {
@@ -154,8 +154,8 @@ public class SimpleFragListBuilderTest e
     FieldPhraseList fpl = new FieldPhraseList( stack, fq );
     SimpleFragListBuilder sflb = new SimpleFragListBuilder();
     FieldFragList ffl = sflb.createFieldFragList( fpl, 100 );
-    assertEquals( 1, ffl.fragInfos.size() );
-    assertEquals( "subInfos=(searchengines((102,116))searchengines((157,171)))/2.0(96,196)", ffl.fragInfos.get( 0 ).toString() );
+    assertEquals( 1, ffl.getFragInfos().size() );
+    assertEquals( "subInfos=(searchengines((102,116))searchengines((157,171)))/2.0(96,196)", ffl.getFragInfos().get( 0 ).toString() );
   }
 
   public void test1PhraseLongMVB() throws Exception {
@@ -166,7 +166,7 @@ public class SimpleFragListBuilderTest e
     FieldPhraseList fpl = new FieldPhraseList( stack, fq );
     SimpleFragListBuilder sflb = new SimpleFragListBuilder();
     FieldFragList ffl = sflb.createFieldFragList( fpl, 100 );
-    assertEquals( 1, ffl.fragInfos.size() );
-    assertEquals( "subInfos=(sppeeeed((88,93)))/1.0(82,182)", ffl.fragInfos.get( 0 ).toString() );
+    assertEquals( 1, ffl.getFragInfos().size() );
+    assertEquals( "subInfos=(sppeeeed((88,93)))/1.0(82,182)", ffl.getFragInfos().get( 0 ).toString() );
   }
 }

Modified: lucene/dev/branches/solr2452/lucene/contrib/highlighter/src/test/org/apache/lucene/search/vectorhighlight/SingleFragListBuilderTest.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solr2452/lucene/contrib/highlighter/src/test/org/apache/lucene/search/vectorhighlight/SingleFragListBuilderTest.java?rev=1129205&r1=1129204&r2=1129205&view=diff
==============================================================================
--- lucene/dev/branches/solr2452/lucene/contrib/highlighter/src/test/org/apache/lucene/search/vectorhighlight/SingleFragListBuilderTest.java (original)
+++ lucene/dev/branches/solr2452/lucene/contrib/highlighter/src/test/org/apache/lucene/search/vectorhighlight/SingleFragListBuilderTest.java Mon May 30 14:51:25 2011
@@ -24,21 +24,21 @@ public class SingleFragListBuilderTest e
   public void testNullFieldFragList() throws Exception {
     SingleFragListBuilder sflb = new SingleFragListBuilder();
     FieldFragList ffl = sflb.createFieldFragList( fpl( "a", "b c d" ), 100 );
-    assertEquals( 0, ffl.fragInfos.size() );
+    assertEquals( 0, ffl.getFragInfos().size() );
   }
   
   public void testShortFieldFragList() throws Exception {
     SingleFragListBuilder sflb = new SingleFragListBuilder();
     FieldFragList ffl = sflb.createFieldFragList( fpl( "a", "a b c d" ), 100 );
-    assertEquals( 1, ffl.fragInfos.size() );
-    assertEquals( "subInfos=(a((0,1)))/1.0(0,2147483647)", ffl.fragInfos.get( 0 ).toString() );
+    assertEquals( 1, ffl.getFragInfos().size() );
+    assertEquals( "subInfos=(a((0,1)))/1.0(0,2147483647)", ffl.getFragInfos().get( 0 ).toString() );
   }
   
   public void testLongFieldFragList() throws Exception {
     SingleFragListBuilder sflb = new SingleFragListBuilder();
     FieldFragList ffl = sflb.createFieldFragList( fpl( "a", "a b c d", "a b c d e f g h i", "j k l m n o p q r s t u v w x y z a b c", "d e f g" ), 100 );
-    assertEquals( 1, ffl.fragInfos.size() );
-    assertEquals( "subInfos=(a((0,1))a((8,9))a((60,61)))/3.0(0,2147483647)", ffl.fragInfos.get( 0 ).toString() );
+    assertEquals( 1, ffl.getFragInfos().size() );
+    assertEquals( "subInfos=(a((0,1))a((8,9))a((60,61)))/3.0(0,2147483647)", ffl.getFragInfos().get( 0 ).toString() );
   }
 
   private FieldPhraseList fpl( String queryValue, String... indexValues ) throws Exception {

Modified: lucene/dev/branches/solr2452/lucene/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedIndex.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solr2452/lucene/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedIndex.java?rev=1129205&r1=1129204&r2=1129205&view=diff
==============================================================================
--- lucene/dev/branches/solr2452/lucene/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedIndex.java (original)
+++ lucene/dev/branches/solr2452/lucene/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedIndex.java Mon May 30 14:51:25 2011
@@ -41,6 +41,7 @@ import org.apache.lucene.index.DocsAndPo
 import org.apache.lucene.util.BitVector;
 import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.CharsRef;
 
 /**
  * Represented as a coupled graph of class instances, this
@@ -228,12 +229,13 @@ public class InstantiatedIndex
     if (fieldsC != null) {
       FieldsEnum fieldsEnum = fieldsC.iterator();
       String field;
+      final CharsRef spare = new CharsRef();
       while((field = fieldsEnum.next()) != null) {
         if (fields == null || fields.contains(field)) {
           TermsEnum termsEnum = fieldsEnum.terms();
           BytesRef text;
           while((text = termsEnum.next()) != null) {
-            String termText = text.utf8ToString();
+            String termText = text.utf8ToChars(spare).toString();
             InstantiatedTerm instantiatedTerm = new InstantiatedTerm(field, termText);
             final long totalTermFreq = termsEnum.totalTermFreq();
             if (totalTermFreq != -1) {

Modified: lucene/dev/branches/solr2452/lucene/contrib/misc/build.xml
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solr2452/lucene/contrib/misc/build.xml?rev=1129205&r1=1129204&r2=1129205&view=diff
==============================================================================
--- lucene/dev/branches/solr2452/lucene/contrib/misc/build.xml (original)
+++ lucene/dev/branches/solr2452/lucene/contrib/misc/build.xml Mon May 30 14:51:25 2011
@@ -42,4 +42,26 @@
       <fileset dir="${common.dir}/../modules/analysis/common" includes="build.xml"/>
     </subant>
   </target>
+
+  <target name="build-native-unix" >
+    <mkdir dir="${common.build.dir}/native"/>
+
+    <taskdef resource="cpptasks.tasks">
+      <classpath>
+        <pathelement location="ant_lib/cpptasks-1.0b5.jar"/>
+      </classpath>
+    </taskdef>
+
+    <cc outtype="shared" subsystem="console" outfile="${common.build.dir}/native/NativePosixUtil" >
+      <fileset file="${src.dir}/org/apache/lucene/store/NativePosixUtil.cpp" />  
+      <includepath>
+        <pathelement location="${java.home}/../include"/>
+        <pathelement location="${java.home}/../include/linux"/>
+        <pathelement location="${java.home}/../include/solaris"/>
+      </includepath>
+
+      <compilerarg value="-fPIC" />
+    </cc>
+  </target>
+
 </project>

Modified: lucene/dev/branches/solr2452/lucene/contrib/misc/src/java/org/apache/lucene/index/IndexSplitter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solr2452/lucene/contrib/misc/src/java/org/apache/lucene/index/IndexSplitter.java?rev=1129205&r1=1129204&r2=1129205&view=diff
==============================================================================
--- lucene/dev/branches/solr2452/lucene/contrib/misc/src/java/org/apache/lucene/index/IndexSplitter.java (original)
+++ lucene/dev/branches/solr2452/lucene/contrib/misc/src/java/org/apache/lucene/index/IndexSplitter.java Mon May 30 14:51:25 2011
@@ -26,6 +26,7 @@ import java.text.DecimalFormat;
 import java.util.ArrayList;
 import java.util.List;
 
+import org.apache.lucene.index.IndexWriter;  // Required for javadocs
 import org.apache.lucene.index.codecs.CodecProvider;
 import org.apache.lucene.store.FSDirectory;
 
@@ -45,6 +46,11 @@ import org.apache.lucene.store.FSDirecto
  * @lucene.experimental You can easily
  * accidentally remove segments from your index so be
  * careful!
+ *
+ * <p><b>NOTE</b>: this tool is unaware of documents added
+ * atomically via {@link IndexWriter#addDocuments} or {@link
+ * IndexWriter#updateDocuments}, which means it can easily
+ * break up such document groups.
  */
 public class IndexSplitter {
   public SegmentInfos infos;

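For context, a hedged sketch of the block-add API the new NOTE refers to: addDocuments gives the group adjacent docIDs within one segment, which is exactly the invariant a docID/segment-level splitter can break. Writer setup and field values below are illustrative:

    import java.util.ArrayList;
    import java.util.List;
    import org.apache.lucene.document.Document;
    import org.apache.lucene.document.Field;
    import org.apache.lucene.index.IndexWriter;

    // Index a child/parent group atomically: all or none, adjacent docIDs.
    static void addGroup(IndexWriter writer) throws Exception {
      List<Document> group = new ArrayList<Document>();
      Document child = new Document();
      child.add(new Field("type", "child", Field.Store.NO, Field.Index.NOT_ANALYZED));
      group.add(child);
      Document parent = new Document();
      parent.add(new Field("type", "parent", Field.Store.NO, Field.Index.NOT_ANALYZED));
      group.add(parent); // by convention the parent comes last in the block
      writer.addDocuments(group);
    }

A split boundary chosen purely by segment or docID can land inside such a group and separate child from parent, hence the warning.
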
Modified: lucene/dev/branches/solr2452/lucene/contrib/misc/src/java/org/apache/lucene/index/MultiPassIndexSplitter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solr2452/lucene/contrib/misc/src/java/org/apache/lucene/index/MultiPassIndexSplitter.java?rev=1129205&r1=1129204&r2=1129205&view=diff
==============================================================================
--- lucene/dev/branches/solr2452/lucene/contrib/misc/src/java/org/apache/lucene/index/MultiPassIndexSplitter.java (original)
+++ lucene/dev/branches/solr2452/lucene/contrib/misc/src/java/org/apache/lucene/index/MultiPassIndexSplitter.java Mon May 30 14:51:25 2011
@@ -40,6 +40,11 @@ import org.apache.lucene.util.Version;
  * <p>Note 2: the disadvantage of this tool is that source index needs to be
  * read as many times as there are parts to be created, hence the name of this
  * tool.
+ *
+ * <p><b>NOTE</b>: this tool is unaware of documents added
+ * atomically via {@link IndexWriter#addDocuments} or {@link
+ * IndexWriter#updateDocuments}, which means it can easily
+ * break up such document groups.
  */
 public class MultiPassIndexSplitter {
   

Modified: lucene/dev/branches/solr2452/lucene/contrib/misc/src/java/org/apache/lucene/store/NRTCachingDirectory.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solr2452/lucene/contrib/misc/src/java/org/apache/lucene/store/NRTCachingDirectory.java?rev=1129205&r1=1129204&r2=1129205&view=diff
==============================================================================
--- lucene/dev/branches/solr2452/lucene/contrib/misc/src/java/org/apache/lucene/store/NRTCachingDirectory.java (original)
+++ lucene/dev/branches/solr2452/lucene/contrib/misc/src/java/org/apache/lucene/store/NRTCachingDirectory.java Mon May 30 14:51:25 2011
@@ -269,7 +269,7 @@ public class NRTCachingDirectory extends
         in = cache.openInput(fileName);
         in.copyBytes(out, in.length());
       } finally {
-        IOUtils.closeSafely(in, out);
+        IOUtils.closeSafely(false, in, out);
       }
       synchronized(this) {
         cache.deleteFile(fileName);

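The added boolean selects how closeSafely handles failures. Assuming the trunk signature IOUtils.closeSafely(boolean suppressExceptions, Closeable... objects), passing false means every argument is still closed, but the first exception encountered is rethrown instead of swallowed, so copy/close failures no longer vanish silently. A small self-contained sketch:

    import java.io.ByteArrayInputStream;
    import java.io.Closeable;
    import java.io.IOException;
    import org.apache.lucene.util.IOUtils;

    static void demo() throws IOException {
      Closeable a = new ByteArrayInputStream(new byte[0]);
      Closeable b = new ByteArrayInputStream(new byte[0]);
      // both close() calls are attempted even if the first one fails;
      // with suppressExceptions=false the first failure is rethrown
      IOUtils.closeSafely(false, a, b);
    }
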
Modified: lucene/dev/branches/solr2452/lucene/contrib/misc/src/java/overview.html
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solr2452/lucene/contrib/misc/src/java/overview.html?rev=1129205&r1=1129204&r2=1129205&view=diff
==============================================================================
--- lucene/dev/branches/solr2452/lucene/contrib/misc/src/java/overview.html (original)
+++ lucene/dev/branches/solr2452/lucene/contrib/misc/src/java/overview.html Mon May 30 14:51:25 2011
@@ -51,9 +51,11 @@ for details.
 
 Steps to build:
 <ul>
-  <li> <tt>cd lucene/contrib/misc/src/java/org/apache/lucene/store</tt>
+  <li> <tt>cd lucene/contrib/misc/</tt>
 
-  <li> Compile NativePosixUtil.cpp -> libNativePosixUtil.so.  On linux, something like <tt>gcc -fPIC -o libNativePosixUtil.so -shared -Wl,-soname,libNativePosixUtil.so  -I$JAVA_HOME/include -I$JAVA_HOME/include/linux NativePosixUtil.cpp -lc -lstdc++</tt>.  Add <tt>-m64</tt> if you want to compile 64bit (and java must be run with -d64 so it knows to load a 64bit dynamic lib).
+  <li> To compile NativePosixUtil.cpp into libNativePosixUtil.so on Linux, run <tt>ant build-native-unix</tt>.
+  
+  <li><tt>libNativePosixUtil.so</tt> will be located in the <tt>lucene/build/native/</tt> folder.
 
   <li> Make sure libNativePosixUtil.so is on your LD_LIBRARY_PATH so java can find it (something like <tt>export LD_LIBRARY_PATH=/path/to/dir:$LD_LIBRARY_PATH</tt>, where /path/to/dir contains libNativePosixUtil.so)
 

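A quick, hedged way to confirm the loader setup described above (the class name is made up; System.loadLibrary maps "NativePosixUtil" to libNativePosixUtil.so on Linux and searches java.library.path, which LD_LIBRARY_PATH feeds into):

    // java -Djava.library.path=lucene/build/native CheckNative
    public class CheckNative {
      public static void main(String[] args) {
        System.loadLibrary("NativePosixUtil"); // UnsatisfiedLinkError if not found
        System.out.println("libNativePosixUtil.so resolved OK");
      }
    }
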
Modified: lucene/dev/branches/solr2452/lucene/contrib/queries/src/java/org/apache/lucene/search/regex/JakartaRegexpCapabilities.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solr2452/lucene/contrib/queries/src/java/org/apache/lucene/search/regex/JakartaRegexpCapabilities.java?rev=1129205&r1=1129204&r2=1129205&view=diff
==============================================================================
--- lucene/dev/branches/solr2452/lucene/contrib/queries/src/java/org/apache/lucene/search/regex/JakartaRegexpCapabilities.java (original)
+++ lucene/dev/branches/solr2452/lucene/contrib/queries/src/java/org/apache/lucene/search/regex/JakartaRegexpCapabilities.java Mon May 30 14:51:25 2011
@@ -18,6 +18,7 @@ package org.apache.lucene.search.regex;
  */
 
 import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.CharsRef;
 import org.apache.lucene.util.UnicodeUtil;
 import org.apache.regexp.CharacterIterator;
 import org.apache.regexp.RE;
@@ -104,11 +105,11 @@ public class JakartaRegexpCapabilities i
 
   class JakartaRegexMatcher implements RegexCapabilities.RegexMatcher {
     private RE regexp;
-    private final UnicodeUtil.UTF16Result utf16 = new UnicodeUtil.UTF16Result();
+    private final CharsRef utf16 = new CharsRef(10);
     private final CharacterIterator utf16wrapper = new CharacterIterator() {
 
       public char charAt(int pos) {
-        return utf16.result[pos];
+        return utf16.chars[pos];
       }
 
       public boolean isEnd(int pos) {
@@ -120,7 +121,7 @@ public class JakartaRegexpCapabilities i
       }
 
       public String substring(int beginIndex, int endIndex) {
-        return new String(utf16.result, beginIndex, endIndex - beginIndex);
+        return new String(utf16.chars, beginIndex, endIndex - beginIndex);
       }
       
     };

Modified: lucene/dev/branches/solr2452/lucene/contrib/queries/src/java/org/apache/lucene/search/regex/JavaUtilRegexCapabilities.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solr2452/lucene/contrib/queries/src/java/org/apache/lucene/search/regex/JavaUtilRegexCapabilities.java?rev=1129205&r1=1129204&r2=1129205&view=diff
==============================================================================
--- lucene/dev/branches/solr2452/lucene/contrib/queries/src/java/org/apache/lucene/search/regex/JavaUtilRegexCapabilities.java (original)
+++ lucene/dev/branches/solr2452/lucene/contrib/queries/src/java/org/apache/lucene/search/regex/JavaUtilRegexCapabilities.java Mon May 30 14:51:25 2011
@@ -21,6 +21,7 @@ import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
 import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.CharsRef;
 import org.apache.lucene.util.UnicodeUtil;
 
 /**
@@ -95,25 +96,11 @@ public class JavaUtilRegexCapabilities i
   class JavaUtilRegexMatcher implements RegexCapabilities.RegexMatcher {
     private final Pattern pattern;
     private final Matcher matcher;
-    private final UnicodeUtil.UTF16Result utf16 = new UnicodeUtil.UTF16Result();
-    private final CharSequence utf16wrapper = new CharSequence() {
-
-      public int length() {
-        return utf16.length;
-      }
-
-      public char charAt(int index) {
-        return utf16.result[index];
-      }
-
-      public CharSequence subSequence(int start, int end) {
-        return new String(utf16.result, start, end - start);
-      }  
-    };
+    private final CharsRef utf16 = new CharsRef(10);
     
     public JavaUtilRegexMatcher(String regex, int flags) {
       this.pattern = Pattern.compile(regex, flags);
-      this.matcher = this.pattern.matcher(utf16wrapper);
+      this.matcher = this.pattern.matcher(utf16);
     }
     
     public boolean match(BytesRef term) {

Modified: lucene/dev/branches/solr2452/lucene/contrib/queries/src/java/org/apache/lucene/search/similar/MoreLikeThis.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solr2452/lucene/contrib/queries/src/java/org/apache/lucene/search/similar/MoreLikeThis.java?rev=1129205&r1=1129204&r2=1129205&view=diff
==============================================================================
--- lucene/dev/branches/solr2452/lucene/contrib/queries/src/java/org/apache/lucene/search/similar/MoreLikeThis.java (original)
+++ lucene/dev/branches/solr2452/lucene/contrib/queries/src/java/org/apache/lucene/search/similar/MoreLikeThis.java Mon May 30 14:51:25 2011
@@ -48,6 +48,7 @@ import org.apache.lucene.search.TermQuer
 import org.apache.lucene.search.TopDocs;
 import org.apache.lucene.store.FSDirectory;
 import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.CharsRef;
 import org.apache.lucene.util.PriorityQueue;
 
 
@@ -850,8 +851,9 @@ public final class MoreLikeThis {
 	{
 		BytesRef[] terms = vector.getTerms();
 		int freqs[]=vector.getTermFrequencies();
+		final CharsRef spare = new CharsRef();
 		for (int j = 0; j < terms.length; j++) {
-		    String term = terms[j].utf8ToString();
+		  final String term = terms[j].utf8ToChars(spare).toString();
 		
 			if(isNoiseWord(term)){
 				continue;

Modified: lucene/dev/branches/solr2452/lucene/contrib/xml-query-parser/dtddocbuild.xml
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solr2452/lucene/contrib/xml-query-parser/dtddocbuild.xml?rev=1129205&r1=1129204&r2=1129205&view=diff
==============================================================================
--- lucene/dev/branches/solr2452/lucene/contrib/xml-query-parser/dtddocbuild.xml (original)
+++ lucene/dev/branches/solr2452/lucene/contrib/xml-query-parser/dtddocbuild.xml Mon May 30 14:51:25 2011
@@ -1,5 +1,22 @@
 <?xml version="1.0"?>
 
+<!--
+    Licensed to the Apache Software Foundation (ASF) under one or more
+    contributor license agreements.  See the NOTICE file distributed with
+    this work for additional information regarding copyright ownership.
+    The ASF licenses this file to You under the Apache License, Version 2.0
+    (the "License"); you may not use this file except in compliance with
+    the License.  You may obtain a copy of the License at
+
+        http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
+ -->
+
 <project name="DTDDocAnt" default="main">
 
   <import file="../contrib-build.xml"/>

Modified: lucene/dev/branches/solr2452/lucene/src/java/org/apache/lucene/document/CompressionTools.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solr2452/lucene/src/java/org/apache/lucene/document/CompressionTools.java?rev=1129205&r1=1129204&r2=1129205&view=diff
==============================================================================
--- lucene/dev/branches/solr2452/lucene/src/java/org/apache/lucene/document/CompressionTools.java (original)
+++ lucene/dev/branches/solr2452/lucene/src/java/org/apache/lucene/document/CompressionTools.java Mon May 30 14:51:25 2011
@@ -23,6 +23,7 @@ import java.util.zip.DataFormatException
 import java.io.ByteArrayOutputStream;
 
 import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.CharsRef;
 import org.apache.lucene.util.UnicodeUtil;
 
 /** Simple utility class providing static methods to
@@ -118,9 +119,9 @@ public class CompressionTools {
   /** Decompress the byte array previously returned by
    *  compressString back into a String */
   public static String decompressString(byte[] value) throws DataFormatException {
-    UnicodeUtil.UTF16Result result = new UnicodeUtil.UTF16Result();
     final byte[] bytes = decompress(value);
+    CharsRef result = new CharsRef(bytes.length);
     UnicodeUtil.UTF8toUTF16(bytes, 0, bytes.length, result);
-    return new String(result.result, 0, result.length);
+    return new String(result.chars, 0, result.length);
   }
 }

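The rewritten decompressString pairs with compressString; a round-trip sketch (the input string is made up). Sizing the CharsRef to the decompressed byte count is safe because a UTF-16 result never has more chars than its UTF-8 encoding has bytes:

    import java.util.zip.DataFormatException;
    import org.apache.lucene.document.CompressionTools;

    static void roundTrip() throws DataFormatException {
      String original = "some stored field value";
      byte[] packed = CompressionTools.compressString(original);
      String restored = CompressionTools.decompressString(packed);
      assert original.equals(restored);
    }
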
Modified: lucene/dev/branches/solr2452/lucene/src/java/org/apache/lucene/index/CompoundFileWriter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solr2452/lucene/src/java/org/apache/lucene/index/CompoundFileWriter.java?rev=1129205&r1=1129204&r2=1129205&view=diff
==============================================================================
--- lucene/dev/branches/solr2452/lucene/src/java/org/apache/lucene/index/CompoundFileWriter.java (original)
+++ lucene/dev/branches/solr2452/lucene/src/java/org/apache/lucene/index/CompoundFileWriter.java Mon May 30 14:51:25 2011
@@ -60,6 +60,9 @@ public final class CompoundFileWriter {
 
         /** temporary holder for the start of this file's data section */
         long dataOffset;
+        
+        /** the directory which contains the file. */
+        Directory dir;
     }
 
     // Before versioning started.
@@ -119,6 +122,14 @@ public final class CompoundFileWriter {
      *   has been added already
      */
     public void addFile(String file) {
+      addFile(file, directory);
+    }
+
+    /**
+     * Same as {@link #addFile(String)}, only for files that are found in an
+     * external {@link Directory}.
+     */
+    public void addFile(String file, Directory dir) {
         if (merged)
             throw new IllegalStateException(
                 "Can't add extensions after merge has been called");
@@ -133,6 +144,7 @@ public final class CompoundFileWriter {
 
         FileEntry entry = new FileEntry();
         entry.file = file;
+        entry.dir = dir;
         entries.add(entry);
     }
 
@@ -170,7 +182,7 @@ public final class CompoundFileWriter {
                 fe.directoryOffset = os.getFilePointer();
                 os.writeLong(0);    // for now
                 os.writeString(IndexFileNames.stripSegmentName(fe.file));
-                totalSize += directory.fileLength(fe.file);
+                totalSize += fe.dir.fileLength(fe.file);
             }
 
             // Pre-allocate size of file as optimization --
@@ -216,7 +228,7 @@ public final class CompoundFileWriter {
    * output stream.
    */
   private void copyFile(FileEntry source, IndexOutput os) throws IOException {
-    IndexInput is = directory.openInput(source.file);
+    IndexInput is = source.dir.openInput(source.file);
     try {
       long startPtr = os.getFilePointer();
       long length = is.length();

Modified: lucene/dev/branches/solr2452/lucene/src/java/org/apache/lucene/index/DocFieldProcessor.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solr2452/lucene/src/java/org/apache/lucene/index/DocFieldProcessor.java?rev=1129205&r1=1129204&r2=1129205&view=diff
==============================================================================
--- lucene/dev/branches/solr2452/lucene/src/java/org/apache/lucene/index/DocFieldProcessor.java (original)
+++ lucene/dev/branches/solr2452/lucene/src/java/org/apache/lucene/index/DocFieldProcessor.java Mon May 30 14:51:25 2011
@@ -84,19 +84,44 @@ final class DocFieldProcessor extends Do
 
   @Override
   public void abort() {
-    for(int i=0;i<fieldHash.length;i++) {
-      DocFieldProcessorPerField field = fieldHash[i];
-      while(field != null) {
+    Throwable th = null;
+    
+    for (DocFieldProcessorPerField field : fieldHash) {
+      while (field != null) {
         final DocFieldProcessorPerField next = field.next;
-        field.abort();
+        try {
+          field.abort();
+        } catch (Throwable t) {
+          if (th == null) {
+            th = t;
+          }
+        }
         field = next;
       }
     }
-
+    
     try {
       fieldsWriter.abort();
-    } finally {
+    } catch (Throwable t) {
+      if (th == null) {
+        th = t;
+      }
+    }
+    
+    try {
       consumer.abort();
+    } catch (Throwable t) {
+      if (th == null) {
+        th = t;
+      }
+    }
+    
+    // If any errors occurred, rethrow the first one.
+    if (th != null) {
+      if (th instanceof RuntimeException) throw (RuntimeException) th;
+      if (th instanceof Error) throw (Error) th;
+      // defensive code - we should not hit a checked exception here
+      throw new RuntimeException(th);
     }
   }
 

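The rewritten abort() is an instance of a general cleanup idiom: run every step, remember only the first Throwable, and rethrow it after all cleanup has executed. A self-contained sketch of that idiom (names are illustrative, not Lucene API):

    // Run all cleanup steps even when some fail; surface the first failure.
    static void abortAll(Iterable<Runnable> steps) {
      Throwable th = null;
      for (Runnable step : steps) {
        try {
          step.run();
        } catch (Throwable t) {
          if (th == null) th = t; // keep the root cause, drop followers
        }
      }
      if (th != null) {
        if (th instanceof RuntimeException) throw (RuntimeException) th;
        if (th instanceof Error) throw (Error) th;
        throw new RuntimeException(th); // a checked Throwable: wrap it
      }
    }
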
Modified: lucene/dev/branches/solr2452/lucene/src/java/org/apache/lucene/index/DocInverter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solr2452/lucene/src/java/org/apache/lucene/index/DocInverter.java?rev=1129205&r1=1129204&r2=1129205&view=diff
==============================================================================
--- lucene/dev/branches/solr2452/lucene/src/java/org/apache/lucene/index/DocInverter.java (original)
+++ lucene/dev/branches/solr2452/lucene/src/java/org/apache/lucene/index/DocInverter.java Mon May 30 14:51:25 2011
@@ -87,6 +87,7 @@ final class DocInverter extends DocField
     endConsumer.startDocument();
   }
 
+  @Override
   public void finishDocument() throws IOException {
     // TODO: allow endConsumer.finishDocument to also return
     // a DocWriter

Modified: lucene/dev/branches/solr2452/lucene/src/java/org/apache/lucene/index/DocInverterPerField.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solr2452/lucene/src/java/org/apache/lucene/index/DocInverterPerField.java?rev=1129205&r1=1129204&r2=1129205&view=diff
==============================================================================
--- lucene/dev/branches/solr2452/lucene/src/java/org/apache/lucene/index/DocInverterPerField.java (original)
+++ lucene/dev/branches/solr2452/lucene/src/java/org/apache/lucene/index/DocInverterPerField.java Mon May 30 14:51:25 2011
@@ -53,8 +53,11 @@ final class DocInverterPerField extends 
 
   @Override
   void abort() {
-    consumer.abort();
-    endConsumer.abort();
+    try {
+      consumer.abort();
+    } finally {
+      endConsumer.abort();
+    }
   }
 
   @Override

Modified: lucene/dev/branches/solr2452/lucene/src/java/org/apache/lucene/index/DocumentsWriter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solr2452/lucene/src/java/org/apache/lucene/index/DocumentsWriter.java?rev=1129205&r1=1129204&r2=1129205&view=diff
==============================================================================
--- lucene/dev/branches/solr2452/lucene/src/java/org/apache/lucene/index/DocumentsWriter.java (original)
+++ lucene/dev/branches/solr2452/lucene/src/java/org/apache/lucene/index/DocumentsWriter.java Mon May 30 14:51:25 2011
@@ -228,14 +228,19 @@ final class DocumentsWriter {
       }
 
       final Iterator<ThreadState> threadsIterator = perThreadPool.getActivePerThreadsIterator();
-
       while (threadsIterator.hasNext()) {
-        ThreadState perThread = threadsIterator.next();
+        final ThreadState perThread = threadsIterator.next();
         perThread.lock();
         try {
           if (perThread.isActive()) { // we might be closed
-            perThread.perThread.abort();
-            perThread.perThread.checkAndResetHasAborted();
+            try {
+              perThread.perThread.abort();
+            } catch (IOException ex) {
+              // continue
+            } finally {
+              perThread.perThread.checkAndResetHasAborted();
+              flushControl.doOnAbort(perThread);
+            }
           } else {
             assert closed;
           }
@@ -243,7 +248,6 @@ final class DocumentsWriter {
           perThread.unlock();
         }
       }
-
       success = true;
     } finally {
       if (infoStream != null) {
@@ -274,11 +278,9 @@ final class DocumentsWriter {
     flushControl.setClosed();
   }
 
-  boolean updateDocument(final Document doc, final Analyzer analyzer,
-      final Term delTerm) throws CorruptIndexException, IOException {
+  private boolean preUpdate() throws CorruptIndexException, IOException {
     ensureOpen();
     boolean maybeMerge = false;
-    final boolean isUpdate = delTerm != null;
     if (flushControl.anyStalledThreads() || flushControl.numQueuedFlushes() > 0) {
       // Help out flushing any queued DWPTs so we can un-stall:
       if (infoStream != null) {
@@ -303,13 +305,30 @@ final class DocumentsWriter {
        message("continue indexing after helping out flushing DocumentsWriter is healthy");
       }
     }
+    return maybeMerge;
+  }
+
+  private boolean postUpdate(DocumentsWriterPerThread flushingDWPT, boolean maybeMerge) throws IOException {
+    if (flushingDWPT != null) {
+      maybeMerge |= doFlush(flushingDWPT);
+    } else {
+      final DocumentsWriterPerThread nextPendingFlush = flushControl.nextPendingFlush();
+      if (nextPendingFlush != null) {
+        maybeMerge |= doFlush(nextPendingFlush);
+      }
+    }
 
-    final ThreadState perThread = perThreadPool.getAndLock(Thread.currentThread(),
-        this, doc);
+    return maybeMerge;
+  }
+
+  boolean updateDocuments(final Iterable<Document> docs, final Analyzer analyzer,
+                          final Term delTerm) throws CorruptIndexException, IOException {
+    boolean maybeMerge = preUpdate();
+
+    final ThreadState perThread = perThreadPool.getAndLock(Thread.currentThread(), this);
     final DocumentsWriterPerThread flushingDWPT;
     
     try {
-
       if (!perThread.isActive()) {
         ensureOpen();
         assert false: "perThread is not active but we are still open";
@@ -317,27 +336,53 @@ final class DocumentsWriter {
        
       final DocumentsWriterPerThread dwpt = perThread.perThread;
       try {
-        dwpt.updateDocument(doc, analyzer, delTerm); 
-        numDocsInRAM.incrementAndGet();
+        final int docCount = dwpt.updateDocuments(docs, analyzer, delTerm);
+        numDocsInRAM.addAndGet(docCount);
       } finally {
         if (dwpt.checkAndResetHasAborted()) {
           flushControl.doOnAbort(perThread);
         }
       }
+      final boolean isUpdate = delTerm != null;
       flushingDWPT = flushControl.doAfterDocument(perThread, isUpdate);
     } finally {
       perThread.unlock();
     }
+
+    return postUpdate(flushingDWPT, maybeMerge);
+  }
+
+  boolean updateDocument(final Document doc, final Analyzer analyzer,
+      final Term delTerm) throws CorruptIndexException, IOException {
+
+    boolean maybeMerge = preUpdate();
+
+    final ThreadState perThread = perThreadPool.getAndLock(Thread.currentThread(), this);
+    final DocumentsWriterPerThread flushingDWPT;
     
-    if (flushingDWPT != null) {
-      maybeMerge |= doFlush(flushingDWPT);
-    } else {
-      final DocumentsWriterPerThread nextPendingFlush = flushControl.nextPendingFlush();
-      if (nextPendingFlush != null) {
-        maybeMerge |= doFlush(nextPendingFlush);
+    try {
+
+      if (!perThread.isActive()) {
+        ensureOpen();
+        assert false: "perThread is not active but we are still open";
       }
+       
+      final DocumentsWriterPerThread dwpt = perThread.perThread;
+      try {
+        dwpt.updateDocument(doc, analyzer, delTerm); 
+        numDocsInRAM.incrementAndGet();
+      } finally {
+        if (dwpt.checkAndResetHasAborted()) {
+          flushControl.doOnAbort(perThread);
+        }
+      }
+      final boolean isUpdate = delTerm != null;
+      flushingDWPT = flushControl.doAfterDocument(perThread, isUpdate);
+    } finally {
+      perThread.unlock();
     }
-    return maybeMerge;
+
+    return postUpdate(flushingDWPT, maybeMerge);
   }
 
   private  boolean doFlush(DocumentsWriterPerThread flushingDWPT) throws IOException {
@@ -541,4 +586,20 @@ final class DocumentsWriter {
       return (!isSegmentFlush || segment != null);  
     }
   }
+  
+  // used by IW during close to assert all DWPTs are inactive after the final flush
+  boolean assertNoActiveDWPT() {
+    Iterator<ThreadState> activePerThreadsIterator = perThreadPool.getAllPerThreadsIterator();
+    while(activePerThreadsIterator.hasNext()) {
+      ThreadState next = activePerThreadsIterator.next();
+      next.lock();
+      try {
+        assert !next.isActive();
+      } finally  {
+        next.unlock();
+      }
+    }
+    return true;
+  }
+ 
 }

Modified: lucene/dev/branches/solr2452/lucene/src/java/org/apache/lucene/index/DocumentsWriterFlushControl.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solr2452/lucene/src/java/org/apache/lucene/index/DocumentsWriterFlushControl.java?rev=1129205&r1=1129204&r2=1129205&view=diff
==============================================================================
--- lucene/dev/branches/solr2452/lucene/src/java/org/apache/lucene/index/DocumentsWriterFlushControl.java (original)
+++ lucene/dev/branches/solr2452/lucene/src/java/org/apache/lucene/index/DocumentsWriterFlushControl.java Mon May 30 14:51:25 2011
@@ -16,6 +16,7 @@ package org.apache.lucene.index;
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+import java.io.IOException;
 import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.Iterator;
@@ -68,7 +69,7 @@ public final class DocumentsWriterFlushC
     this.stallControl = new DocumentsWriterStallControl();
     this.perThreadPool = documentsWriter.perThreadPool;
     this.flushPolicy = documentsWriter.flushPolicy;
-    this.hardMaxBytesPerDWPT = config.getRAMPerThreadHardLimitMB() * 1024 * 1024;;
+    this.hardMaxBytesPerDWPT = config.getRAMPerThreadHardLimitMB() * 1024 * 1024;
     this.config = config;
     this.documentsWriter = documentsWriter;
   }
@@ -162,8 +163,6 @@ public final class DocumentsWriterFlushC
       stallControl.updateStalled(this);
       assert assertMemory();
     }
-    
-    
   }
 
   synchronized void doAfterFlush(DocumentsWriterPerThread dwpt) {
@@ -206,7 +205,7 @@ public final class DocumentsWriterFlushC
     } // don't assert on numDocs since we could hit an abort excp. while selecting that dwpt for flushing
     
   }
-
+  
   synchronized void doOnAbort(ThreadState state) {
     try {
       if (state.flushPending) {
@@ -217,7 +216,7 @@ public final class DocumentsWriterFlushC
       assert assertMemory();
       // Take it out of the loop this DWPT is stale
       perThreadPool.replaceForFlush(state, closed);
-    }finally {
+    } finally {
       stallControl.updateStalled(this);
     }
   }
@@ -305,6 +304,7 @@ public final class DocumentsWriterFlushC
   synchronized void setClosed() {
     // set by DW to signal that we should not release new DWPT after close
     this.closed = true;
+    perThreadPool.deactivateUnreleasedStates();
   }
 
   /**
@@ -387,8 +387,12 @@ public final class DocumentsWriterFlushC
             toFlush.add(flushingDWPT);
           }
         } else {
-          // get the new delete queue from DW
-          next.perThread.initialize();
+          if (closed) {
+            next.resetWriter(null); // make this state inactive
+          } else {
+            // get the new delete queue from DW
+            next.perThread.initialize();
+          }
         }
       } finally {
         next.unlock();
@@ -451,10 +455,21 @@ public final class DocumentsWriterFlushC
     try {
       for (DocumentsWriterPerThread dwpt : flushQueue) {
         doAfterFlush(dwpt);
+        try {
+          dwpt.abort();
+        } catch (IOException ex) {
+          // continue
+        }
       }
       for (BlockedFlush blockedFlush : blockedFlushes) {
-        flushingWriters.put(blockedFlush.dwpt, Long.valueOf(blockedFlush.bytes));
+        flushingWriters
+            .put(blockedFlush.dwpt, Long.valueOf(blockedFlush.bytes));
         doAfterFlush(blockedFlush.dwpt);
+        try {
+          blockedFlush.dwpt.abort();
+        } catch (IOException ex) {
+          // continue
+        }
       }
     } finally {
       fullFlush = false;
@@ -512,5 +527,4 @@ public final class DocumentsWriterFlushC
   boolean anyStalledThreads() {
     return stallControl.anyStalledThreads();
   }
- 
-}
\ No newline at end of file
+}

Modified: lucene/dev/branches/solr2452/lucene/src/java/org/apache/lucene/index/DocumentsWriterPerThread.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solr2452/lucene/src/java/org/apache/lucene/index/DocumentsWriterPerThread.java?rev=1129205&r1=1129204&r2=1129205&view=diff
==============================================================================
--- lucene/dev/branches/solr2452/lucene/src/java/org/apache/lucene/index/DocumentsWriterPerThread.java (original)
+++ lucene/dev/branches/solr2452/lucene/src/java/org/apache/lucene/index/DocumentsWriterPerThread.java Mon May 30 14:51:25 2011
@@ -104,7 +104,7 @@ public class DocumentsWriterPerThread {
       // largish:
       doc = null;
       analyzer = null;
-  }
+    }
   }
 
   static class FlushedSegment {
@@ -177,7 +177,7 @@ public class DocumentsWriterPerThread {
     this.parent = parent;
     this.fieldInfos = fieldInfos;
     this.writer = parent.indexWriter;
-    this.infoStream = parent.indexWriter.getInfoStream();
+    this.infoStream = parent.infoStream;
     this.docState = new DocState(this);
     this.docState.similarityProvider = parent.indexWriter.getConfig()
         .getSimilarityProvider();
@@ -253,6 +253,82 @@ public class DocumentsWriterPerThread {
     finishDocument(delTerm);
   }
   
+  public int updateDocuments(Iterable<Document> docs, Analyzer analyzer, Term delTerm) throws IOException {
+    assert writer.testPoint("DocumentsWriterPerThread addDocuments start");
+    assert deleteQueue != null;
+    docState.analyzer = analyzer;
+    if (segment == null) {
+      // this call is synchronized on IndexWriter.segmentInfos
+      segment = writer.newSegmentName();
+      assert numDocsInRAM == 0;
+    }
+
+    int docCount = 0;
+    try {
+      for(Document doc : docs) {
+        docState.doc = doc;
+        docState.docID = numDocsInRAM;
+        docCount++;
+
+        boolean success = false;
+        try {
+          consumer.processDocument(fieldInfos);
+          success = true;
+        } finally {
+          if (!success) {
+            // An exc is being thrown...
+
+            if (!aborting) {
+              // One of the documents hit a non-aborting
+              // exception (eg something happened during
+              // analysis).  We now go and mark any docs
+              // from this batch that we had already indexed
+              // as deleted:
+              int docID = docState.docID;
+              final int endDocID = docID - docCount;
+              while (docID > endDocID) {
+                deleteDocID(docID);
+                docID--;
+              }
+
+              // Incr here because finishDocument will not
+              // be called (because an exc is being thrown):
+              numDocsInRAM++;
+              fieldInfos.revertUncommitted();
+            } else {
+              abort();
+            }
+          }
+        }
+        success = false;
+        try {
+          consumer.finishDocument();
+          success = true;
+        } finally {
+          if (!success) {
+            abort();
+          }
+        }
+
+        finishDocument(null);
+      }
+
+      // Apply delTerm only after all indexing has
+      // succeeded, but apply it only to docs prior to when
+      // this batch started:
+      if (delTerm != null) {
+        deleteQueue.add(delTerm, deleteSlice);
+        assert deleteSlice.isTailItem(delTerm) : "expected the delete term as the tail item";
+        deleteSlice.apply(pendingDeletes, numDocsInRAM-docCount);
+      }
+
+    } finally {
+      docState.clear();
+    }
+
+    return docCount;
+  }
+  
   private void finishDocument(Term delTerm) throws IOException {
     /*
      * here we actually finish the document in two steps 1. push the delete into
@@ -474,6 +550,7 @@ public class DocumentsWriterPerThread {
       super(blockSize);
     }
 
+    @Override
     public byte[] getByteBlock() {
       bytesUsed.addAndGet(blockSize);
       return new byte[blockSize];
@@ -486,7 +563,7 @@ public class DocumentsWriterPerThread {
       }
     }
     
-  };
+  }
   
   void setInfoStream(PrintStream infoStream) {
     this.infoStream = infoStream;

Modified: lucene/dev/branches/solr2452/lucene/src/java/org/apache/lucene/index/DocumentsWriterPerThreadPool.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solr2452/lucene/src/java/org/apache/lucene/index/DocumentsWriterPerThreadPool.java?rev=1129205&r1=1129204&r2=1129205&view=diff
==============================================================================
--- lucene/dev/branches/solr2452/lucene/src/java/org/apache/lucene/index/DocumentsWriterPerThreadPool.java (original)
+++ lucene/dev/branches/solr2452/lucene/src/java/org/apache/lucene/index/DocumentsWriterPerThreadPool.java Mon May 30 14:51:25 2011
@@ -19,7 +19,6 @@ package org.apache.lucene.index;
 import java.util.Iterator;
 import java.util.concurrent.locks.ReentrantLock;
 
-import org.apache.lucene.document.Document;
 import org.apache.lucene.index.FieldInfos.FieldNumberBiMap;
 import org.apache.lucene.index.SegmentCodecs.SegmentCodecsBuilder;
 import org.apache.lucene.index.codecs.CodecProvider;
@@ -194,6 +193,21 @@ public abstract class DocumentsWriterPer
     return null;
   }
   
+  /**
+   * Deactivate all unreleased thread states.
+   */
+  protected synchronized void deactivateUnreleasedStates() {
+    for (int i = numThreadStatesActive; i < perThreads.length; i++) {
+      final ThreadState threadState = perThreads[i];
+      threadState.lock();
+      try {
+        threadState.resetWriter(null);
+      } finally {
+        threadState.unlock();
+      }
+    }
+  }
+  
   protected DocumentsWriterPerThread replaceForFlush(ThreadState threadState, boolean closed) {
     assert threadState.isHeldByCurrentThread();
     final DocumentsWriterPerThread dwpt = threadState.perThread;
@@ -212,7 +226,7 @@ public abstract class DocumentsWriterPer
     // don't recycle DWPT by default
   }
   
-  public abstract ThreadState getAndLock(Thread requestingThread, DocumentsWriter documentsWriter, Document doc);
+  public abstract ThreadState getAndLock(Thread requestingThread, DocumentsWriter documentsWriter);
 
   /**
    * Returns an iterator providing access to all {@link ThreadState}

Modified: lucene/dev/branches/solr2452/lucene/src/java/org/apache/lucene/index/FieldsWriter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solr2452/lucene/src/java/org/apache/lucene/index/FieldsWriter.java?rev=1129205&r1=1129204&r2=1129205&view=diff
==============================================================================
--- lucene/dev/branches/solr2452/lucene/src/java/org/apache/lucene/index/FieldsWriter.java (original)
+++ lucene/dev/branches/solr2452/lucene/src/java/org/apache/lucene/index/FieldsWriter.java Mon May 30 14:51:25 2011
@@ -113,7 +113,7 @@ final class FieldsWriter {
   void close() throws IOException {
     if (directory != null) {
       try {
-        IOUtils.closeSafely(fieldsStream, indexStream);
+        IOUtils.closeSafely(false, fieldsStream, indexStream);
       } finally {
         fieldsStream = indexStream = null;
       }

Modified: lucene/dev/branches/solr2452/lucene/src/java/org/apache/lucene/index/FreqProxTermsWriter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solr2452/lucene/src/java/org/apache/lucene/index/FreqProxTermsWriter.java?rev=1129205&r1=1129204&r2=1129205&view=diff
==============================================================================
--- lucene/dev/branches/solr2452/lucene/src/java/org/apache/lucene/index/FreqProxTermsWriter.java (original)
+++ lucene/dev/branches/solr2452/lucene/src/java/org/apache/lucene/index/FreqProxTermsWriter.java Mon May 30 14:51:25 2011
@@ -57,9 +57,10 @@ final class FreqProxTermsWriter extends 
 
     final FieldsConsumer consumer = state.segmentCodecs.codec().fieldsConsumer(state);
 
-    TermsHash termsHash = null;
-
-    /*
+    try {
+      TermsHash termsHash = null;
+      
+      /*
     Current writer chain:
       FieldsConsumer
         -> IMPL: FormatPostingsTermsDictWriter
@@ -69,36 +70,38 @@ final class FreqProxTermsWriter extends 
                 -> IMPL: FormatPostingsDocsWriter
                   -> PositionsConsumer
                     -> IMPL: FormatPostingsPositionsWriter
-    */
-
-    for (int fieldNumber = 0; fieldNumber < numAllFields; fieldNumber++) {
-      final FieldInfo fieldInfo = allFields.get(fieldNumber).fieldInfo;
-
-      final FreqProxTermsWriterPerField fieldWriter = allFields.get(fieldNumber);
-
-      // Aggregate the storePayload as seen by the same
-      // field across multiple threads
-      if (!fieldInfo.omitTermFreqAndPositions) {
-        fieldInfo.storePayloads |= fieldWriter.hasPayloads;
+       */
+      
+      for (int fieldNumber = 0; fieldNumber < numAllFields; fieldNumber++) {
+        final FieldInfo fieldInfo = allFields.get(fieldNumber).fieldInfo;
+        
+        final FreqProxTermsWriterPerField fieldWriter = allFields.get(fieldNumber);
+        
+        // Aggregate the storePayload as seen by the same
+        // field across multiple threads
+        if (!fieldInfo.omitTermFreqAndPositions) {
+          fieldInfo.storePayloads |= fieldWriter.hasPayloads;
+        }
+        
+        // If this field has postings then add them to the
+        // segment
+        fieldWriter.flush(fieldInfo.name, consumer, state);
+        
+        TermsHashPerField perField = fieldWriter.termsHashPerField;
+        assert termsHash == null || termsHash == perField.termsHash;
+        termsHash = perField.termsHash;
+        int numPostings = perField.bytesHash.size();
+        perField.reset();
+        perField.shrinkHash(numPostings);
+        fieldWriter.reset();
       }
-
-      // If this field has postings then add them to the
-      // segment
-      fieldWriter.flush(fieldInfo.name, consumer, state);
-
-      TermsHashPerField perField = fieldWriter.termsHashPerField;
-      assert termsHash == null || termsHash == perField.termsHash;
-      termsHash = perField.termsHash;
-      int numPostings = perField.bytesHash.size();
-      perField.reset();
-      perField.shrinkHash(numPostings);
-      fieldWriter.reset();
-    }
-
-    if (termsHash != null) {
-      termsHash.reset();
+      
+      if (termsHash != null) {
+        termsHash.reset();
+      }
+    } finally {
+      consumer.close();
     }
-    consumer.close();
   }
 
   BytesRef payload;
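
The try/finally restructuring above exists to guarantee that the codec's
FieldsConsumer is closed even when one of the per-field flushes throws. The
shape of the pattern, reduced to a self-contained sketch (names hypothetical):

    static void flushAll(java.util.List<Runnable> fieldFlushes,
                         java.io.Closeable consumer) throws java.io.IOException {
      try {
        for (Runnable flush : fieldFlushes) {
          flush.run();        // a failure here still reaches consumer.close()
        }
      } finally {
        consumer.close();     // mirrors the new finally block in flush()
      }
    }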

Modified: lucene/dev/branches/solr2452/lucene/src/java/org/apache/lucene/index/IndexFileNames.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solr2452/lucene/src/java/org/apache/lucene/index/IndexFileNames.java?rev=1129205&r1=1129204&r2=1129205&view=diff
==============================================================================
--- lucene/dev/branches/solr2452/lucene/src/java/org/apache/lucene/index/IndexFileNames.java (original)
+++ lucene/dev/branches/solr2452/lucene/src/java/org/apache/lucene/index/IndexFileNames.java Mon May 30 14:51:25 2011
@@ -17,6 +17,8 @@ package org.apache.lucene.index;
  * limitations under the License.
  */
 
+import java.util.regex.Pattern;
+
 import org.apache.lucene.index.codecs.Codec;  // for javadocs
 
 /**
@@ -238,5 +240,16 @@ public final class IndexFileNames {
     }
     return filename;
   }
+
+  /**
+   * Returns true if the given filename ends with the separate norms file
+   * pattern: {@code SEPARATE_NORMS_EXTENSION + "[0-9]+"}.
+   */
+  public static boolean isSeparateNormsFile(String filename) {
+    int idx = filename.lastIndexOf('.');
+    if (idx == -1) return false;
+    String ext = filename.substring(idx + 1);
+    return Pattern.matches(SEPARATE_NORMS_EXTENSION + "[0-9]+", ext);
+  }
   
 }
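
For reference, the helper keys off the extension after the last dot, so it
recognizes generation-stamped separate norms files (filenames below are
illustrative; SEPARATE_NORMS_EXTENSION is assumed to be "s"):

    assert IndexFileNames.isSeparateNormsFile("_0_1.s0");     // norms for field 0
    assert !IndexFileNames.isSeparateNormsFile("_0.nrm");     // shared norms file
    assert !IndexFileNames.isSeparateNormsFile("segments_2"); // no extension at all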

Modified: lucene/dev/branches/solr2452/lucene/src/java/org/apache/lucene/index/IndexWriter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solr2452/lucene/src/java/org/apache/lucene/index/IndexWriter.java?rev=1129205&r1=1129204&r2=1129205&view=diff
==============================================================================
--- lucene/dev/branches/solr2452/lucene/src/java/org/apache/lucene/index/IndexWriter.java (original)
+++ lucene/dev/branches/solr2452/lucene/src/java/org/apache/lucene/index/IndexWriter.java Mon May 30 14:51:25 2011
@@ -23,6 +23,7 @@ import java.io.PrintStream;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.Collections;
+import java.util.Comparator;
 import java.util.Date;
 import java.util.HashMap;
 import java.util.HashSet;
@@ -51,6 +52,7 @@ import org.apache.lucene.store.LockObtai
 import org.apache.lucene.util.BitVector;
 import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.Constants;
+import org.apache.lucene.util.StringHelper;
 import org.apache.lucene.util.ThreadInterruptedException;
 import org.apache.lucene.util.MapBackedSet;
 
@@ -1071,7 +1073,8 @@ public class IndexWriter implements Clos
 
       if (infoStream != null)
         message("at close: " + segString());
-
+      // used by assert below
+      final DocumentsWriter oldWriter = docWriter;
       synchronized(this) {
         readerPool.close();
         docWriter = null;
@@ -1085,6 +1088,7 @@ public class IndexWriter implements Clos
       synchronized(this) {
         closed = true;
       }
+      assert oldWriter.assertNoActiveDWPT();
     } catch (OutOfMemoryError oom) {
       handleOOM(oom, "closeInternal");
     } finally {
@@ -1098,6 +1102,8 @@ public class IndexWriter implements Clos
       }
     }
   }
+  
+ 
 
   /** Returns the Directory used by this index. */
   public Directory getDirectory() {
@@ -1228,6 +1234,111 @@ public class IndexWriter implements Clos
   }
 
   /**
+   * Atomically adds a block of documents with sequentially
+   * assigned document IDs, such that an external reader
+   * will see all or none of the documents.
+   *
+   * <p><b>WARNING</b>: the index does not currently record
+   * which documents were added as a block.  Today this is
+   * fine, because merging will preserve the block (as long
+   * as none them were deleted).  But it's possible in the
+   * future that Lucene may more aggressively re-order
+   * documents (for example, perhaps to obtain better index
+   * compression), in which case you may need to fully
+   * re-index your documents at that time.
+   *
+   * <p>See {@link #addDocument(Document)} for details on
+   * index and IndexWriter state after an Exception, and
+   * flushing/merging temporary free space requirements.</p>
+   *
+   * <p><b>NOTE</b>: tools that do offline splitting of an index
+   * (for example, IndexSplitter in contrib) or
+   * re-sorting of documents (for example, IndexSorter in
+   * contrib) are not aware of these atomically added documents
+   * and will likely break them up.  Use such tools at your
+   * own risk!
+   *
+   * <p><b>NOTE</b>: if this method hits an OutOfMemoryError
+   * you should immediately close the writer.  See <a
+   * href="#OOME">above</a> for details.</p>
+   *
+   * @throws CorruptIndexException if the index is corrupt
+   * @throws IOException if there is a low-level IO error
+   *
+   * @lucene.experimental
+   */
+  public void addDocuments(Iterable<Document> docs) throws CorruptIndexException, IOException {
+    addDocuments(docs, analyzer);
+  }
+
+  /**
+   * Atomically adds a block of documents, analyzed using the
+   * provided analyzer, with sequentially assigned document
+   * IDs, such that an external reader will see all or none
+   * of the documents. 
+   *
+   * @throws CorruptIndexException if the index is corrupt
+   * @throws IOException if there is a low-level IO error
+   *
+   * @lucene.experimental
+   */
+  public void addDocuments(Iterable<Document> docs, Analyzer analyzer) throws CorruptIndexException, IOException {
+    updateDocuments(null, docs, analyzer);
+  }
+
+  /**
+   * Atomically deletes documents matching the provided
+   * delTerm and adds a block of documents with sequentially
+   * assigned document IDs, such that an external reader
+   * will see all or none of the documents. 
+   *
+   * See {@link #addDocuments(Iterable)}.
+   *
+   * @throws CorruptIndexException if the index is corrupt
+   * @throws IOException if there is a low-level IO error
+   *
+   * @lucene.experimental
+   */
+  public void updateDocuments(Term delTerm, Iterable<Document> docs) throws CorruptIndexException, IOException {
+    updateDocuments(delTerm, docs, analyzer);
+  }
+
+  /**
+   * Atomically deletes documents matching the provided
+   * delTerm and adds a block of documents, analyzed using
+   * the provided analyzer, with sequentially
+   * assigned document IDs, such that an external reader
+   * will see all or none of the documents. 
+   *
+   * See {@link #addDocuments(Iterable)}.
+   *
+   * @throws CorruptIndexException if the index is corrupt
+   * @throws IOException if there is a low-level IO error
+   *
+   * @lucene.experimental
+   */
+  public void updateDocuments(Term delTerm, Iterable<Document> docs, Analyzer analyzer) throws CorruptIndexException, IOException {
+    ensureOpen();
+    try {
+      boolean success = false;
+      boolean anySegmentFlushed = false;
+      try {
+        anySegmentFlushed = docWriter.updateDocuments(docs, analyzer, delTerm);
+        success = true;
+      } finally {
+        if (!success && infoStream != null) {
+          message("hit exception updating document");
+        }
+      }
+      if (anySegmentFlushed) {
+        maybeMerge();
+      }
+    } catch (OutOfMemoryError oom) {
+      handleOOM(oom, "updateDocuments");
+    }
+  }
+
+  /**
    * Deletes the document(s) containing <code>term</code>.
    *
    * <p><b>NOTE</b>: if this method hits an OutOfMemoryError
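
A usage sketch for the new block APIs (field names, values and the delete term
are purely illustrative); all documents in the iterable land in one segment
with consecutive doc IDs:

    List<Document> block = new ArrayList<Document>();
    Document child = new Document();
    child.add(new Field("type", "child", Field.Store.NO, Field.Index.NOT_ANALYZED));
    block.add(child);
    Document parent = new Document();
    parent.add(new Field("type", "parent", Field.Store.NO, Field.Index.NOT_ANALYZED));
    block.add(parent);
    writer.addDocuments(block);  // an external reader sees all of these, or none
    // later, atomically replace the whole block:
    // writer.updateDocuments(new Term("blockId", "42"), block);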
@@ -2217,10 +2328,10 @@ public class IndexWriter implements Clos
    * <p>
    * <b>NOTE:</b> this method only copies the segments of the incoming indexes
    * and does not merge them. Therefore deleted documents are not removed and
-   * the new segments are not merged with the existing ones. Also, the segments
-   * are copied as-is, meaning they are not converted to CFS if they aren't,
-   * and vice-versa. If you wish to do that, you can call {@link #maybeMerge}
-   * or {@link #optimize} afterwards.
+   * the new segments are not merged with the existing ones. Also, if the merge
+   * policy allows compound files, then any segment that is not already compound
+   * is converted to a compound file. However, if the segment is compound, it is
+   * copied as-is even if the merge policy does not allow compound files.
    *
    * <p>This requires this index not be among those to be added.
    *
@@ -2244,6 +2355,7 @@ public class IndexWriter implements Clos
 
       int docCount = 0;
       List<SegmentInfo> infos = new ArrayList<SegmentInfo>();
+      Comparator<String> versionComparator = StringHelper.getVersionComparator();
       for (Directory dir : dirs) {
         if (infoStream != null) {
           message("addIndexes: process directory " + dir);
@@ -2263,46 +2375,22 @@ public class IndexWriter implements Clos
             message("addIndexes: process segment origName=" + info.name + " newName=" + newSegName + " dsName=" + dsName + " info=" + info);
           }
 
-          // Determine if the doc store of this segment needs to be copied. It's
-          // only relevant for segments who share doc store with others, because
-          // the DS might have been copied already, in which case we just want
-          // to update the DS name of this SegmentInfo.
-          // NOTE: pre-3x segments include a null DSName if they don't share doc
-          // store. So the following code ensures we don't accidentally insert
-          // 'null' to the map.
-          final String newDsName;
-          if (dsName != null) {
-            if (dsNames.containsKey(dsName)) {
-              newDsName = dsNames.get(dsName);
-            } else {
-              dsNames.put(dsName, newSegName);
-              newDsName = newSegName;
-            }
-          } else {
-            newDsName = newSegName;
+          // create CFS only if the source segment is not CFS, and MP agrees it
+          // should be CFS.
+          boolean createCFS;
+          synchronized (this) { // Guard segmentInfos
+            createCFS = !info.getUseCompoundFile()
+                && mergePolicy.useCompoundFile(segmentInfos, info)
+                // optimize case only for segments that don't share doc stores
+                && versionComparator.compare(info.getVersion(), "3.1") >= 0;
           }
 
-          // Copy the segment files
-          for (String file: info.files()) {
-            final String newFileName;
-            if (IndexFileNames.isDocStoreFile(file)) {
-              newFileName = newDsName + IndexFileNames.stripSegmentName(file);
-              if (dsFilesCopied.contains(newFileName)) {
-                continue;
-              }
-              dsFilesCopied.add(newFileName);
-            } else {
-              newFileName = newSegName + IndexFileNames.stripSegmentName(file);
-            }
-            assert !directory.fileExists(newFileName): "file \"" + newFileName + "\" already exists";
-            dir.copy(directory, file, newFileName);
+          if (createCFS) {
+            copySegmentIntoCFS(info, newSegName);
+          } else {
+            copySegmentAsIs(info, newSegName, dsNames, dsFilesCopied);
           }
 
-          // Update SI appropriately
-          info.setDocStore(info.getDocStoreOffset(), newDsName, info.getDocStoreIsCompoundFile());
-          info.dir = directory;
-          info.name = newSegName;
-
           infos.add(info);
         }
       }
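
The version comparator above orders dotted release strings by numeric
component rather than lexicographically, which is what makes the "3.1" cutoff
safe. Assuming that behavior, a brief illustration:

    Comparator<String> cmp = StringHelper.getVersionComparator();
    assert cmp.compare("3.0", "3.1") < 0;   // pre-3.1: may share doc stores
    assert cmp.compare("3.1", "3.1") == 0;  // 3.1+: eligible for the CFS rewrite
    assert cmp.compare("3.10", "3.9") > 0;  // numeric, not lexicographic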
@@ -2391,6 +2479,76 @@ public class IndexWriter implements Clos
     }
   }
 
+  /** Copies the segment into the IndexWriter's directory, as a compound segment. */
+  private void copySegmentIntoCFS(SegmentInfo info, String segName) throws IOException {
+    String segFileName = IndexFileNames.segmentFileName(segName, "", IndexFileNames.COMPOUND_FILE_EXTENSION);
+    Collection<String> files = info.files();
+    CompoundFileWriter cfsWriter = new CompoundFileWriter(directory, segFileName);
+    for (String file : files) {
+      String newFileName = segName + IndexFileNames.stripSegmentName(file);
+      if (!IndexFileNames.matchesExtension(file, IndexFileNames.DELETES_EXTENSION)
+          && !IndexFileNames.isSeparateNormsFile(file)) {
+        cfsWriter.addFile(file, info.dir);
+      } else {
+        assert !directory.fileExists(newFileName): "file \"" + newFileName + "\" already exists";
+        info.dir.copy(directory, file, newFileName);
+      }
+    }
+    
+    // Create the .cfs
+    cfsWriter.close();
+    
+    info.dir = directory;
+    info.name = segName;
+    info.setUseCompoundFile(true);
+  }
+  
+  /** Copies the segment files as-is into the IndexWriter's directory. */
+  private void copySegmentAsIs(SegmentInfo info, String segName,
+      Map<String, String> dsNames, Set<String> dsFilesCopied)
+      throws IOException {
+    // Determine if the doc store of this segment needs to be copied. It's
+    // only relevant for segments that share doc store with others,
+    // because the DS might have been copied already, in which case we
+    // just want to update the DS name of this SegmentInfo.
+    // NOTE: pre-3x segments include a null DSName if they don't share doc
+    // store. The following code ensures we don't accidentally insert
+    // 'null' to the map.
+    String dsName = info.getDocStoreSegment();
+    final String newDsName;
+    if (dsName != null) {
+      if (dsNames.containsKey(dsName)) {
+        newDsName = dsNames.get(dsName);
+      } else {
+        dsNames.put(dsName, segName);
+        newDsName = segName;
+      }
+    } else {
+      newDsName = segName;
+    }
+    
+    // Copy the segment files
+    for (String file: info.files()) {
+      final String newFileName;
+      if (IndexFileNames.isDocStoreFile(file)) {
+        newFileName = newDsName + IndexFileNames.stripSegmentName(file);
+        if (dsFilesCopied.contains(newFileName)) {
+          continue;
+        }
+        dsFilesCopied.add(newFileName);
+      } else {
+        newFileName = segName + IndexFileNames.stripSegmentName(file);
+      }
+      
+      assert !directory.fileExists(newFileName): "file \"" + newFileName + "\" already exists";
+      info.dir.copy(directory, file, newFileName);
+    }
+    
+    info.setDocStore(info.getDocStoreOffset(), newDsName, info.getDocStoreIsCompoundFile());
+    info.dir = directory;
+    info.name = segName;
+  }
+  
   /**
    * A hook for extending classes to execute operations after pending added and
    * deleted documents have been flushed to the Directory but before the change
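
The CFS path in copySegmentIntoCFS follows the CompoundFileWriter protocol
visible above: addFile() only registers an entry, and close() performs the
actual packing. A reduced sketch (directory and file names hypothetical):

    CompoundFileWriter cfs = new CompoundFileWriter(destDir, "_1.cfs");
    cfs.addFile("_1.fdt", srcDir);  // registers the entry; nothing copied yet
    cfs.addFile("_1.fdx", srcDir);
    cfs.close();                    // streams the data and writes the .cfs file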
@@ -3176,50 +3334,50 @@ public class IndexWriter implements Clos
     runningMerges.remove(merge);
   }
 
-  private synchronized void closeMergeReaders(MergePolicy.OneMerge merge, boolean suppressExceptions) throws IOException {
+  private final synchronized void closeMergeReaders(MergePolicy.OneMerge merge, boolean suppressExceptions) throws IOException {
     final int numSegments = merge.readers.size();
-    if (suppressExceptions) {
-      // Suppress any new exceptions so we throw the
-      // original cause
-      boolean anyChanges = false;
-      for (int i=0;i<numSegments;i++) {
-        if (merge.readers.get(i) != null) {
-          try {
-            anyChanges |= readerPool.release(merge.readers.get(i), false);
-          } catch (Throwable t) {
-          }
-          merge.readers.set(i, null);
-        }
-
-        if (i < merge.readerClones.size() && merge.readerClones.get(i) != null) {
-          try {
-            merge.readerClones.get(i).close();
-          } catch (Throwable t) {
-          }
-          // This was a private clone and we had the
-          // only reference
-          assert merge.readerClones.get(i).getRefCount() == 0: "refCount should be 0 but is " + merge.readerClones.get(i).getRefCount();
-          merge.readerClones.set(i, null);
+    Throwable th = null;
+    
+    boolean anyChanges = false;
+    boolean drop = !suppressExceptions;
+    for (int i = 0; i < numSegments; i++) {
+      if (merge.readers.get(i) != null) {
+        try {
+          anyChanges |= readerPool.release(merge.readers.get(i), drop);
+        } catch (Throwable t) {
+          if (th == null) {
+            th = t;
+          }
         }
+        merge.readers.set(i, null);
       }
-      if (anyChanges) {
-        checkpoint();
-      }
-    } else {
-      for (int i=0;i<numSegments;i++) {
-        if (merge.readers.get(i) != null) {
-          readerPool.release(merge.readers.get(i), true);
-          merge.readers.set(i, null);
-        }
-
-        if (i < merge.readerClones.size() && merge.readerClones.get(i) != null) {
+      
+      if (i < merge.readerClones.size() && merge.readerClones.get(i) != null) {
+        try {
           merge.readerClones.get(i).close();
-          // This was a private clone and we had the only reference
-          assert merge.readerClones.get(i).getRefCount() == 0;
-          merge.readerClones.set(i, null);
+        } catch (Throwable t) {
+          if (th == null) {
+            th = t;
+          }
         }
+        // This was a private clone and we had the
+        // only reference
+        assert merge.readerClones.get(i).getRefCount() == 0: "refCount should be 0 but is " + merge.readerClones.get(i).getRefCount();
+        merge.readerClones.set(i, null);
       }
     }
+    
+    if (suppressExceptions && anyChanges) {
+      checkpoint();
+    }
+    
+    // If any error occurred, throw it.
+    if (!suppressExceptions && th != null) {
+      if (th instanceof IOException) throw (IOException) th;
+      if (th instanceof RuntimeException) throw (RuntimeException) th;
+      if (th instanceof Error) throw (Error) th;
+      throw new RuntimeException(th);
+    }
   }
 
   /** Does the actual (time-consuming) work of the merge,
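
The rewritten closeMergeReaders collapses the suppress/no-suppress branches
into one loop that records only the first failure and, when not suppressing,
rethrows it with its original type. That rethrow idiom, isolated as a sketch:

    static void rethrow(Throwable th) throws java.io.IOException {
      if (th instanceof java.io.IOException) throw (java.io.IOException) th;
      if (th instanceof RuntimeException) throw (RuntimeException) th;
      if (th instanceof Error) throw (Error) th;
      throw new RuntimeException(th);  // unknown checked type: wrap as a last resort
    }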

Modified: lucene/dev/branches/solr2452/lucene/src/java/org/apache/lucene/index/NormsWriter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solr2452/lucene/src/java/org/apache/lucene/index/NormsWriter.java?rev=1129205&r1=1129204&r2=1129205&view=diff
==============================================================================
--- lucene/dev/branches/solr2452/lucene/src/java/org/apache/lucene/index/NormsWriter.java (original)
+++ lucene/dev/branches/solr2452/lucene/src/java/org/apache/lucene/index/NormsWriter.java Mon May 30 14:51:25 2011
@@ -22,6 +22,7 @@ import java.util.Collection;
 import java.util.Map;
 
 import org.apache.lucene.store.IndexOutput;
+import org.apache.lucene.util.IOUtils;
 
 // TODO FI: norms could actually be stored as doc store
 
@@ -49,9 +50,9 @@ final class NormsWriter extends Inverted
 
     final String normsFileName = IndexFileNames.segmentFileName(state.segmentName, "", IndexFileNames.NORMS_EXTENSION);
     IndexOutput normsOut = state.directory.createOutput(normsFileName);
-
+    boolean success = false;
     try {
-      normsOut.writeBytes(SegmentMerger.NORMS_HEADER, 0, SegmentMerger.NORMS_HEADER.length);
+      normsOut.writeBytes(SegmentNorms.NORMS_HEADER, 0, SegmentNorms.NORMS_HEADER.length);
 
       int normCount = 0;
 
@@ -84,9 +85,9 @@ final class NormsWriter extends Inverted
 
         assert 4+normCount*state.numDocs == normsOut.getFilePointer() : ".nrm file size mismatch: expected=" + (4+normCount*state.numDocs) + " actual=" + normsOut.getFilePointer();
       }
-
+      success = true;
     } finally {
-      normsOut.close();
+      IOUtils.closeSafely(!success, normsOut);
     }
   }
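
The success flag introduced above selects between the two closeSafely modes:
on the happy path close() failures still propagate, while after a failure the
close is quiet so the original exception wins. The idiom in isolation (a
self-contained sketch, not NormsWriter itself):

    static void writeHeader(java.io.File f, byte[] header) throws java.io.IOException {
      java.io.FileOutputStream out = new java.io.FileOutputStream(f);
      boolean success = false;
      try {
        out.write(header);             // any write may throw
        success = true;                // only after the last risky operation
      } finally {
        // success: close normally, surfacing close() failures;
        // failure: close quietly so the write exception stays primary
        IOUtils.closeSafely(!success, out);
      }
    }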
 

Modified: lucene/dev/branches/solr2452/lucene/src/java/org/apache/lucene/index/PerFieldCodecWrapper.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solr2452/lucene/src/java/org/apache/lucene/index/PerFieldCodecWrapper.java?rev=1129205&r1=1129204&r2=1129205&view=diff
==============================================================================
--- lucene/dev/branches/solr2452/lucene/src/java/org/apache/lucene/index/PerFieldCodecWrapper.java (original)
+++ lucene/dev/branches/solr2452/lucene/src/java/org/apache/lucene/index/PerFieldCodecWrapper.java Mon May 30 14:51:25 2011
@@ -30,6 +30,7 @@ import org.apache.lucene.index.codecs.Fi
 import org.apache.lucene.index.codecs.FieldsProducer;
 import org.apache.lucene.index.codecs.TermsConsumer;
 import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.IOUtils;
 
 /**
  * Enables native per field codec support. This class selects the codec used to
@@ -61,7 +62,15 @@ final class PerFieldCodecWrapper extends
       assert segmentCodecs == state.segmentCodecs;
       final Codec[] codecs = segmentCodecs.codecs;
       for (int i = 0; i < codecs.length; i++) {
-        consumers.add(codecs[i].fieldsConsumer(new SegmentWriteState(state, "" + i)));
+        boolean success = false;
+        try {
+          consumers.add(codecs[i].fieldsConsumer(new SegmentWriteState(state, "" + i)));
+          success = true;
+        } finally {
+          if (!success) {
+            IOUtils.closeSafely(true, consumers);
+          }
+        }
       }
     }
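
The constructor guard above handles partial initialization: if codec i fails
to open, consumers 0..i-1 are already live and must be released before the
exception escapes. The same shape with hypothetical names:

    static java.util.List<java.io.Closeable> openAll(int n) throws java.io.IOException {
      java.util.List<java.io.Closeable> opened = new java.util.ArrayList<java.io.Closeable>();
      for (int i = 0; i < n; i++) {
        boolean success = false;
        try {
          opened.add(openResource(i));         // hypothetical factory; may throw
          success = true;
        } finally {
          if (!success) {
            IOUtils.closeSafely(true, opened); // suppressed: the open failure wins
          }
        }
      }
      return opened;
    }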
 
@@ -74,22 +83,7 @@ final class PerFieldCodecWrapper extends
 
     @Override
     public void close() throws IOException {
-      Iterator<FieldsConsumer> it = consumers.iterator();
-      IOException err = null;
-      while (it.hasNext()) {
-        try {
-          it.next().close();
-        } catch (IOException ioe) {
-          // keep first IOException we hit but keep
-          // closing the rest
-          if (err == null) {
-            err = ioe;
-          }
-        }
-      }
-      if (err != null) {
-        throw err;
-      }
+      IOUtils.closeSafely(false, consumers);
     }
   }
 
@@ -122,14 +116,7 @@ final class PerFieldCodecWrapper extends
           // If we hit exception (eg, IOE because writer was
           // committing, or, for any other reason) we must
           // go back and close all FieldsProducers we opened:
-          for(FieldsProducer fp : producers.values()) {
-            try {
-              fp.close();
-            } catch (Throwable t) {
-              // Suppress all exceptions here so we continue
-              // to throw the original one
-            }
-          }
+          IOUtils.closeSafely(true, producers.values());
         }
       }
     }
@@ -177,22 +164,7 @@ final class PerFieldCodecWrapper extends
 
     @Override
     public void close() throws IOException {
-      Iterator<FieldsProducer> it = codecs.values().iterator();
-      IOException err = null;
-      while (it.hasNext()) {
-        try {
-          it.next().close();
-        } catch (IOException ioe) {
-          // keep first IOException we hit but keep
-          // closing the rest
-          if (err == null) {
-            err = ioe;
-          }
-        }
-      }
-      if (err != null) {
-        throw err;
-      }
+      IOUtils.closeSafely(false, codecs.values());
     }
 
     @Override

Modified: lucene/dev/branches/solr2452/lucene/src/java/org/apache/lucene/index/PersistentSnapshotDeletionPolicy.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solr2452/lucene/src/java/org/apache/lucene/index/PersistentSnapshotDeletionPolicy.java?rev=1129205&r1=1129204&r2=1129205&view=diff
==============================================================================
--- lucene/dev/branches/solr2452/lucene/src/java/org/apache/lucene/index/PersistentSnapshotDeletionPolicy.java (original)
+++ lucene/dev/branches/solr2452/lucene/src/java/org/apache/lucene/index/PersistentSnapshotDeletionPolicy.java Mon May 30 14:51:25 2011
@@ -59,7 +59,7 @@ public class PersistentSnapshotDeletionP
 
   /**
    * Reads the snapshots information from the given {@link Directory}. This
-   * method does can be used if the snapshots information is needed, however you
+   * method can be used if the snapshots information is needed, however you
    * cannot instantiate the deletion policy (because e.g., some other process
    * keeps a lock on the snapshots directory).
    */
@@ -122,11 +122,19 @@ public class PersistentSnapshotDeletionP
       writer.commit();
     }
 
-    // Initializes the snapshots information. This code should basically run
-    // only if mode != CREATE, but if it is, it's no harm as we only open the
-    // reader once and immediately close it.
-    for (Entry<String, String> e : readSnapshotsInfo(dir).entrySet()) {
-      registerSnapshotInfo(e.getKey(), e.getValue(), null);
+    try {
+      // Initializes the snapshots information. This code should basically run
+      // only if mode != CREATE, but if it is, it's no harm as we only open the
+      // reader once and immediately close it.
+      for (Entry<String, String> e : readSnapshotsInfo(dir).entrySet()) {
+        registerSnapshotInfo(e.getKey(), e.getValue(), null);
+      }
+    } catch (RuntimeException e) {
+      writer.close(); // don't leave any open file handles
+      throw e;
+    } catch (IOException e) {
+      writer.close(); // don't leave any open file handles
+      throw e;
     }
   }
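
Because this code predates Java 7, there is no multi-catch or
try-with-resources, so the cleanup is duplicated across the checked and
unchecked cases. The equivalent shape (open/initialize are hypothetical):

    java.io.Closeable resource = open();
    try {
      initialize(resource);   // may throw IOException or RuntimeException
    } catch (RuntimeException e) {
      resource.close();       // don't leave an open file handle behind
      throw e;
    } catch (java.io.IOException e) {
      resource.close();
      throw e;
    }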
 

Modified: lucene/dev/branches/solr2452/lucene/src/java/org/apache/lucene/index/SegmentInfo.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solr2452/lucene/src/java/org/apache/lucene/index/SegmentInfo.java?rev=1129205&r1=1129204&r2=1129205&view=diff
==============================================================================
--- lucene/dev/branches/solr2452/lucene/src/java/org/apache/lucene/index/SegmentInfo.java (original)
+++ lucene/dev/branches/solr2452/lucene/src/java/org/apache/lucene/index/SegmentInfo.java Mon May 30 14:51:25 2011
@@ -436,7 +436,7 @@ public final class SegmentInfo implement
    */
   public String getNormFileName(int number) {
     if (hasSeparateNorms(number)) {
-      return IndexFileNames.fileNameFromGeneration(name, "s" + number, normGen.get(number));
+      return IndexFileNames.fileNameFromGeneration(name, IndexFileNames.SEPARATE_NORMS_EXTENSION + number, normGen.get(number));
     } else {
       // single file for all norms
       return IndexFileNames.fileNameFromGeneration(name, IndexFileNames.NORMS_EXTENSION, WITHOUT_GEN);

Modified: lucene/dev/branches/solr2452/lucene/src/java/org/apache/lucene/index/SegmentInfos.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solr2452/lucene/src/java/org/apache/lucene/index/SegmentInfos.java?rev=1129205&r1=1129204&r2=1129205&view=diff
==============================================================================
--- lucene/dev/branches/solr2452/lucene/src/java/org/apache/lucene/index/SegmentInfos.java (original)
+++ lucene/dev/branches/solr2452/lucene/src/java/org/apache/lucene/index/SegmentInfos.java Mon May 30 14:51:25 2011
@@ -40,6 +40,7 @@ import org.apache.lucene.store.Directory
 import org.apache.lucene.store.IndexInput;
 import org.apache.lucene.store.IndexOutput;
 import org.apache.lucene.store.NoSuchDirectoryException;
+import org.apache.lucene.util.IOUtils;
 import org.apache.lucene.util.ThreadInterruptedException;
 
 /**
@@ -323,17 +324,13 @@ public final class SegmentInfos implemen
       SegmentInfosWriter infosWriter = codecs.getSegmentInfosWriter();
       segnOutput = infosWriter.writeInfos(directory, segmentFileName, this);
       infosWriter.prepareCommit(segnOutput);
-      success = true;
       pendingSegnOutput = segnOutput;
+      success = true;
     } finally {
       if (!success) {
         // We hit an exception above; try to close the file
         // but suppress any exception:
-        try {
-          segnOutput.close();
-        } catch (Throwable t) {
-          // Suppress so we keep throwing the original exception
-        }
+        IOUtils.closeSafely(true, segnOutput);
         try {
           // Try not to leave a truncated segments_N file in
           // the index:
@@ -945,6 +942,8 @@ public final class SegmentInfos implemen
       } finally {
         genOutput.close();
       }
+    } catch (ThreadInterruptedException t) {
+      throw t;
     } catch (Throwable t) {
       // It's OK if we fail to write this file since it's
       // used only as one of the retry fallbacks.
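
The new clause carves one exception out of the best-effort swallow: an
interrupt must never be silently eaten. Isolated as a sketch (the task body
is hypothetical):

    try {
      writeOptionalFallbackFile();   // best-effort; failures are tolerable
    } catch (ThreadInterruptedException tie) {
      throw tie;                     // always propagate interrupts
    } catch (Throwable t) {
      // OK to ignore: this file is only one of the retry fallbacks
    }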
@@ -962,7 +961,6 @@ public final class SegmentInfos implemen
     prepareCommit(dir);
     finishCommit(dir);
   }
-  
 
   public String toString(Directory directory) {
     StringBuilder buffer = new StringBuilder();