Posted to commits@lucene.apache.org by rm...@apache.org on 2012/08/07 17:21:12 UTC

svn commit: r1370309 - /lucene/dev/trunk/lucene/core/src/java/org/apache/lucene/index/FreqProxTermsWriterPerField.java

Author: rmuir
Date: Tue Aug  7 15:21:12 2012
New Revision: 1370309

URL: http://svn.apache.org/viewvc?rev=1370309&view=rev
Log:
rename confusing variables: numDocs is really docFreq, docFreqs is really termFreqs, termDocFreq is termFreq
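
The rename makes the usual postings terminology explicit: termFreq is how many times a term occurs within a single document, while docFreq is how many documents contain the term at least once. A minimal, hypothetical sketch of the two statistics (not part of this commit; class and method names are illustrative):

    import java.util.List;

    // Illustrative only: shows the distinction the rename spells out.
    final class TermStatsSketch {

      // termFreq: occurrences of `term` within one document's tokens.
      static int termFreq(List<String> docTokens, String term) {
        int freq = 0;
        for (String token : docTokens) {
          if (token.equals(term)) {
            freq++;
          }
        }
        return freq;
      }

      // docFreq: number of documents containing `term` at least once.
      static int docFreq(List<List<String>> docs, String term) {
        int df = 0;
        for (List<String> docTokens : docs) {
          if (termFreq(docTokens, term) > 0) {
            df++;
          }
        }
        return df;
      }
    }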

Modified:
    lucene/dev/trunk/lucene/core/src/java/org/apache/lucene/index/FreqProxTermsWriterPerField.java

Modified: lucene/dev/trunk/lucene/core/src/java/org/apache/lucene/index/FreqProxTermsWriterPerField.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/core/src/java/org/apache/lucene/index/FreqProxTermsWriterPerField.java?rev=1370309&r1=1370308&r2=1370309&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/core/src/java/org/apache/lucene/index/FreqProxTermsWriterPerField.java (original)
+++ lucene/dev/trunk/lucene/core/src/java/org/apache/lucene/index/FreqProxTermsWriterPerField.java Tue Aug  7 15:21:12 2012
@@ -173,7 +173,7 @@ final class FreqProxTermsWriterPerField 
       postings.lastDocCodes[termID] = docState.docID;
     } else {
       postings.lastDocCodes[termID] = docState.docID << 1;
-      postings.docFreqs[termID] = 1;
+      postings.termFreqs[termID] = 1;
       if (hasProx) {
         writeProx(termID, fieldState.position);
         if (hasOffsets) {
@@ -194,10 +194,10 @@ final class FreqProxTermsWriterPerField 
 
     FreqProxPostingsArray postings = (FreqProxPostingsArray) termsHashPerField.postingsArray;
 
-    assert !hasFreq || postings.docFreqs[termID] > 0;
+    assert !hasFreq || postings.termFreqs[termID] > 0;
 
     if (!hasFreq) {
-      assert postings.docFreqs == null;
+      assert postings.termFreqs == null;
       if (docState.docID != postings.lastDocIDs[termID]) {
         assert docState.docID > postings.lastDocIDs[termID];
         termsHashPerField.writeVInt(0, postings.lastDocCodes[termID]);
@@ -212,13 +212,13 @@ final class FreqProxTermsWriterPerField 
 
       // Now that we know doc freq for previous doc,
       // write it & lastDocCode
-      if (1 == postings.docFreqs[termID]) {
+      if (1 == postings.termFreqs[termID]) {
         termsHashPerField.writeVInt(0, postings.lastDocCodes[termID]|1);
       } else {
         termsHashPerField.writeVInt(0, postings.lastDocCodes[termID]);
-        termsHashPerField.writeVInt(0, postings.docFreqs[termID]);
+        termsHashPerField.writeVInt(0, postings.termFreqs[termID]);
       }
-      postings.docFreqs[termID] = 1;
+      postings.termFreqs[termID] = 1;
       fieldState.maxTermFrequency = Math.max(1, fieldState.maxTermFrequency);
       postings.lastDocCodes[termID] = (docState.docID - postings.lastDocIDs[termID]) << 1;
       postings.lastDocIDs[termID] = docState.docID;
@@ -233,7 +233,7 @@ final class FreqProxTermsWriterPerField 
       }
       fieldState.uniqueTermCount++;
     } else {
-      fieldState.maxTermFrequency = Math.max(fieldState.maxTermFrequency, ++postings.docFreqs[termID]);
+      fieldState.maxTermFrequency = Math.max(fieldState.maxTermFrequency, ++postings.termFreqs[termID]);
       if (hasProx) {
         writeProx(termID, fieldState.position-postings.lastPositions[termID]);
       }
@@ -252,7 +252,7 @@ final class FreqProxTermsWriterPerField 
     public FreqProxPostingsArray(int size, boolean writeFreqs, boolean writeProx, boolean writeOffsets) {
       super(size);
       if (writeFreqs) {
-        docFreqs = new int[size];
+        termFreqs = new int[size];
       }
       lastDocIDs = new int[size];
       lastDocCodes = new int[size];
@@ -267,7 +267,7 @@ final class FreqProxTermsWriterPerField 
       //System.out.println("PA init freqs=" + writeFreqs + " pos=" + writeProx + " offs=" + writeOffsets);
     }
 
-    int docFreqs[];                                    // # times this term occurs in the current doc
+    int termFreqs[];                                   // # times this term occurs in the current doc
     int lastDocIDs[];                                  // Last docID where this term occurred
     int lastDocCodes[];                                // Code for prior doc
     int lastPositions[];                               // Last position where this term occurred
@@ -275,7 +275,7 @@ final class FreqProxTermsWriterPerField 
 
     @Override
     ParallelPostingsArray newInstance(int size) {
-      return new FreqProxPostingsArray(size, docFreqs != null, lastPositions != null, lastOffsets != null);
+      return new FreqProxPostingsArray(size, termFreqs != null, lastPositions != null, lastOffsets != null);
     }
 
     @Override
@@ -295,9 +295,9 @@ final class FreqProxTermsWriterPerField 
         assert to.lastOffsets != null;
         System.arraycopy(lastOffsets, 0, to.lastOffsets, 0, numToCopy);
       }
-      if (docFreqs != null) {
-        assert to.docFreqs != null;
-        System.arraycopy(docFreqs, 0, to.docFreqs, 0, numToCopy);
+      if (termFreqs != null) {
+        assert to.termFreqs != null;
+        System.arraycopy(termFreqs, 0, to.termFreqs, 0, numToCopy);
       }
     }
 
@@ -310,7 +310,7 @@ final class FreqProxTermsWriterPerField 
       if (lastOffsets != null) {
         bytes += RamUsageEstimator.NUM_BYTES_INT;
       }
-      if (docFreqs != null) {
+      if (termFreqs != null) {
         bytes += RamUsageEstimator.NUM_BYTES_INT;
       }
 
@@ -416,21 +416,21 @@ final class FreqProxTermsWriterPerField 
       // Now termStates has numToMerge FieldMergeStates
       // which all share the same term.  Now we must
       // interleave the docID streams.
-      int numDocs = 0;
+      int docFreq = 0;
       long totTF = 0;
       int docID = 0;
 
       while(true) {
         //System.out.println("  cycle");
-        final int termDocFreq;
+        final int termFreq;
         if (freq.eof()) {
           if (postings.lastDocCodes[termID] != -1) {
             // Return last doc
             docID = postings.lastDocIDs[termID];
             if (readTermFreq) {
-              termDocFreq = postings.docFreqs[termID];
+              termFreq = postings.termFreqs[termID];
             } else {
-              termDocFreq = -1;
+              termFreq = -1;
             }
             postings.lastDocCodes[termID] = -1;
           } else {
@@ -441,20 +441,20 @@ final class FreqProxTermsWriterPerField 
           final int code = freq.readVInt();
           if (!readTermFreq) {
             docID += code;
-            termDocFreq = -1;
+            termFreq = -1;
           } else {
             docID += code >>> 1;
             if ((code & 1) != 0) {
-              termDocFreq = 1;
+              termFreq = 1;
             } else {
-              termDocFreq = freq.readVInt();
+              termFreq = freq.readVInt();
             }
           }
 
           assert docID != postings.lastDocIDs[termID];
         }
 
-        numDocs++;
+        docFreq++;
         assert docID < state.segmentInfo.getDocCount(): "doc=" + docID + " maxDoc=" + state.segmentInfo.getDocCount();
 
         // NOTE: we could check here if the docID was
@@ -469,7 +469,7 @@ final class FreqProxTermsWriterPerField 
         // 2nd sweep does the real flush, but I suspect
         // that'd add too much time to flush.
         visitedDocs.set(docID);
-        postingsConsumer.startDoc(docID, writeTermFreq ? termDocFreq : -1);
+        postingsConsumer.startDoc(docID, writeTermFreq ? termFreq : -1);
         if (docID < delDocLimit) {
           // Mark it deleted.  TODO: we could also skip
           // writing its postings; this would be
@@ -485,7 +485,7 @@ final class FreqProxTermsWriterPerField 
           }
         }
 
-        totTF += termDocFreq;
+        totTF += termFreq;
         
         // Carefully copy over the prox + payload info,
         // changing the format to match Lucene's segment
@@ -495,7 +495,7 @@ final class FreqProxTermsWriterPerField 
           // we did record positions (& maybe payload) and/or offsets
           int position = 0;
           int offset = 0;
-          for(int j=0;j<termDocFreq;j++) {
+          for(int j=0;j<termFreq;j++) {
             final BytesRef thisPayload;
 
             if (readPositions) {
@@ -542,9 +542,9 @@ final class FreqProxTermsWriterPerField 
         }
         postingsConsumer.finishDoc();
       }
-      termsConsumer.finishTerm(text, new TermStats(numDocs, writeTermFreq ? totTF : -1));
+      termsConsumer.finishTerm(text, new TermStats(docFreq, writeTermFreq ? totTF : -1));
       sumTotalTermFreq += totTF;
-      sumDocFreq += numDocs;
+      sumDocFreq += docFreq;
     }
 
     termsConsumer.finish(writeTermFreq ? sumTotalTermFreq : -1, sumDocFreq, visitedDocs.cardinality());
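
The readVInt logic above decodes the in-memory posting codes this class writes: each code is a docID delta shifted left by one bit, with the low bit set when the term frequency is exactly 1 so the separate frequency VInt can be omitted; otherwise the delta is followed by the frequency. A rough, self-contained sketch of that scheme, assuming plain int lists in place of the real byte-level VInt streams (class and method names are hypothetical):

    import java.util.ArrayList;
    import java.util.List;

    // Illustrative encoder/decoder for the (docDelta << 1 | freqIsOne)
    // scheme mirrored by the flush loop above. Int lists stand in for
    // the byte-level streams used internally.
    final class DocFreqCodeSketch {

      static List<Integer> encode(int[] docIDs, int[] termFreqs) {
        List<Integer> out = new ArrayList<>();
        int lastDocID = 0;
        for (int i = 0; i < docIDs.length; i++) {
          int delta = docIDs[i] - lastDocID;
          if (termFreqs[i] == 1) {
            out.add((delta << 1) | 1);  // low bit set: freq == 1, no freq value follows
          } else {
            out.add(delta << 1);        // low bit clear: explicit freq follows
            out.add(termFreqs[i]);
          }
          lastDocID = docIDs[i];
        }
        return out;
      }

      static void decode(List<Integer> codes) {
        int docID = 0;
        for (int i = 0; i < codes.size(); i++) {
          int code = codes.get(i);
          docID += code >>> 1;
          int termFreq = ((code & 1) != 0) ? 1 : codes.get(++i);
          System.out.println("doc=" + docID + " termFreq=" + termFreq);
        }
      }

      public static void main(String[] args) {
        decode(encode(new int[] {3, 7, 12}, new int[] {1, 4, 2}));
      }
    }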