You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@lucene.apache.org by ds...@apache.org on 2018/10/18 23:49:26 UTC

[3/3] lucene-solr:master: LUCENE-7875: Moved MultiFields static methods to MultiTerms, FieldInfos and MultiBits. MultiBits is now public and has getLiveDocs.

LUCENE-7875: Moved MultiFields static methods to MultiTerms, FieldInfos and MultiBits.
MultiBits is now public and has getLiveDocs.


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/fd916480
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/fd916480
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/fd916480

Branch: refs/heads/master
Commit: fd9164801e703b278922dae6cc3c53e0578fa1d6
Parents: ed8a395
Author: David Smiley <ds...@apache.org>
Authored: Thu Oct 18 19:49:13 2018 -0400
Committer: David Smiley <ds...@apache.org>
Committed: Thu Oct 18 19:49:14 2018 -0400

----------------------------------------------------------------------
 lucene/CHANGES.txt                              |   6 +
 .../query/QueryAutoStopWordAnalyzer.java        |  17 ++-
 .../analysis/standard/TestClassicAnalyzer.java  |  12 +-
 .../index/TestBackwardsCompatibility.java       |  12 +-
 .../lucene/benchmark/byTask/tasks/ReadTask.java |   4 +-
 .../quality/utils/QualityQueriesFinder.java     |   4 +-
 .../benchmark/byTask/TestPerfTasksLogic.java    |   7 +-
 .../lucene/classification/BM25NBClassifier.java |   4 +-
 .../BooleanPerceptronClassifier.java            |   4 +-
 .../CachingNaiveBayesClassifier.java            |   8 +-
 .../SimpleNaiveBayesClassifier.java             |   8 +-
 .../SimpleNaiveBayesDocumentClassifier.java     |   6 +-
 .../classification/utils/NearestFuzzyQuery.java |   4 +-
 .../classification/BM25NBClassifierTest.java    |   4 +-
 .../BooleanPerceptronClassifierTest.java        |   4 +-
 .../CachingNaiveBayesClassifierTest.java        |   4 +-
 .../KNearestFuzzyClassifierTest.java            |   4 +-
 .../KNearestNeighborClassifierTest.java         |   4 +-
 .../SimpleNaiveBayesClassifierTest.java         |   4 +-
 .../utils/DocToDoubleVectorUtilsTest.java       |   4 +-
 .../codecs/blocktreeords/TestOrdsBlockTree.java |  16 +-
 .../org/apache/lucene/index/FieldInfos.java     |  34 ++++-
 .../org/apache/lucene/index/IndexReader.java    |   2 +-
 .../java/org/apache/lucene/index/MultiBits.java |  69 +++++----
 .../org/apache/lucene/index/MultiDocValues.java |   2 +-
 .../org/apache/lucene/index/MultiFields.java    | 151 +------------------
 .../apache/lucene/index/MultiLeafReader.java    |  34 +++++
 .../org/apache/lucene/index/MultiTerms.java     |  61 +++++++-
 .../lucene/analysis/TestCachingTokenFilter.java |   8 +-
 .../org/apache/lucene/index/Test2BTerms.java    |   4 +-
 .../index/TestBinaryDocValuesUpdates.java       |   2 +-
 .../apache/lucene/index/TestCustomTermFreq.java |  18 +--
 .../lucene/index/TestDirectoryReader.java       |  26 ++--
 .../org/apache/lucene/index/TestDocCount.java   |   4 +-
 .../lucene/index/TestDocValuesIndexing.java     |   2 +-
 .../apache/lucene/index/TestDocumentWriter.java |   4 +-
 .../lucene/index/TestFilterLeafReader.java      |   2 +-
 .../test/org/apache/lucene/index/TestFlex.java  |   2 +-
 .../lucene/index/TestIndexWriterDelete.java     |   2 +-
 .../index/TestIndexWriterDeleteByQuery.java     |   4 +-
 .../lucene/index/TestIndexWriterExceptions.java |  10 +-
 .../lucene/index/TestIndexWriterReader.java     |   2 +-
 .../lucene/index/TestIndexWriterUnicode.java    |   2 +-
 .../index/TestIndexWriterWithThreads.java       |   2 +-
 .../lucene/index/TestLazyProxSkipping.java      |   4 +-
 .../apache/lucene/index/TestLongPostings.java   |   2 +-
 .../apache/lucene/index/TestMaxPosition.java    |   2 +-
 .../apache/lucene/index/TestMultiFields.java    |   6 +-
 .../index/TestNumericDocValuesUpdates.java      |   2 +-
 .../apache/lucene/index/TestOmitPositions.java  |   2 +-
 .../index/TestParallelCompositeReader.java      |  24 +--
 .../org/apache/lucene/index/TestPayloads.java   |  12 +-
 .../lucene/index/TestPerSegmentDeletes.java     |   2 +-
 .../lucene/index/TestPostingsOffsets.java       |  14 +-
 .../apache/lucene/index/TestSegmentReader.java  |   6 +-
 .../lucene/index/TestSegmentTermEnum.java       |   2 +-
 .../lucene/index/TestStressIndexing2.java       |  24 +--
 .../org/apache/lucene/index/TestSumDocFreq.java |   4 +-
 .../apache/lucene/index/TestTermdocPerf.java    |   2 +-
 .../test/org/apache/lucene/index/TestTerms.java |   4 +-
 .../org/apache/lucene/index/TestTermsEnum.java  |  14 +-
 .../org/apache/lucene/index/TestTermsEnum2.java |   6 +-
 .../lucene/index/TestTransactionRollback.java   |   2 +-
 .../lucene/search/TestAutomatonQuery.java       |   6 +-
 .../lucene/search/TestMultiPhraseQuery.java     |   4 +-
 .../lucene/search/TestPhrasePrefixQuery.java    |   4 +-
 .../lucene/search/TestPositionIncrement.java    |   6 +-
 .../search/TestSameScoresWithThreads.java       |   4 +-
 .../lucene/search/TestShardSearching.java       |   4 +-
 .../org/apache/lucene/search/TestWildcard.java  |   4 +-
 .../org/apache/lucene/util/fst/TestFSTs.java    |   6 +-
 .../directory/DirectoryTaxonomyReader.java      |   4 +-
 .../taxonomy/directory/TaxonomyIndexArrays.java |   6 +-
 .../search/uhighlight/UnifiedHighlighter.java   |   3 +-
 .../lucene/search/join/TestBlockJoin.java       |   7 +-
 .../apache/lucene/search/join/TestJoinUtil.java |   6 +-
 .../memory/TestMemoryIndexAgainstRAMDir.java    |   2 +-
 .../org/apache/lucene/misc/HighFreqTerms.java   |   9 +-
 .../index/TestMultiPassIndexSplitter.java       |   8 +-
 .../lucene/index/TestPKIndexSplitter.java       |   2 +-
 .../valuesource/JoinDocFreqValueSource.java     |   4 +-
 .../apache/lucene/queries/mlt/MoreLikeThis.java |   6 +-
 .../surround/query/SrndPrefixQuery.java         |   4 +-
 .../surround/query/SrndTermQuery.java           |   4 +-
 .../surround/query/SrndTruncQuery.java          |   4 +-
 .../sandbox/queries/FuzzyLikeThisQuery.java     |   4 +-
 .../document/BaseLatLonShapeTestCase.java       |   6 +-
 .../lucene/search/spell/DirectSpellChecker.java |   4 +-
 .../search/spell/HighFrequencyDictionary.java   |   4 +-
 .../lucene/search/spell/LuceneDictionary.java   |   4 +-
 .../search/suggest/DocumentDictionary.java      |   4 +-
 .../suggest/analyzing/FreeTextSuggester.java    |   4 +-
 .../apache/lucene/geo/BaseGeoPointTestCase.java |   8 +-
 .../index/BaseIndexFileFormatTestCase.java      |   2 +-
 .../lucene/index/BasePointsFormatTestCase.java  |   4 +-
 .../index/BasePostingsFormatTestCase.java       |   2 +-
 .../ThreadedIndexingAndSearchingTestCase.java   |   2 +-
 .../search/BaseRangeFieldQueryTestCase.java     |   4 +-
 .../org/apache/lucene/util/LuceneTestCase.java  |  24 +--
 .../java/org/apache/lucene/util/TestUtil.java   |  26 +---
 .../solr/handler/admin/LukeRequestHandler.java  |   4 +-
 .../solr/index/SlowCompositeReaderWrapper.java  |   8 +-
 .../org/apache/solr/request/SimpleFacets.java   |   4 +-
 .../solr/search/function/FileFloatSource.java   |  33 ++--
 .../index/UninvertDocValuesMergePolicyTest.java |   9 +-
 .../org/apache/solr/legacy/TestLegacyTerms.java |  15 +-
 .../solr/schema/TestHalfAndHalfDocValues.java   |   3 +-
 .../org/apache/solr/search/TestRTGBase.java     |   7 +-
 .../solr/uninverting/TestDocTermOrds.java       |   8 +-
 109 files changed, 497 insertions(+), 524 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/fd916480/lucene/CHANGES.txt
----------------------------------------------------------------------
diff --git a/lucene/CHANGES.txt b/lucene/CHANGES.txt
index 3ec6de3..72c0d47 100644
--- a/lucene/CHANGES.txt
+++ b/lucene/CHANGES.txt
@@ -127,6 +127,12 @@ Changes in Runtime Behavior
   box anymore. In order to highlight on Block-Join Queries a custom WeightedSpanTermExtractor / FieldQuery
   should be used. (Simon Willnauer, Jim Ferenczi, Julie Tibshiran)
 
+* LUCENE-7875: Moved MultiFields static methods out of the class.  getLiveDocs is now
+  in MultiBits which is now public.  getMergedFieldInfos and getIndexedFields are now in
+  FieldInfos.  getTerms is now in MultiTerms.  getTermPositionsEnum and getTermDocsEnum
+  were collapsed and renamed to just getTermPostingsEnum and moved to MultiTerms.
+  (David Smiley)
+
 New Features
 
 * LUCENE-8340: LongPoint#newDistanceQuery may be used to boost scores based on

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/fd916480/lucene/analysis/common/src/java/org/apache/lucene/analysis/query/QueryAutoStopWordAnalyzer.java
----------------------------------------------------------------------
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/query/QueryAutoStopWordAnalyzer.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/query/QueryAutoStopWordAnalyzer.java
index 66fdc1b..eabdd8e 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/query/QueryAutoStopWordAnalyzer.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/query/QueryAutoStopWordAnalyzer.java
@@ -17,14 +17,21 @@
 package org.apache.lucene.analysis.query;
 
 import java.io.IOException;
-import java.util.*;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
 
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.AnalyzerWrapper;
 import org.apache.lucene.analysis.CharArraySet;
 import org.apache.lucene.analysis.StopFilter;
+import org.apache.lucene.index.FieldInfos;
 import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.MultiFields;
+import org.apache.lucene.index.MultiTerms;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.index.Terms;
 import org.apache.lucene.index.TermsEnum;
@@ -78,7 +85,7 @@ public final class QueryAutoStopWordAnalyzer extends AnalyzerWrapper {
       Analyzer delegate,
       IndexReader indexReader,
       int maxDocFreq) throws IOException {
-    this(delegate, indexReader, MultiFields.getIndexedFields(indexReader), maxDocFreq);
+    this(delegate, indexReader, FieldInfos.getIndexedFields(indexReader), maxDocFreq);
   }
 
   /**
@@ -96,7 +103,7 @@ public final class QueryAutoStopWordAnalyzer extends AnalyzerWrapper {
       Analyzer delegate,
       IndexReader indexReader,
       float maxPercentDocs) throws IOException {
-    this(delegate, indexReader, MultiFields.getIndexedFields(indexReader), maxPercentDocs);
+    this(delegate, indexReader, FieldInfos.getIndexedFields(indexReader), maxPercentDocs);
   }
 
   /**
@@ -140,7 +147,7 @@ public final class QueryAutoStopWordAnalyzer extends AnalyzerWrapper {
     
     for (String field : fields) {
       Set<String> stopWords = new HashSet<>();
-      Terms terms = MultiFields.getTerms(indexReader, field);
+      Terms terms = MultiTerms.getTerms(indexReader, field);
       CharsRefBuilder spare = new CharsRefBuilder();
       if (terms != null) {
         TermsEnum te = terms.iterator();

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/fd916480/lucene/analysis/common/src/test/org/apache/lucene/analysis/standard/TestClassicAnalyzer.java
----------------------------------------------------------------------
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/standard/TestClassicAnalyzer.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/standard/TestClassicAnalyzer.java
index 45797c0..ffb1d2b 100644
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/standard/TestClassicAnalyzer.java
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/standard/TestClassicAnalyzer.java
@@ -17,25 +17,25 @@
 package org.apache.lucene.analysis.standard;
 
 
+import java.io.IOException;
+import java.util.Arrays;
+
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.BaseTokenStreamTestCase;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.document.TextField;
 import org.apache.lucene.index.DirectoryReader;
-import org.apache.lucene.index.PostingsEnum;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.IndexWriterConfig;
-import org.apache.lucene.index.MultiFields;
+import org.apache.lucene.index.MultiTerms;
+import org.apache.lucene.index.PostingsEnum;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.search.DocIdSetIterator;
 import org.apache.lucene.store.RAMDirectory;
 import org.apache.lucene.util.BytesRef;
 
-import java.io.IOException;
-import java.util.Arrays;
-
 /** tests for classicanalyzer */
 public class TestClassicAnalyzer extends BaseTokenStreamTestCase {
 
@@ -299,7 +299,7 @@ public class TestClassicAnalyzer extends BaseTokenStreamTestCase {
 
     // Make sure position is still incremented when
     // massive term is skipped:
-    PostingsEnum tps = MultiFields.getTermPositionsEnum(reader,
+    PostingsEnum tps = MultiTerms.getTermPostingsEnum(reader,
                                                                 "content",
                                                                 new BytesRef("another"));
     assertTrue(tps.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/fd916480/lucene/backward-codecs/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java
----------------------------------------------------------------------
diff --git a/lucene/backward-codecs/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java b/lucene/backward-codecs/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java
index b1e7466..bc7b6d8 100644
--- a/lucene/backward-codecs/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java
+++ b/lucene/backward-codecs/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java
@@ -881,17 +881,17 @@ public class TestBackwardsCompatibility extends LuceneTestCase {
     TestUtil.checkIndex(dir);
     
     // true if this is a 4.0+ index
-    final boolean is40Index = MultiFields.getMergedFieldInfos(reader).fieldInfo("content5") != null;
+    final boolean is40Index = FieldInfos.getMergedFieldInfos(reader).fieldInfo("content5") != null;
     // true if this is a 4.2+ index
-    final boolean is42Index = MultiFields.getMergedFieldInfos(reader).fieldInfo("dvSortedSet") != null;
+    final boolean is42Index = FieldInfos.getMergedFieldInfos(reader).fieldInfo("dvSortedSet") != null;
     // true if this is a 4.9+ index
-    final boolean is49Index = MultiFields.getMergedFieldInfos(reader).fieldInfo("dvSortedNumeric") != null;
+    final boolean is49Index = FieldInfos.getMergedFieldInfos(reader).fieldInfo("dvSortedNumeric") != null;
     // true if this index has points (>= 6.0)
-    final boolean hasPoints = MultiFields.getMergedFieldInfos(reader).fieldInfo("intPoint1d") != null;
+    final boolean hasPoints = FieldInfos.getMergedFieldInfos(reader).fieldInfo("intPoint1d") != null;
 
     assert is40Index;
 
-    final Bits liveDocs = MultiFields.getLiveDocs(reader);
+    final Bits liveDocs = MultiBits.getLiveDocs(reader);
 
     for(int i=0;i<35;i++) {
       if (liveDocs.get(i)) {
@@ -1257,7 +1257,7 @@ public class TestBackwardsCompatibility extends LuceneTestCase {
     for (String name : oldNames) {
       Directory dir = oldIndexDirs.get(name);
       IndexReader r = DirectoryReader.open(dir);
-      TermsEnum terms = MultiFields.getTerms(r, "content").iterator();
+      TermsEnum terms = MultiTerms.getTerms(r, "content").iterator();
       BytesRef t = terms.next();
       assertNotNull(t);
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/fd916480/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/ReadTask.java
----------------------------------------------------------------------
diff --git a/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/ReadTask.java b/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/ReadTask.java
index 908808c..f49801a 100644
--- a/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/ReadTask.java
+++ b/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/ReadTask.java
@@ -24,7 +24,7 @@ import org.apache.lucene.benchmark.byTask.feeds.QueryMaker;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.MultiFields;
+import org.apache.lucene.index.MultiBits;
 import org.apache.lucene.search.Collector;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.Query;
@@ -88,7 +88,7 @@ public abstract class ReadTask extends PerfTask {
     // optionally warm and add num docs traversed to count
     if (withWarm()) {
       Document doc = null;
-      Bits liveDocs = MultiFields.getLiveDocs(reader);
+      Bits liveDocs = MultiBits.getLiveDocs(reader);
       for (int m = 0; m < reader.maxDoc(); m++) {
         if (null == liveDocs || liveDocs.get(m)) {
           doc = reader.document(m);

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/fd916480/lucene/benchmark/src/java/org/apache/lucene/benchmark/quality/utils/QualityQueriesFinder.java
----------------------------------------------------------------------
diff --git a/lucene/benchmark/src/java/org/apache/lucene/benchmark/quality/utils/QualityQueriesFinder.java b/lucene/benchmark/src/java/org/apache/lucene/benchmark/quality/utils/QualityQueriesFinder.java
index 15ba807..e29a4be 100644
--- a/lucene/benchmark/src/java/org/apache/lucene/benchmark/quality/utils/QualityQueriesFinder.java
+++ b/lucene/benchmark/src/java/org/apache/lucene/benchmark/quality/utils/QualityQueriesFinder.java
@@ -21,9 +21,9 @@ import java.nio.file.Paths;
 
 import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.MultiTerms;
 import org.apache.lucene.index.TermsEnum;
 import org.apache.lucene.index.Terms;
-import org.apache.lucene.index.MultiFields;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.FSDirectory;
 import org.apache.lucene.util.PriorityQueue;
@@ -90,7 +90,7 @@ public class QualityQueriesFinder {
     IndexReader ir = DirectoryReader.open(dir);
     try {
       int threshold = ir.maxDoc() / 10; // ignore words too common.
-      Terms terms = MultiFields.getTerms(ir, field);
+      Terms terms = MultiTerms.getTerms(ir, field);
       if (terms != null) {
         TermsEnum termsEnum = terms.iterator();
         while (termsEnum.next() != null) {

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/fd916480/lucene/benchmark/src/test/org/apache/lucene/benchmark/byTask/TestPerfTasksLogic.java
----------------------------------------------------------------------
diff --git a/lucene/benchmark/src/test/org/apache/lucene/benchmark/byTask/TestPerfTasksLogic.java b/lucene/benchmark/src/test/org/apache/lucene/benchmark/byTask/TestPerfTasksLogic.java
index d0af909..f1936ad 100644
--- a/lucene/benchmark/src/test/org/apache/lucene/benchmark/byTask/TestPerfTasksLogic.java
+++ b/lucene/benchmark/src/test/org/apache/lucene/benchmark/byTask/TestPerfTasksLogic.java
@@ -38,6 +38,7 @@ import org.apache.lucene.benchmark.byTask.tasks.WriteLineDocTask;
 import org.apache.lucene.collation.CollationKeyAnalyzer;
 import org.apache.lucene.facet.taxonomy.TaxonomyReader;
 import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.FieldInfos;
 import org.apache.lucene.index.Fields;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriter;
@@ -45,7 +46,7 @@ import org.apache.lucene.index.IndexWriterConfig;
 import org.apache.lucene.index.IndexWriterConfig.OpenMode;
 import org.apache.lucene.index.LogDocMergePolicy;
 import org.apache.lucene.index.LogMergePolicy;
-import org.apache.lucene.index.MultiFields;
+import org.apache.lucene.index.MultiTerms;
 import org.apache.lucene.index.PostingsEnum;
 import org.apache.lucene.index.SegmentInfos;
 import org.apache.lucene.index.SerialMergeScheduler;
@@ -374,13 +375,13 @@ public class TestPerfTasksLogic extends BenchmarkTestCase {
 
     int totalTokenCount2 = 0;
 
-    Collection<String> fields = MultiFields.getIndexedFields(reader);
+    Collection<String> fields = FieldInfos.getIndexedFields(reader);
 
     for (String fieldName : fields) {
       if (fieldName.equals(DocMaker.ID_FIELD) || fieldName.equals(DocMaker.DATE_MSEC_FIELD) || fieldName.equals(DocMaker.TIME_SEC_FIELD)) {
         continue;
       }
-      Terms terms = MultiFields.getTerms(reader, fieldName);
+      Terms terms = MultiTerms.getTerms(reader, fieldName);
       if (terms == null) {
         continue;
       }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/fd916480/lucene/classification/src/java/org/apache/lucene/classification/BM25NBClassifier.java
----------------------------------------------------------------------
diff --git a/lucene/classification/src/java/org/apache/lucene/classification/BM25NBClassifier.java b/lucene/classification/src/java/org/apache/lucene/classification/BM25NBClassifier.java
index b23fc39..5e976a8 100644
--- a/lucene/classification/src/java/org/apache/lucene/classification/BM25NBClassifier.java
+++ b/lucene/classification/src/java/org/apache/lucene/classification/BM25NBClassifier.java
@@ -27,7 +27,7 @@ import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
 import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.MultiFields;
+import org.apache.lucene.index.MultiTerms;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.index.Terms;
 import org.apache.lucene.index.TermsEnum;
@@ -128,7 +128,7 @@ public class BM25NBClassifier implements Classifier<BytesRef> {
   private List<ClassificationResult<BytesRef>> assignClassNormalizedList(String inputDocument) throws IOException {
     List<ClassificationResult<BytesRef>> assignedClasses = new ArrayList<>();
 
-    Terms classes = MultiFields.getTerms(indexReader, classFieldName);
+    Terms classes = MultiTerms.getTerms(indexReader, classFieldName);
     TermsEnum classesEnum = classes.iterator();
     BytesRef next;
     String[] tokenizedText = tokenize(inputDocument);

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/fd916480/lucene/classification/src/java/org/apache/lucene/classification/BooleanPerceptronClassifier.java
----------------------------------------------------------------------
diff --git a/lucene/classification/src/java/org/apache/lucene/classification/BooleanPerceptronClassifier.java b/lucene/classification/src/java/org/apache/lucene/classification/BooleanPerceptronClassifier.java
index 394d15f..0008375 100644
--- a/lucene/classification/src/java/org/apache/lucene/classification/BooleanPerceptronClassifier.java
+++ b/lucene/classification/src/java/org/apache/lucene/classification/BooleanPerceptronClassifier.java
@@ -28,7 +28,7 @@ import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexableField;
-import org.apache.lucene.index.MultiFields;
+import org.apache.lucene.index.MultiTerms;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.index.Terms;
 import org.apache.lucene.index.TermsEnum;
@@ -80,7 +80,7 @@ public class BooleanPerceptronClassifier implements Classifier<Boolean> {
    */
   public BooleanPerceptronClassifier(IndexReader indexReader, Analyzer analyzer, Query query, Integer batchSize,
                                      Double bias, String classFieldName, String textFieldName) throws IOException {
-    this.textTerms = MultiFields.getTerms(indexReader, textFieldName);
+    this.textTerms = MultiTerms.getTerms(indexReader, textFieldName);
 
     if (textTerms == null) {
       throw new IOException("term vectors need to be available for field " + textFieldName);

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/fd916480/lucene/classification/src/java/org/apache/lucene/classification/CachingNaiveBayesClassifier.java
----------------------------------------------------------------------
diff --git a/lucene/classification/src/java/org/apache/lucene/classification/CachingNaiveBayesClassifier.java b/lucene/classification/src/java/org/apache/lucene/classification/CachingNaiveBayesClassifier.java
index 6fe6835..0c3733f 100644
--- a/lucene/classification/src/java/org/apache/lucene/classification/CachingNaiveBayesClassifier.java
+++ b/lucene/classification/src/java/org/apache/lucene/classification/CachingNaiveBayesClassifier.java
@@ -25,7 +25,7 @@ import java.util.concurrent.ConcurrentHashMap;
 
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.MultiFields;
+import org.apache.lucene.index.MultiTerms;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.index.Terms;
 import org.apache.lucene.index.TermsEnum;
@@ -210,7 +210,7 @@ public class CachingNaiveBayesClassifier extends SimpleNaiveBayesClassifier {
     // build the cache for the word
     Map<String, Long> frequencyMap = new HashMap<>();
     for (String textFieldName : textFieldNames) {
-      TermsEnum termsEnum = MultiFields.getTerms(indexReader, textFieldName).iterator();
+      TermsEnum termsEnum = MultiTerms.getTerms(indexReader, textFieldName).iterator();
       while (termsEnum.next() != null) {
         BytesRef term = termsEnum.term();
         String termText = term.utf8ToString();
@@ -227,7 +227,7 @@ public class CachingNaiveBayesClassifier extends SimpleNaiveBayesClassifier {
     }
 
     // fill the class list
-    Terms terms = MultiFields.getTerms(indexReader, classFieldName);
+    Terms terms = MultiTerms.getTerms(indexReader, classFieldName);
     TermsEnum termsEnum = terms.iterator();
     while ((termsEnum.next()) != null) {
       cclasses.add(BytesRef.deepCopyOf(termsEnum.term()));
@@ -236,7 +236,7 @@ public class CachingNaiveBayesClassifier extends SimpleNaiveBayesClassifier {
     for (BytesRef cclass : cclasses) {
       double avgNumberOfUniqueTerms = 0;
       for (String textFieldName : textFieldNames) {
-        terms = MultiFields.getTerms(indexReader, textFieldName);
+        terms = MultiTerms.getTerms(indexReader, textFieldName);
         long numPostings = terms.getSumDocFreq(); // number of term/doc pairs
         avgNumberOfUniqueTerms += numPostings / (double) terms.getDocCount();
       }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/fd916480/lucene/classification/src/java/org/apache/lucene/classification/SimpleNaiveBayesClassifier.java
----------------------------------------------------------------------
diff --git a/lucene/classification/src/java/org/apache/lucene/classification/SimpleNaiveBayesClassifier.java b/lucene/classification/src/java/org/apache/lucene/classification/SimpleNaiveBayesClassifier.java
index a154649..b044ed4 100644
--- a/lucene/classification/src/java/org/apache/lucene/classification/SimpleNaiveBayesClassifier.java
+++ b/lucene/classification/src/java/org/apache/lucene/classification/SimpleNaiveBayesClassifier.java
@@ -27,7 +27,7 @@ import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
 import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.MultiFields;
+import org.apache.lucene.index.MultiTerms;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.index.Terms;
 import org.apache.lucene.index.TermsEnum;
@@ -135,7 +135,7 @@ public class SimpleNaiveBayesClassifier implements Classifier<BytesRef> {
   protected List<ClassificationResult<BytesRef>> assignClassNormalizedList(String inputDocument) throws IOException {
     List<ClassificationResult<BytesRef>> assignedClasses = new ArrayList<>();
 
-    Terms classes = MultiFields.getTerms(indexReader, classFieldName);
+    Terms classes = MultiTerms.getTerms(indexReader, classFieldName);
     if (classes != null) {
       TermsEnum classesEnum = classes.iterator();
       BytesRef next;
@@ -160,7 +160,7 @@ public class SimpleNaiveBayesClassifier implements Classifier<BytesRef> {
    * @throws IOException if accessing to term vectors or search fails
    */
   protected int countDocsWithClass() throws IOException {
-    Terms terms = MultiFields.getTerms(this.indexReader, this.classFieldName);
+    Terms terms = MultiTerms.getTerms(this.indexReader, this.classFieldName);
     int docCount;
     if (terms == null || terms.getDocCount() == -1) { // in case codec doesn't support getDocCount
       TotalHitCountCollector classQueryCountCollector = new TotalHitCountCollector();
@@ -231,7 +231,7 @@ public class SimpleNaiveBayesClassifier implements Classifier<BytesRef> {
   private double getTextTermFreqForClass(Term term) throws IOException {
     double avgNumberOfUniqueTerms = 0;
     for (String textFieldName : textFieldNames) {
-      Terms terms = MultiFields.getTerms(indexReader, textFieldName);
+      Terms terms = MultiTerms.getTerms(indexReader, textFieldName);
       long numPostings = terms.getSumDocFreq(); // number of term/doc pairs
       avgNumberOfUniqueTerms += numPostings / (double) terms.getDocCount(); // avg # of unique terms per doc
     }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/fd916480/lucene/classification/src/java/org/apache/lucene/classification/document/SimpleNaiveBayesDocumentClassifier.java
----------------------------------------------------------------------
diff --git a/lucene/classification/src/java/org/apache/lucene/classification/document/SimpleNaiveBayesDocumentClassifier.java b/lucene/classification/src/java/org/apache/lucene/classification/document/SimpleNaiveBayesDocumentClassifier.java
index f640590..2577dd2 100644
--- a/lucene/classification/src/java/org/apache/lucene/classification/document/SimpleNaiveBayesDocumentClassifier.java
+++ b/lucene/classification/src/java/org/apache/lucene/classification/document/SimpleNaiveBayesDocumentClassifier.java
@@ -34,7 +34,7 @@ import org.apache.lucene.classification.SimpleNaiveBayesClassifier;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexableField;
-import org.apache.lucene.index.MultiFields;
+import org.apache.lucene.index.MultiTerms;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.index.Terms;
 import org.apache.lucene.index.TermsEnum;
@@ -103,7 +103,7 @@ public class SimpleNaiveBayesDocumentClassifier extends SimpleNaiveBayesClassifi
     List<ClassificationResult<BytesRef>> assignedClasses = new ArrayList<>();
     Map<String, List<String[]>> fieldName2tokensArray = new LinkedHashMap<>();
     Map<String, Float> fieldName2boost = new LinkedHashMap<>();
-    Terms classes = MultiFields.getTerms(indexReader, classFieldName);
+    Terms classes = MultiTerms.getTerms(indexReader, classFieldName);
     if (classes != null) {
       TermsEnum classesEnum = classes.iterator();
       BytesRef c;
@@ -218,7 +218,7 @@ public class SimpleNaiveBayesDocumentClassifier extends SimpleNaiveBayesClassifi
    */
   private double getTextTermFreqForClass(Term term, String fieldName) throws IOException {
     double avgNumberOfUniqueTerms;
-    Terms terms = MultiFields.getTerms(indexReader, fieldName);
+    Terms terms = MultiTerms.getTerms(indexReader, fieldName);
     long numPostings = terms.getSumDocFreq(); // number of term/doc pairs
     avgNumberOfUniqueTerms = numPostings / (double) terms.getDocCount(); // avg # of unique terms per doc
     int docsWithC = indexReader.docFreq(term);

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/fd916480/lucene/classification/src/java/org/apache/lucene/classification/utils/NearestFuzzyQuery.java
----------------------------------------------------------------------
diff --git a/lucene/classification/src/java/org/apache/lucene/classification/utils/NearestFuzzyQuery.java b/lucene/classification/src/java/org/apache/lucene/classification/utils/NearestFuzzyQuery.java
index 308dcdc..c586fe8 100644
--- a/lucene/classification/src/java/org/apache/lucene/classification/utils/NearestFuzzyQuery.java
+++ b/lucene/classification/src/java/org/apache/lucene/classification/utils/NearestFuzzyQuery.java
@@ -27,7 +27,7 @@ import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.LeafReaderContext;
-import org.apache.lucene.index.MultiFields;
+import org.apache.lucene.index.MultiTerms;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.index.TermStates;
 import org.apache.lucene.index.Terms;
@@ -140,7 +140,7 @@ public class NearestFuzzyQuery extends Query {
 
   private void addTerms(IndexReader reader, FieldVals f, ScoreTermQueue q) throws IOException {
     if (f.queryString == null) return;
-    final Terms terms = MultiFields.getTerms(reader, f.fieldName);
+    final Terms terms = MultiTerms.getTerms(reader, f.fieldName);
     if (terms == null) {
       return;
     }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/fd916480/lucene/classification/src/test/org/apache/lucene/classification/BM25NBClassifierTest.java
----------------------------------------------------------------------
diff --git a/lucene/classification/src/test/org/apache/lucene/classification/BM25NBClassifierTest.java b/lucene/classification/src/test/org/apache/lucene/classification/BM25NBClassifierTest.java
index 050073c..4a4c985 100644
--- a/lucene/classification/src/test/org/apache/lucene/classification/BM25NBClassifierTest.java
+++ b/lucene/classification/src/test/org/apache/lucene/classification/BM25NBClassifierTest.java
@@ -24,7 +24,7 @@ import org.apache.lucene.analysis.ngram.EdgeNGramTokenFilter;
 import org.apache.lucene.analysis.reverse.ReverseStringFilter;
 import org.apache.lucene.classification.utils.ConfusionMatrixGenerator;
 import org.apache.lucene.index.LeafReader;
-import org.apache.lucene.index.MultiFields;
+import org.apache.lucene.index.MultiTerms;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.index.Terms;
 import org.apache.lucene.index.TermsEnum;
@@ -122,7 +122,7 @@ public class BM25NBClassifierTest extends ClassificationTestBase<BytesRef> {
       assertTrue(precision >= 0d);
       assertTrue(precision <= 1d);
 
-      Terms terms = MultiFields.getTerms(leafReader, categoryFieldName);
+      Terms terms = MultiTerms.getTerms(leafReader, categoryFieldName);
       TermsEnum iterator = terms.iterator();
       BytesRef term;
       while ((term = iterator.next()) != null) {

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/fd916480/lucene/classification/src/test/org/apache/lucene/classification/BooleanPerceptronClassifierTest.java
----------------------------------------------------------------------
diff --git a/lucene/classification/src/test/org/apache/lucene/classification/BooleanPerceptronClassifierTest.java b/lucene/classification/src/test/org/apache/lucene/classification/BooleanPerceptronClassifierTest.java
index 9f22e6f..9670cf8 100644
--- a/lucene/classification/src/test/org/apache/lucene/classification/BooleanPerceptronClassifierTest.java
+++ b/lucene/classification/src/test/org/apache/lucene/classification/BooleanPerceptronClassifierTest.java
@@ -19,7 +19,7 @@ package org.apache.lucene.classification;
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.classification.utils.ConfusionMatrixGenerator;
 import org.apache.lucene.index.LeafReader;
-import org.apache.lucene.index.MultiFields;
+import org.apache.lucene.index.MultiTerms;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.index.Terms;
 import org.apache.lucene.index.TermsEnum;
@@ -111,7 +111,7 @@ public class BooleanPerceptronClassifierTest extends ClassificationTestBase<Bool
       assertTrue(precision >= 0d);
       assertTrue(precision <= 1d);
 
-      Terms terms = MultiFields.getTerms(leafReader, booleanFieldName);
+      Terms terms = MultiTerms.getTerms(leafReader, booleanFieldName);
       TermsEnum iterator = terms.iterator();
       BytesRef term;
       while ((term = iterator.next()) != null) {

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/fd916480/lucene/classification/src/test/org/apache/lucene/classification/CachingNaiveBayesClassifierTest.java
----------------------------------------------------------------------
diff --git a/lucene/classification/src/test/org/apache/lucene/classification/CachingNaiveBayesClassifierTest.java b/lucene/classification/src/test/org/apache/lucene/classification/CachingNaiveBayesClassifierTest.java
index 8669df4..771ff9d 100644
--- a/lucene/classification/src/test/org/apache/lucene/classification/CachingNaiveBayesClassifierTest.java
+++ b/lucene/classification/src/test/org/apache/lucene/classification/CachingNaiveBayesClassifierTest.java
@@ -25,7 +25,7 @@ import org.apache.lucene.analysis.ngram.EdgeNGramTokenFilter;
 import org.apache.lucene.analysis.reverse.ReverseStringFilter;
 import org.apache.lucene.classification.utils.ConfusionMatrixGenerator;
 import org.apache.lucene.index.LeafReader;
-import org.apache.lucene.index.MultiFields;
+import org.apache.lucene.index.MultiTerms;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.index.Terms;
 import org.apache.lucene.index.TermsEnum;
@@ -116,7 +116,7 @@ public class CachingNaiveBayesClassifierTest extends ClassificationTestBase<Byte
       assertTrue(precision >= 0d);
       assertTrue(precision <= 1d);
 
-      Terms terms = MultiFields.getTerms(leafReader, categoryFieldName);
+      Terms terms = MultiTerms.getTerms(leafReader, categoryFieldName);
       TermsEnum iterator = terms.iterator();
       BytesRef term;
       while ((term = iterator.next()) != null) {

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/fd916480/lucene/classification/src/test/org/apache/lucene/classification/KNearestFuzzyClassifierTest.java
----------------------------------------------------------------------
diff --git a/lucene/classification/src/test/org/apache/lucene/classification/KNearestFuzzyClassifierTest.java b/lucene/classification/src/test/org/apache/lucene/classification/KNearestFuzzyClassifierTest.java
index 91cf943..3138bd6 100644
--- a/lucene/classification/src/test/org/apache/lucene/classification/KNearestFuzzyClassifierTest.java
+++ b/lucene/classification/src/test/org/apache/lucene/classification/KNearestFuzzyClassifierTest.java
@@ -19,7 +19,7 @@ package org.apache.lucene.classification;
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.classification.utils.ConfusionMatrixGenerator;
 import org.apache.lucene.index.LeafReader;
-import org.apache.lucene.index.MultiFields;
+import org.apache.lucene.index.MultiTerms;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.index.Terms;
 import org.apache.lucene.index.TermsEnum;
@@ -90,7 +90,7 @@ public class KNearestFuzzyClassifierTest extends ClassificationTestBase<BytesRef
       assertTrue(precision >= 0d);
       assertTrue(precision <= 1d);
 
-      Terms terms = MultiFields.getTerms(leafReader, categoryFieldName);
+      Terms terms = MultiTerms.getTerms(leafReader, categoryFieldName);
       TermsEnum iterator = terms.iterator();
       BytesRef term;
       while ((term = iterator.next()) != null) {

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/fd916480/lucene/classification/src/test/org/apache/lucene/classification/KNearestNeighborClassifierTest.java
----------------------------------------------------------------------
diff --git a/lucene/classification/src/test/org/apache/lucene/classification/KNearestNeighborClassifierTest.java b/lucene/classification/src/test/org/apache/lucene/classification/KNearestNeighborClassifierTest.java
index ab73185..3d618b4 100644
--- a/lucene/classification/src/test/org/apache/lucene/classification/KNearestNeighborClassifierTest.java
+++ b/lucene/classification/src/test/org/apache/lucene/classification/KNearestNeighborClassifierTest.java
@@ -23,7 +23,7 @@ import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.analysis.en.EnglishAnalyzer;
 import org.apache.lucene.classification.utils.ConfusionMatrixGenerator;
 import org.apache.lucene.index.LeafReader;
-import org.apache.lucene.index.MultiFields;
+import org.apache.lucene.index.MultiTerms;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.index.Terms;
 import org.apache.lucene.index.TermsEnum;
@@ -147,7 +147,7 @@ public class KNearestNeighborClassifierTest extends ClassificationTestBase<Bytes
       assertTrue(precision >= 0d);
       assertTrue(precision <= 1d);
 
-      Terms terms = MultiFields.getTerms(leafReader, categoryFieldName);
+      Terms terms = MultiTerms.getTerms(leafReader, categoryFieldName);
       TermsEnum iterator = terms.iterator();
       BytesRef term;
       while ((term = iterator.next()) != null) {

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/fd916480/lucene/classification/src/test/org/apache/lucene/classification/SimpleNaiveBayesClassifierTest.java
----------------------------------------------------------------------
diff --git a/lucene/classification/src/test/org/apache/lucene/classification/SimpleNaiveBayesClassifierTest.java b/lucene/classification/src/test/org/apache/lucene/classification/SimpleNaiveBayesClassifierTest.java
index 18f59b7..74569e2 100644
--- a/lucene/classification/src/test/org/apache/lucene/classification/SimpleNaiveBayesClassifierTest.java
+++ b/lucene/classification/src/test/org/apache/lucene/classification/SimpleNaiveBayesClassifierTest.java
@@ -24,7 +24,7 @@ import org.apache.lucene.analysis.ngram.EdgeNGramTokenFilter;
 import org.apache.lucene.analysis.reverse.ReverseStringFilter;
 import org.apache.lucene.classification.utils.ConfusionMatrixGenerator;
 import org.apache.lucene.index.LeafReader;
-import org.apache.lucene.index.MultiFields;
+import org.apache.lucene.index.MultiTerms;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.index.Terms;
 import org.apache.lucene.index.TermsEnum;
@@ -124,7 +124,7 @@ public class SimpleNaiveBayesClassifierTest extends ClassificationTestBase<Bytes
       assertTrue(precision >= 0d);
       assertTrue(precision <= 1d);
 
-      Terms terms = MultiFields.getTerms(leafReader, categoryFieldName);
+      Terms terms = MultiTerms.getTerms(leafReader, categoryFieldName);
       TermsEnum iterator = terms.iterator();
       BytesRef term;
       while ((term = iterator.next()) != null) {

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/fd916480/lucene/classification/src/test/org/apache/lucene/classification/utils/DocToDoubleVectorUtilsTest.java
----------------------------------------------------------------------
diff --git a/lucene/classification/src/test/org/apache/lucene/classification/utils/DocToDoubleVectorUtilsTest.java b/lucene/classification/src/test/org/apache/lucene/classification/utils/DocToDoubleVectorUtilsTest.java
index a90d1a5..25c4100 100644
--- a/lucene/classification/src/test/org/apache/lucene/classification/utils/DocToDoubleVectorUtilsTest.java
+++ b/lucene/classification/src/test/org/apache/lucene/classification/utils/DocToDoubleVectorUtilsTest.java
@@ -21,7 +21,7 @@ import org.apache.lucene.document.Field;
 import org.apache.lucene.document.FieldType;
 import org.apache.lucene.document.TextField;
 import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.MultiFields;
+import org.apache.lucene.index.MultiTerms;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.Terms;
 import org.apache.lucene.search.IndexSearcher;
@@ -89,7 +89,7 @@ public class DocToDoubleVectorUtilsTest extends LuceneTestCase {
 
   @Test
   public void testSparseFreqDoubleArrayConversion() throws Exception {
-    Terms fieldTerms = MultiFields.getTerms(index, "text");
+    Terms fieldTerms = MultiTerms.getTerms(index, "text");
     if (fieldTerms != null && fieldTerms.size() != -1) {
       IndexSearcher indexSearcher = new IndexSearcher(index);
       for (ScoreDoc scoreDoc : indexSearcher.search(new MatchAllDocsQuery(), Integer.MAX_VALUE).scoreDocs) {

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/fd916480/lucene/codecs/src/test/org/apache/lucene/codecs/blocktreeords/TestOrdsBlockTree.java
----------------------------------------------------------------------
diff --git a/lucene/codecs/src/test/org/apache/lucene/codecs/blocktreeords/TestOrdsBlockTree.java b/lucene/codecs/src/test/org/apache/lucene/codecs/blocktreeords/TestOrdsBlockTree.java
index 66e5f9c..c246513 100644
--- a/lucene/codecs/src/test/org/apache/lucene/codecs/blocktreeords/TestOrdsBlockTree.java
+++ b/lucene/codecs/src/test/org/apache/lucene/codecs/blocktreeords/TestOrdsBlockTree.java
@@ -31,7 +31,7 @@ import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.IndexWriterConfig;
-import org.apache.lucene.index.MultiFields;
+import org.apache.lucene.index.MultiTerms;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.TermsEnum;
 import org.apache.lucene.store.Directory;
@@ -53,7 +53,7 @@ public class TestOrdsBlockTree extends BasePostingsFormatTestCase {
     doc.add(newTextField("field", "a b c", Field.Store.NO));
     w.addDocument(doc);
     IndexReader r = w.getReader();
-    TermsEnum te = MultiFields.getTerms(r, "field").iterator();
+    TermsEnum te = MultiTerms.getTerms(r, "field").iterator();
 
     // Test next()
     assertEquals(new BytesRef("a"), te.next());
@@ -114,7 +114,7 @@ public class TestOrdsBlockTree extends BasePostingsFormatTestCase {
     }
     w.forceMerge(1);
     IndexReader r = w.getReader();
-    TermsEnum te = MultiFields.getTerms(r, "field").iterator();
+    TermsEnum te = MultiTerms.getTerms(r, "field").iterator();
 
     assertTrue(te.seekExact(new BytesRef("mo")));
     assertEquals(27, te.ord());
@@ -190,7 +190,7 @@ public class TestOrdsBlockTree extends BasePostingsFormatTestCase {
     }
     w.forceMerge(1);
     IndexReader r = w.getReader();
-    TermsEnum te = MultiFields.getTerms(r, "field").iterator();
+    TermsEnum te = MultiTerms.getTerms(r, "field").iterator();
 
     if (VERBOSE) {
       while (te.next() != null) {
@@ -250,7 +250,7 @@ public class TestOrdsBlockTree extends BasePostingsFormatTestCase {
     }
     w.forceMerge(1);
     IndexReader r = DirectoryReader.open(w);
-    TermsEnum te = MultiFields.getTerms(r, "field").iterator();
+    TermsEnum te = MultiTerms.getTerms(r, "field").iterator();
 
     if (VERBOSE) {
       BytesRef term;
@@ -300,7 +300,7 @@ public class TestOrdsBlockTree extends BasePostingsFormatTestCase {
     }
     w.forceMerge(1);
     IndexReader r = DirectoryReader.open(w);
-    TermsEnum te = MultiFields.getTerms(r, "field").iterator();
+    TermsEnum te = MultiTerms.getTerms(r, "field").iterator();
 
     BytesRef term;
     int ord = 0;
@@ -338,7 +338,7 @@ public class TestOrdsBlockTree extends BasePostingsFormatTestCase {
     }
     w.forceMerge(1);
     IndexReader r = DirectoryReader.open(w);
-    TermsEnum te = MultiFields.getTerms(r, "body").iterator();
+    TermsEnum te = MultiTerms.getTerms(r, "body").iterator();
 
     for(int i=0;i<30;i++) {
       for(int j=0;j<30;j++) {
@@ -380,7 +380,7 @@ public class TestOrdsBlockTree extends BasePostingsFormatTestCase {
 
     w.forceMerge(1);
     IndexReader r = w.getReader();
-    TermsEnum te = MultiFields.getTerms(r, "field").iterator();
+    TermsEnum te = MultiTerms.getTerms(r, "field").iterator();
     assertEquals(TermsEnum.SeekStatus.NOT_FOUND, te.seekCeil(new BytesRef(new byte[] {0x22})));
     assertEquals("a", te.term().utf8ToString());
     assertEquals(1L, te.ord());

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/fd916480/lucene/core/src/java/org/apache/lucene/index/FieldInfos.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/index/FieldInfos.java b/lucene/core/src/java/org/apache/lucene/index/FieldInfos.java
index 1e0ef94..580c50c 100644
--- a/lucene/core/src/java/org/apache/lucene/index/FieldInfos.java
+++ b/lucene/core/src/java/org/apache/lucene/index/FieldInfos.java
@@ -24,9 +24,12 @@ import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Iterator;
 import java.util.Map;
+import java.util.Objects;
 import java.util.Set;
 import java.util.Arrays;
 import java.util.List;
+import java.util.stream.Collectors;
+import java.util.stream.StreamSupport;
 
 import org.apache.lucene.util.ArrayUtil;
 
@@ -122,7 +125,36 @@ public class FieldInfos implements Iterable<FieldInfo> {
     }
     values = Collections.unmodifiableCollection(Arrays.asList(valuesTemp.toArray(new FieldInfo[0])));
   }
-  
+
+  /** Call this to get the (merged) FieldInfos for a
+   *  composite reader.
+   *  <p>
+   *  NOTE: the returned field numbers will likely not
+   *  correspond to the actual field numbers in the underlying
+   *  readers, and codec metadata ({@link FieldInfo#getAttribute(String)})
+   *  will be unavailable.
+   */
+  public static FieldInfos getMergedFieldInfos(IndexReader reader) {
+    final String softDeletesField = reader.leaves().stream()
+        .map(l -> l.reader().getFieldInfos().getSoftDeletesField())
+        .filter(Objects::nonNull)
+        .findAny().orElse(null);
+    final Builder builder = new Builder(new FieldNumbers(softDeletesField));
+    for(final LeafReaderContext ctx : reader.leaves()) {
+      builder.add(ctx.reader().getFieldInfos());
+    }
+    return builder.finish();
+  }
+
+  /** Returns a set of names of fields that have a terms index.  The order is undefined. */
+  public static Collection<String> getIndexedFields(IndexReader reader) {
+    return reader.leaves().stream()
+        .flatMap(l -> StreamSupport.stream(l.reader().getFieldInfos().spliterator(), false)
+        .filter(fi -> fi.getIndexOptions() != IndexOptions.NONE))
+        .map(fi -> fi.name)
+        .collect(Collectors.toSet());
+  }
+
   /** Returns true if any fields have freqs */
   public boolean hasFreq() {
     return hasFreq;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/fd916480/lucene/core/src/java/org/apache/lucene/index/IndexReader.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/index/IndexReader.java b/lucene/core/src/java/org/apache/lucene/index/IndexReader.java
index 71e6e88..c87f5da 100644
--- a/lucene/core/src/java/org/apache/lucene/index/IndexReader.java
+++ b/lucene/core/src/java/org/apache/lucene/index/IndexReader.java
@@ -331,7 +331,7 @@ public abstract class IndexReader implements Closeable {
    * requested document is deleted, and therefore asking for a deleted document
    * may yield unspecified results. Usually this is not required, however you
    * can test if the doc is deleted by checking the {@link
-   * Bits} returned from {@link MultiFields#getLiveDocs}.
+   * Bits} returned from {@link MultiBits#getLiveDocs}.
    *
    * <b>NOTE:</b> only the content of a field is returned,
    * if that field was stored during indexing.  Metadata

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/fd916480/lucene/core/src/java/org/apache/lucene/index/MultiBits.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/index/MultiBits.java b/lucene/core/src/java/org/apache/lucene/index/MultiBits.java
index 3f4397f..b1f5f51 100644
--- a/lucene/core/src/java/org/apache/lucene/index/MultiBits.java
+++ b/lucene/core/src/java/org/apache/lucene/index/MultiBits.java
@@ -16,6 +16,8 @@
  */
 package org.apache.lucene.index;
 
+import java.util.List;
+
 import org.apache.lucene.util.Bits;
 
 
@@ -27,7 +29,7 @@ import org.apache.lucene.util.Bits;
  *
  * @lucene.experimental
  */
-final class MultiBits implements Bits {
+public final class MultiBits implements Bits {
   private final Bits[] subs;
 
   // length is 1+subs.length (the last entry has the maxDoc):
@@ -35,13 +37,45 @@ final class MultiBits implements Bits {
 
   private final boolean defaultValue;
 
-  public MultiBits(Bits[] subs, int[] starts, boolean defaultValue) {
+  private MultiBits(Bits[] subs, int[] starts, boolean defaultValue) {
     assert starts.length == 1+subs.length;
     this.subs = subs;
     this.starts = starts;
     this.defaultValue = defaultValue;
   }
 
+  /** Returns a single {@link Bits} instance for this
+   *  reader, merging live Documents on the
+   *  fly.  This method will return null if the reader
+   *  has no deletions.
+   *
+   *  <p><b>NOTE</b>: this is a very slow way to access live docs.
+   *  For example, each Bits access will require a binary search.
+   *  It's better to get the sub-readers and iterate through them
+   *  yourself. */
+  public static Bits getLiveDocs(IndexReader reader) {
+    if (reader.hasDeletions()) {
+      final List<LeafReaderContext> leaves = reader.leaves();
+      final int size = leaves.size();
+      assert size > 0 : "A reader with deletions must have at least one leaf";
+      if (size == 1) {
+        return leaves.get(0).reader().getLiveDocs();
+      }
+      final Bits[] liveDocs = new Bits[size];
+      final int[] starts = new int[size + 1];
+      for (int i = 0; i < size; i++) {
+        // record all liveDocs, even if they are null
+        final LeafReaderContext ctx = leaves.get(i);
+        liveDocs[i] = ctx.reader().getLiveDocs();
+        starts[i] = ctx.docBase;
+      }
+      starts[size] = reader.maxDoc();
+      return new MultiBits(liveDocs, starts, true);
+    } else {
+      return null;
+    }
+  }
+
   private boolean checkLength(int reader, int doc) {
     final int length = starts[1+reader]-starts[reader];
     assert doc - starts[reader] < length: "doc=" + doc + " reader=" + reader + " starts[reader]=" + starts[reader] + " length=" + length;
@@ -79,37 +113,6 @@ final class MultiBits implements Bits {
     return b.toString();
   }
 
-  /**
-   * Represents a sub-Bits from 
-   * {@link MultiBits#getMatchingSub(org.apache.lucene.index.ReaderSlice) getMatchingSub()}.
-   */
-  public final static class SubResult {
-    public boolean matches;
-    public Bits result;
-  }
-
-  /**
-   * Returns a sub-Bits matching the provided <code>slice</code>
-   * <p>
-   * Because <code>null</code> usually has a special meaning for
-   * Bits (e.g. no deleted documents), you must check
-   * {@link SubResult#matches} instead to ensure the sub was 
-   * actually found.
-   */
-  public SubResult getMatchingSub(ReaderSlice slice) {
-    int reader = ReaderUtil.subIndex(slice.start, starts);
-    assert reader != -1;
-    assert reader < subs.length: "slice=" + slice + " starts[-1]=" + starts[starts.length-1];
-    final SubResult subResult = new SubResult();
-    if (starts[reader] == slice.start && starts[1+reader] == slice.start+slice.length) {
-      subResult.matches = true;
-      subResult.result = subs[reader];
-    } else {
-      subResult.matches = false;
-    }
-    return subResult;
-  }
-
   @Override
   public int length() {
     return starts[starts.length-1];

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/fd916480/lucene/core/src/java/org/apache/lucene/index/MultiDocValues.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/index/MultiDocValues.java b/lucene/core/src/java/org/apache/lucene/index/MultiDocValues.java
index f5f5934..b9ee2f5 100644
--- a/lucene/core/src/java/org/apache/lucene/index/MultiDocValues.java
+++ b/lucene/core/src/java/org/apache/lucene/index/MultiDocValues.java
@@ -56,7 +56,7 @@ public class MultiDocValues {
     } else if (size == 1) {
       return leaves.get(0).reader().getNormValues(field);
     }
-    FieldInfo fi = MultiFields.getMergedFieldInfos(r).fieldInfo(field);
+    FieldInfo fi = FieldInfos.getMergedFieldInfos(r).fieldInfo(field); //TODO avoid merging
     if (fi == null || fi.hasNorms() == false) {
       return null;
     }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/fd916480/lucene/core/src/java/org/apache/lucene/index/MultiFields.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/index/MultiFields.java b/lucene/core/src/java/org/apache/lucene/index/MultiFields.java
index 49a85a3..f4200da 100644
--- a/lucene/core/src/java/org/apache/lucene/index/MultiFields.java
+++ b/lucene/core/src/java/org/apache/lucene/index/MultiFields.java
@@ -19,17 +19,11 @@ package org.apache.lucene.index;
 
 import java.io.IOException;
 import java.util.ArrayList;
-import java.util.Collection;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
-import java.util.Objects;
 import java.util.concurrent.ConcurrentHashMap;
-import java.util.stream.Collectors;
-import java.util.stream.StreamSupport;
 
-import org.apache.lucene.util.Bits;
-import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.MergedIterator;
 
 /**
@@ -53,123 +47,9 @@ public final class MultiFields extends Fields {
   private final ReaderSlice[] subSlices;
   private final Map<String,Terms> terms = new ConcurrentHashMap<>();
 
-  /** Returns a single {@link Bits} instance for this
-   *  reader, merging live Documents on the
-   *  fly.  This method will return null if the reader 
-   *  has no deletions.
-   *
-   *  <p><b>NOTE</b>: this is a very slow way to access live docs.
-   *  For example, each Bits access will require a binary search.
-   *  It's better to get the sub-readers and iterate through them
-   *  yourself. */
-  public static Bits getLiveDocs(IndexReader reader) {
-    if (reader.hasDeletions()) {
-      final List<LeafReaderContext> leaves = reader.leaves();
-      final int size = leaves.size();
-      assert size > 0 : "A reader with deletions must have at least one leave";
-      if (size == 1) {
-        return leaves.get(0).reader().getLiveDocs();
-      }
-      final Bits[] liveDocs = new Bits[size];
-      final int[] starts = new int[size + 1];
-      for (int i = 0; i < size; i++) {
-        // record all liveDocs, even if they are null
-        final LeafReaderContext ctx = leaves.get(i);
-        liveDocs[i] = ctx.reader().getLiveDocs();
-        starts[i] = ctx.docBase;
-      }
-      starts[size] = reader.maxDoc();
-      return new MultiBits(liveDocs, starts, true);
-    } else {
-      return null;
-    }
-  }
-
-  /** This method may return null if the field does not exist or if it has no terms. */
-  public static Terms getTerms(IndexReader r, String field) throws IOException {
-    final List<LeafReaderContext> leaves = r.leaves();
-    if (leaves.size() == 1) {
-      return leaves.get(0).reader().terms(field);
-    }
-
-    final List<Terms> termsPerLeaf = new ArrayList<>(leaves.size());
-    final List<ReaderSlice> slicePerLeaf = new ArrayList<>(leaves.size());
-
-    for (int leafIdx = 0; leafIdx < leaves.size(); leafIdx++) {
-      LeafReaderContext ctx = leaves.get(leafIdx);
-      Terms subTerms = ctx.reader().terms(field);
-      if (subTerms != null) {
-        termsPerLeaf.add(subTerms);
-        slicePerLeaf.add(new ReaderSlice(ctx.docBase, r.maxDoc(), leafIdx));
-      }
-    }
-
-    if (termsPerLeaf.size() == 0) {
-      return null;
-    } else {
-      return new MultiTerms(termsPerLeaf.toArray(Terms.EMPTY_ARRAY),
-          slicePerLeaf.toArray(ReaderSlice.EMPTY_ARRAY));
-    }
-  }
-  
-  /** Returns {@link PostingsEnum} for the specified field and
-   *  term.  This will return null if the field or term does
-   *  not exist. */
-  public static PostingsEnum getTermDocsEnum(IndexReader r, String field, BytesRef term) throws IOException {
-    return getTermDocsEnum(r, field, term, PostingsEnum.FREQS);
-  }
-  
-  /** Returns {@link PostingsEnum} for the specified field and
-   *  term, with control over whether freqs are required.
-   *  Some codecs may be able to optimize their
-   *  implementation when freqs are not required.  This will
-   *  return null if the field or term does not exist.  See {@link
-   *  TermsEnum#postings(PostingsEnum,int)}.*/
-  public static PostingsEnum getTermDocsEnum(IndexReader r, String field, BytesRef term, int flags) throws IOException {
-    assert field != null;
-    assert term != null;
-    final Terms terms = getTerms(r, field);
-    if (terms != null) {
-      final TermsEnum termsEnum = terms.iterator();
-      if (termsEnum.seekExact(term)) {
-        return termsEnum.postings(null, flags);
-      }
-    }
-    return null;
-  }
-
-  /** Returns {@link PostingsEnum} for the specified
-   *  field and term.  This will return null if the field or
-   *  term does not exist or positions were not indexed. 
-   *  @see #getTermPositionsEnum(IndexReader, String, BytesRef, int) */
-  public static PostingsEnum getTermPositionsEnum(IndexReader r, String field, BytesRef term) throws IOException {
-    return getTermPositionsEnum(r, field, term, PostingsEnum.ALL);
-  }
-
-  /** Returns {@link PostingsEnum} for the specified
-   *  field and term, with control over whether offsets and payloads are
-   *  required.  Some codecs may be able to optimize
-   *  their implementation when offsets and/or payloads are not
-   *  required. This will return null if the field or term does not
-   *  exist. See {@link TermsEnum#postings(PostingsEnum,int)}. */
-  public static PostingsEnum getTermPositionsEnum(IndexReader r, String field, BytesRef term, int flags) throws IOException {
-    assert field != null;
-    assert term != null;
-    final Terms terms = getTerms(r, field);
-    if (terms != null) {
-      final TermsEnum termsEnum = terms.iterator();
-      if (termsEnum.seekExact(term)) {
-        return termsEnum.postings(null, flags);
-      }
-    }
-    return null;
-  }
-
   /**
-   * Expert: construct a new MultiFields instance directly.
-   * @lucene.internal
+   * Sole constructor.
    */
-  // TODO: why is this public?
   public MultiFields(Fields[] subs, ReaderSlice[] subSlices) {
     this.subs = subs;
     this.subSlices = subSlices;
@@ -223,34 +103,5 @@ public final class MultiFields extends Fields {
     return -1;
   }
 
-  /** Call this to get the (merged) FieldInfos for a
-   *  composite reader. 
-   *  <p>
-   *  NOTE: the returned field numbers will likely not
-   *  correspond to the actual field numbers in the underlying
-   *  readers, and codec metadata ({@link FieldInfo#getAttribute(String)}
-   *  will be unavailable.
-   */
-  public static FieldInfos getMergedFieldInfos(IndexReader reader) {
-    final String softDeletesField = reader.leaves().stream()
-        .map(l -> l.reader().getFieldInfos().getSoftDeletesField())
-        .filter(Objects::nonNull)
-        .findAny().orElse(null);
-    final FieldInfos.Builder builder = new FieldInfos.Builder(new FieldInfos.FieldNumbers(softDeletesField));
-    for(final LeafReaderContext ctx : reader.leaves()) {
-      builder.add(ctx.reader().getFieldInfos());
-    }
-    return builder.finish();
-  }
-
-  /** Returns a set of names of fields that have a terms index.  The order is undefined. */
-  public static Collection<String> getIndexedFields(IndexReader reader) {
-    return reader.leaves().stream()
-        .flatMap(l -> StreamSupport.stream(l.reader().getFieldInfos().spliterator(), false)
-        .filter(fi -> fi.getIndexOptions() != IndexOptions.NONE))
-        .map(fi -> fi.name)
-        .collect(Collectors.toSet());
-  }
-
 }
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/fd916480/lucene/core/src/java/org/apache/lucene/index/MultiLeafReader.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/index/MultiLeafReader.java b/lucene/core/src/java/org/apache/lucene/index/MultiLeafReader.java
new file mode 100644
index 0000000..d04e57a
--- /dev/null
+++ b/lucene/core/src/java/org/apache/lucene/index/MultiLeafReader.java
@@ -0,0 +1,34 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.lucene.index;
+
+/**
+ * Utility methods for working with a {@link IndexReader} as if it were a {@link LeafReader}.
+ *
+ * <p><b>NOTE</b>: for composite readers, you'll get better
+ * performance by gathering the sub readers using
+ * {@link IndexReader#getContext()} to get the
+ * atomic leaves and then operate per-LeafReader,
+ * instead of using this class.
+ * @lucene.experimental
+ */
+public class MultiLeafReader {
+
+  private MultiLeafReader() {}
+
+}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/fd916480/lucene/core/src/java/org/apache/lucene/index/MultiTerms.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/index/MultiTerms.java b/lucene/core/src/java/org/apache/lucene/index/MultiTerms.java
index 203e366..2e599fa 100644
--- a/lucene/core/src/java/org/apache/lucene/index/MultiTerms.java
+++ b/lucene/core/src/java/org/apache/lucene/index/MultiTerms.java
@@ -31,7 +31,6 @@ import org.apache.lucene.util.automaton.CompiledAutomaton;
  *
  * @lucene.experimental
  */
-
 public final class MultiTerms extends Terms {
   private final Terms[] subs;
   private final ReaderSlice[] subSlices;
@@ -40,13 +39,15 @@ public final class MultiTerms extends Terms {
   private final boolean hasPositions;
   private final boolean hasPayloads;
 
-  /** Sole constructor.
+  /**
+   * Sole constructor.  Use {@link #getTerms(IndexReader, String)} instead if possible.
    *
    * @param subs The {@link Terms} instances of all sub-readers. 
    * @param subSlices A parallel array (matching {@code
    *        subs}) describing the sub-reader slices.
+   * @lucene.internal
    */
-  public MultiTerms(Terms[] subs, ReaderSlice[] subSlices) throws IOException {
+  public MultiTerms(Terms[] subs, ReaderSlice[] subSlices) throws IOException { // TODO: make private?
     this.subs = subs;
     this.subSlices = subSlices;
     
@@ -68,6 +69,60 @@ public final class MultiTerms extends Terms {
     hasPayloads = hasPositions && _hasPayloads; // if all subs have pos, and at least one has payloads.
   }
 
+  /** This method may return null if the field does not exist or if it has no terms. */
+  public static Terms getTerms(IndexReader r, String field) throws IOException {
+    final List<LeafReaderContext> leaves = r.leaves();
+    if (leaves.size() == 1) {
+      return leaves.get(0).reader().terms(field);
+    }
+
+    final List<Terms> termsPerLeaf = new ArrayList<>(leaves.size());
+    final List<ReaderSlice> slicePerLeaf = new ArrayList<>(leaves.size());
+
+    for (int leafIdx = 0; leafIdx < leaves.size(); leafIdx++) {
+      LeafReaderContext ctx = leaves.get(leafIdx);
+      Terms subTerms = ctx.reader().terms(field);
+      if (subTerms != null) {
+        termsPerLeaf.add(subTerms);
+        slicePerLeaf.add(new ReaderSlice(ctx.docBase, r.maxDoc(), leafIdx));
+      }
+    }
+
+    if (termsPerLeaf.size() == 0) {
+      return null;
+    } else {
+      return new MultiTerms(termsPerLeaf.toArray(EMPTY_ARRAY),
+          slicePerLeaf.toArray(ReaderSlice.EMPTY_ARRAY));
+    }
+  }
+
+  /** Returns {@link PostingsEnum} for the specified
+   *  field and term.  This will return null if the field or
+   *  term does not exist or positions were not indexed.
+   *  @see #getTermPostingsEnum(IndexReader, String, BytesRef, int) */
+  public static PostingsEnum getTermPostingsEnum(IndexReader r, String field, BytesRef term) throws IOException {
+    return getTermPostingsEnum(r, field, term, PostingsEnum.ALL);
+  }
+
+  /** Returns {@link PostingsEnum} for the specified
+   *  field and term, with control over whether freqs, positions, offsets or payloads
+   *  are required.  Some codecs may be able to optimize
+   *  their implementation when offsets and/or payloads are not
+   *  required. This will return null if the field or term does not
+   *  exist. See {@link TermsEnum#postings(PostingsEnum,int)}. */
+  public static PostingsEnum getTermPostingsEnum(IndexReader r, String field, BytesRef term, int flags) throws IOException {
+    assert field != null;
+    assert term != null;
+    final Terms terms = getTerms(r, field);
+    if (terms != null) {
+      final TermsEnum termsEnum = terms.iterator();
+      if (termsEnum.seekExact(term)) {
+        return termsEnum.postings(null, flags);
+      }
+    }
+    return null;
+  }
+
   /** Expert: returns the Terms being merged. */
   public Terms[] getSubTerms() {
     return subs;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/fd916480/lucene/core/src/test/org/apache/lucene/analysis/TestCachingTokenFilter.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/test/org/apache/lucene/analysis/TestCachingTokenFilter.java b/lucene/core/src/test/org/apache/lucene/analysis/TestCachingTokenFilter.java
index 39248ee..67edcbf 100644
--- a/lucene/core/src/test/org/apache/lucene/analysis/TestCachingTokenFilter.java
+++ b/lucene/core/src/test/org/apache/lucene/analysis/TestCachingTokenFilter.java
@@ -26,7 +26,7 @@ import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.TextField;
 import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.MultiFields;
+import org.apache.lucene.index.MultiTerms;
 import org.apache.lucene.index.PostingsEnum;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.search.DocIdSetIterator;
@@ -84,14 +84,14 @@ public class TestCachingTokenFilter extends BaseTokenStreamTestCase {
     writer.addDocument(doc);
     
     IndexReader reader = writer.getReader();
-    PostingsEnum termPositions = MultiFields.getTermPositionsEnum(reader,
+    PostingsEnum termPositions = MultiTerms.getTermPostingsEnum(reader,
                                                                           "preanalyzed",
                                                                           new BytesRef("term1"));
     assertTrue(termPositions.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
     assertEquals(1, termPositions.freq());
     assertEquals(0, termPositions.nextPosition());
 
-    termPositions = MultiFields.getTermPositionsEnum(reader,
+    termPositions = MultiTerms.getTermPostingsEnum(reader,
                                                      "preanalyzed",
                                                      new BytesRef("term2"));
     assertTrue(termPositions.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
@@ -99,7 +99,7 @@ public class TestCachingTokenFilter extends BaseTokenStreamTestCase {
     assertEquals(1, termPositions.nextPosition());
     assertEquals(3, termPositions.nextPosition());
     
-    termPositions = MultiFields.getTermPositionsEnum(reader,
+    termPositions = MultiTerms.getTermPostingsEnum(reader,
                                                      "preanalyzed",
                                                      new BytesRef("term3"));
     assertTrue(termPositions.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/fd916480/lucene/core/src/test/org/apache/lucene/index/Test2BTerms.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/test/org/apache/lucene/index/Test2BTerms.java b/lucene/core/src/test/org/apache/lucene/index/Test2BTerms.java
index 0c2c05f..f43cc32 100644
--- a/lucene/core/src/test/org/apache/lucene/index/Test2BTerms.java
+++ b/lucene/core/src/test/org/apache/lucene/index/Test2BTerms.java
@@ -237,7 +237,7 @@ public class Test2BTerms extends LuceneTestCase {
 
   private List<BytesRef> findTerms(IndexReader r) throws IOException {
     System.out.println("TEST: findTerms");
-    final TermsEnum termsEnum = MultiFields.getTerms(r, "field").iterator();
+    final TermsEnum termsEnum = MultiTerms.getTerms(r, "field").iterator();
     final List<BytesRef> savedTerms = new ArrayList<>();
     int nextSave = TestUtil.nextInt(random(), 500000, 1000000);
     BytesRef term;
@@ -255,7 +255,7 @@ public class Test2BTerms extends LuceneTestCase {
     System.out.println("TEST: run " + terms.size() + " terms on reader=" + r);
     IndexSearcher s = newSearcher(r);
     Collections.shuffle(terms, random());
-    TermsEnum termsEnum = MultiFields.getTerms(r, "field").iterator();
+    TermsEnum termsEnum = MultiTerms.getTerms(r, "field").iterator();
     boolean failed = false;
     for(int iter=0;iter<10*terms.size();iter++) {
       final BytesRef term = terms.get(random().nextInt(terms.size()));

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/fd916480/lucene/core/src/test/org/apache/lucene/index/TestBinaryDocValuesUpdates.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestBinaryDocValuesUpdates.java b/lucene/core/src/test/org/apache/lucene/index/TestBinaryDocValuesUpdates.java
index a6632d8..1a63863 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestBinaryDocValuesUpdates.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestBinaryDocValuesUpdates.java
@@ -266,7 +266,7 @@ public class TestBinaryDocValuesUpdates extends LuceneTestCase {
       writer.close();
     }
     
-    Bits liveDocs = MultiFields.getLiveDocs(reader);
+    Bits liveDocs = MultiBits.getLiveDocs(reader);
     boolean[] expectedLiveDocs = new boolean[] { true, false, false, true, true, true };
     for (int i = 0; i < expectedLiveDocs.length; i++) {
       assertEquals(expectedLiveDocs[i], liveDocs.get(i));

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/fd916480/lucene/core/src/test/org/apache/lucene/index/TestCustomTermFreq.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestCustomTermFreq.java b/lucene/core/src/test/org/apache/lucene/index/TestCustomTermFreq.java
index 8bb81d2..cddac79 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestCustomTermFreq.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestCustomTermFreq.java
@@ -85,13 +85,13 @@ public class TestCustomTermFreq extends LuceneTestCase {
     doc.add(field);
     w.addDocument(doc);
     IndexReader r = DirectoryReader.open(w);
-    PostingsEnum postings = MultiFields.getTermDocsEnum(r, "field", new BytesRef("bar"));
+    PostingsEnum postings = MultiTerms.getTermPostingsEnum(r, "field", new BytesRef("bar"), (int) PostingsEnum.FREQS);
     assertNotNull(postings);
     assertEquals(0, postings.nextDoc());
     assertEquals(128, postings.freq());
     assertEquals(NO_MORE_DOCS, postings.nextDoc());
 
-    postings = MultiFields.getTermDocsEnum(r, "field", new BytesRef("foo"));
+    postings = MultiTerms.getTermPostingsEnum(r, "field", new BytesRef("foo"), (int) PostingsEnum.FREQS);
     assertNotNull(postings);
     assertEquals(0, postings.nextDoc());
     assertEquals(42, postings.freq());
@@ -123,7 +123,7 @@ public class TestCustomTermFreq extends LuceneTestCase {
     w.addDocument(doc);
     
     IndexReader r = DirectoryReader.open(w);
-    PostingsEnum postings = MultiFields.getTermDocsEnum(r, "field", new BytesRef("bar"));
+    PostingsEnum postings = MultiTerms.getTermPostingsEnum(r, "field", new BytesRef("bar"), (int) PostingsEnum.FREQS);
     assertNotNull(postings);
     assertEquals(0, postings.nextDoc());
     assertEquals(128, postings.freq());
@@ -131,7 +131,7 @@ public class TestCustomTermFreq extends LuceneTestCase {
     assertEquals(50, postings.freq());
     assertEquals(NO_MORE_DOCS, postings.nextDoc());
 
-    postings = MultiFields.getTermDocsEnum(r, "field", new BytesRef("foo"));
+    postings = MultiTerms.getTermPostingsEnum(r, "field", new BytesRef("foo"), (int) PostingsEnum.FREQS);
     assertNotNull(postings);
     assertEquals(0, postings.nextDoc());
     assertEquals(42, postings.freq());
@@ -156,13 +156,13 @@ public class TestCustomTermFreq extends LuceneTestCase {
     doc.add(field);
     w.addDocument(doc);
     IndexReader r = DirectoryReader.open(w);
-    PostingsEnum postings = MultiFields.getTermDocsEnum(r, "field", new BytesRef("bar"));
+    PostingsEnum postings = MultiTerms.getTermPostingsEnum(r, "field", new BytesRef("bar"), (int) PostingsEnum.FREQS);
     assertNotNull(postings);
     assertEquals(0, postings.nextDoc());
     assertEquals(228, postings.freq());
     assertEquals(NO_MORE_DOCS, postings.nextDoc());
 
-    postings = MultiFields.getTermDocsEnum(r, "field", new BytesRef("foo"));
+    postings = MultiTerms.getTermPostingsEnum(r, "field", new BytesRef("foo"), (int) PostingsEnum.FREQS);
     assertNotNull(postings);
     assertEquals(0, postings.nextDoc());
     assertEquals(59, postings.freq());
@@ -195,7 +195,7 @@ public class TestCustomTermFreq extends LuceneTestCase {
     w.addDocument(doc);
 
     IndexReader r = DirectoryReader.open(w);
-    PostingsEnum postings = MultiFields.getTermDocsEnum(r, "field", new BytesRef("bar"));
+    PostingsEnum postings = MultiTerms.getTermPostingsEnum(r, "field", new BytesRef("bar"), (int) PostingsEnum.FREQS);
     assertNotNull(postings);
     assertEquals(0, postings.nextDoc());
     assertEquals(228, postings.freq());
@@ -203,7 +203,7 @@ public class TestCustomTermFreq extends LuceneTestCase {
     assertEquals(140, postings.freq());
     assertEquals(NO_MORE_DOCS, postings.nextDoc());
 
-    postings = MultiFields.getTermDocsEnum(r, "field", new BytesRef("foo"));
+    postings = MultiTerms.getTermPostingsEnum(r, "field", new BytesRef("foo"), (int) PostingsEnum.FREQS);
     assertNotNull(postings);
     assertEquals(0, postings.nextDoc());
     assertEquals(59, postings.freq());
@@ -239,7 +239,7 @@ public class TestCustomTermFreq extends LuceneTestCase {
 
     IndexReader r = DirectoryReader.open(w);
 
-    TermsEnum termsEnum = MultiFields.getTerms(r, "field").iterator();
+    TermsEnum termsEnum = MultiTerms.getTerms(r, "field").iterator();
     assertTrue(termsEnum.seekExact(new BytesRef("foo")));
     assertEquals(179, termsEnum.totalTermFreq());
     assertTrue(termsEnum.seekExact(new BytesRef("bar")));

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/fd916480/lucene/core/src/test/org/apache/lucene/index/TestDirectoryReader.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestDirectoryReader.java b/lucene/core/src/test/org/apache/lucene/index/TestDirectoryReader.java
index dbc7ffa..67fb3df 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestDirectoryReader.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestDirectoryReader.java
@@ -93,7 +93,7 @@ public class TestDirectoryReader extends LuceneTestCase {
     MultiReader mr3 = new MultiReader(readers2);
 
     // test mixing up TermDocs and TermEnums from different readers.
-    TermsEnum te2 = MultiFields.getTerms(mr2, "body").iterator();
+    TermsEnum te2 = MultiTerms.getTerms(mr2, "body").iterator();
     te2.seekCeil(new BytesRef("wow"));
     PostingsEnum td = TestUtil.docs(random(), mr2,
         "body",
@@ -101,7 +101,7 @@ public class TestDirectoryReader extends LuceneTestCase {
         null,
         0);
 
-    TermsEnum te3 = MultiFields.getTerms(mr3, "body").iterator();
+    TermsEnum te3 = MultiTerms.getTerms(mr3, "body").iterator();
     te3.seekCeil(new BytesRef("wow"));
     td = TestUtil.docs(random(), te3,
         td,
@@ -185,7 +185,7 @@ public class TestDirectoryReader extends LuceneTestCase {
     writer.close();
     // set up reader
     DirectoryReader reader = DirectoryReader.open(d);
-    FieldInfos fieldInfos = MultiFields.getMergedFieldInfos(reader);
+    FieldInfos fieldInfos = FieldInfos.getMergedFieldInfos(reader);
     assertNotNull(fieldInfos.fieldInfo("keyword"));
     assertNotNull(fieldInfos.fieldInfo("text"));
     assertNotNull(fieldInfos.fieldInfo("unindexed"));
@@ -246,7 +246,7 @@ public class TestDirectoryReader extends LuceneTestCase {
 
     // verify fields again
     reader = DirectoryReader.open(d);
-    fieldInfos = MultiFields.getMergedFieldInfos(reader);
+    fieldInfos = FieldInfos.getMergedFieldInfos(reader);
 
     Collection<String> allFieldNames = new HashSet<>();
     Collection<String> indexedFieldNames = new HashSet<>();
@@ -559,8 +559,8 @@ public class TestDirectoryReader extends LuceneTestCase {
     assertEquals("Single segment test differs.", index1.leaves().size() == 1, index2.leaves().size() == 1);
 
     // check field names
-    FieldInfos fieldInfos1 = MultiFields.getMergedFieldInfos(index1);
-    FieldInfos fieldInfos2 = MultiFields.getMergedFieldInfos(index2);
+    FieldInfos fieldInfos1 = FieldInfos.getMergedFieldInfos(index1);
+    FieldInfos fieldInfos2 = FieldInfos.getMergedFieldInfos(index2);
     assertEquals("IndexReaders have different numbers of fields.", fieldInfos1.size(), fieldInfos2.size());
     final int numFields = fieldInfos1.size();
     for(int fieldID=0;fieldID<numFields;fieldID++) {
@@ -591,8 +591,8 @@ public class TestDirectoryReader extends LuceneTestCase {
     }
     
     // check deletions
-    final Bits liveDocs1 = MultiFields.getLiveDocs(index1);
-    final Bits liveDocs2 = MultiFields.getLiveDocs(index2);
+    final Bits liveDocs1 = MultiBits.getLiveDocs(index1);
+    final Bits liveDocs2 = MultiBits.getLiveDocs(index2);
     for (int i = 0; i < index1.maxDoc(); i++) {
       assertEquals("Doc " + i + " only deleted in one index.",
                    liveDocs1 == null || !liveDocs1.get(i),
@@ -619,19 +619,19 @@ public class TestDirectoryReader extends LuceneTestCase {
     }
     
     // check dictionary and posting lists
-    TreeSet<String> fields1 = new TreeSet<>(MultiFields.getIndexedFields(index1));
-    TreeSet<String> fields2 = new TreeSet<>(MultiFields.getIndexedFields(index2));
+    TreeSet<String> fields1 = new TreeSet<>(FieldInfos.getIndexedFields(index1));
+    TreeSet<String> fields2 = new TreeSet<>(FieldInfos.getIndexedFields(index2));
     Iterator<String> fenum2 = fields2.iterator();
     for (String field1 : fields1) {
       assertEquals("Different fields", field1, fenum2.next());
-      Terms terms1 = MultiFields.getTerms(index1, field1);
+      Terms terms1 = MultiTerms.getTerms(index1, field1);
       if (terms1 == null) {
-        assertNull(MultiFields.getTerms(index2, field1));
+        assertNull(MultiTerms.getTerms(index2, field1));
         continue;
       }
       TermsEnum enum1 = terms1.iterator();
 
-      Terms terms2 = MultiFields.getTerms(index2, field1);
+      Terms terms2 = MultiTerms.getTerms(index2, field1);
       assertNotNull(terms2);
       TermsEnum enum2 = terms2.iterator();
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/fd916480/lucene/core/src/test/org/apache/lucene/index/TestDocCount.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestDocCount.java b/lucene/core/src/test/org/apache/lucene/index/TestDocCount.java
index 35771cf..f7a105e 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestDocCount.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestDocCount.java
@@ -59,9 +59,9 @@ public class TestDocCount extends LuceneTestCase {
   }
   
   private void verifyCount(IndexReader ir) throws Exception {
-    final Collection<String> fields = MultiFields.getIndexedFields(ir);
+    final Collection<String> fields = FieldInfos.getIndexedFields(ir);
     for (String field : fields) {
-      Terms terms = MultiFields.getTerms(ir, field);
+      Terms terms = MultiTerms.getTerms(ir, field);
       if (terms == null) {
         continue;
       }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/fd916480/lucene/core/src/test/org/apache/lucene/index/TestDocValuesIndexing.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestDocValuesIndexing.java b/lucene/core/src/test/org/apache/lucene/index/TestDocValuesIndexing.java
index b3260d0..a838af3 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestDocValuesIndexing.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestDocValuesIndexing.java
@@ -205,7 +205,7 @@ public class TestDocValuesIndexing extends LuceneTestCase {
       writer.addDocument(doc);
     }
     DirectoryReader r = writer.getReader();
-    FieldInfos fi = MultiFields.getMergedFieldInfos(r);
+    FieldInfos fi = FieldInfos.getMergedFieldInfos(r);
     FieldInfo dvInfo = fi.fieldInfo("dv");
     assertTrue(dvInfo.getDocValuesType() != DocValuesType.NONE);
     NumericDocValues dv = MultiDocValues.getNumericValues(r, "dv");

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/fd916480/lucene/core/src/test/org/apache/lucene/index/TestDocumentWriter.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestDocumentWriter.java b/lucene/core/src/test/org/apache/lucene/index/TestDocumentWriter.java
index da96f98..54e6d26 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestDocumentWriter.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestDocumentWriter.java
@@ -126,7 +126,7 @@ public class TestDocumentWriter extends LuceneTestCase {
     writer.close();
     SegmentReader reader = new SegmentReader(info, Version.LATEST.major, newIOContext(random()));
 
-    PostingsEnum termPositions = MultiFields.getTermPositionsEnum(reader, "repeated", new BytesRef("repeated"));
+    PostingsEnum termPositions = MultiTerms.getTermPostingsEnum(reader, "repeated", new BytesRef("repeated"));
     assertTrue(termPositions.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
     int freq = termPositions.freq();
     assertEquals(2, freq);
@@ -197,7 +197,7 @@ public class TestDocumentWriter extends LuceneTestCase {
     writer.close();
     SegmentReader reader = new SegmentReader(info, Version.LATEST.major, newIOContext(random()));
 
-    PostingsEnum termPositions = MultiFields.getTermPositionsEnum(reader, "f1", new BytesRef("a"));
+    PostingsEnum termPositions = MultiTerms.getTermPostingsEnum(reader, "f1", new BytesRef("a"));
     assertTrue(termPositions.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
     int freq = termPositions.freq();
     assertEquals(3, freq);

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/fd916480/lucene/core/src/test/org/apache/lucene/index/TestFilterLeafReader.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestFilterLeafReader.java b/lucene/core/src/test/org/apache/lucene/index/TestFilterLeafReader.java
index f89ef9c..030d8b0 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestFilterLeafReader.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestFilterLeafReader.java
@@ -164,7 +164,7 @@ public class TestFilterLeafReader extends LuceneTestCase {
     writer.close();
     IndexReader reader = DirectoryReader.open(target);
     
-    TermsEnum terms = MultiFields.getTerms(reader, "default").iterator();
+    TermsEnum terms = MultiTerms.getTerms(reader, "default").iterator();
     while (terms.next() != null) {
       assertTrue(terms.term().utf8ToString().indexOf('e') != -1);
     }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/fd916480/lucene/core/src/test/org/apache/lucene/index/TestFlex.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestFlex.java b/lucene/core/src/test/org/apache/lucene/index/TestFlex.java
index 662d034..18e2c0f 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestFlex.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestFlex.java
@@ -55,7 +55,7 @@ public class TestFlex extends LuceneTestCase {
 
       IndexReader r = w.getReader();
       
-      TermsEnum terms = MultiFields.getTerms(r, "field3").iterator();
+      TermsEnum terms = MultiTerms.getTerms(r, "field3").iterator();
       assertEquals(TermsEnum.SeekStatus.END, terms.seekCeil(new BytesRef("abc")));
       r.close();
     }