You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@lucenenet.apache.org by sy...@apache.org on 2015/08/12 19:52:06 UTC

[4/4] lucenenet git commit: Finished testing Lucene.Net.Queries

Finished testing Lucene.Net.Queries

Added a few implementation changes based around expecting certain
arguments to throw if empty/null.


Project: http://git-wip-us.apache.org/repos/asf/lucenenet/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucenenet/commit/2808ec74
Tree: http://git-wip-us.apache.org/repos/asf/lucenenet/tree/2808ec74
Diff: http://git-wip-us.apache.org/repos/asf/lucenenet/diff/2808ec74

Branch: refs/heads/master
Commit: 2808ec7408968488aa6fd8091c1be8fe3fbd1eb9
Parents: 7b4987f
Author: Josh Sullivan <ja...@gmail.com>
Authored: Tue Aug 11 01:31:24 2015 -0400
Committer: Josh Sullivan <ja...@gmail.com>
Committed: Tue Aug 11 01:31:24 2015 -0400

----------------------------------------------------------------------
 src/Lucene.Net.Queries/CustomScoreQuery.cs      |   8 +-
 src/Lucene.Net.Queries/TermsFilter.cs           |  10 +
 .../Function/FunctionTestSetup.cs               | 172 ++++++++
 .../Function/TestBoostedQuery.cs                |  79 ++++
 .../Function/TestDocValuesFieldSources.cs       | 149 +++++++
 .../Function/TestFieldScoreQuery.cs             | 162 ++++++++
 .../Function/TestFunctionQuerySort.cs           |  80 ++++
 .../Function/TestLongNormValueSource.cs         | 230 +++++++++++
 .../Function/TestOrdValues.cs                   | 157 ++++++++
 .../Function/TestValueSources.cs                | 339 ++++++++++++++++
 .../Lucene.Net.Tests.Queries.csproj             |  16 +-
 .../Mlt/TestMoreLikeThis.cs                     | 134 +++++++
 src/Lucene.Net.Tests.Queries/TermsFilterTest.cs | 326 +++++++++++++++
 .../TestCustomScoreQuery.cs                     | 397 +++++++++++++++++++
 14 files changed, 2251 insertions(+), 8 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucenenet/blob/2808ec74/src/Lucene.Net.Queries/CustomScoreQuery.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Queries/CustomScoreQuery.cs b/src/Lucene.Net.Queries/CustomScoreQuery.cs
index 94e7895..0a17ca2 100644
--- a/src/Lucene.Net.Queries/CustomScoreQuery.cs
+++ b/src/Lucene.Net.Queries/CustomScoreQuery.cs
@@ -399,10 +399,10 @@ namespace Lucene.Net.Queries
             }
         }
 
-        //            public override Weight CreateWeight(IndexSearcher searcher)
-        //            {                
-        //                return new CustomWeight(this, searcher);
-        //            }
+        public override Weight CreateWeight(IndexSearcher searcher)
+        {                
+            return new CustomWeight(this, searcher);
+        }
 
         /// <summary>
         /// Checks if this is strict custom scoring.

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/2808ec74/src/Lucene.Net.Queries/TermsFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Queries/TermsFilter.cs b/src/Lucene.Net.Queries/TermsFilter.cs
index dfe9449..cd28f3f 100644
--- a/src/Lucene.Net.Queries/TermsFilter.cs
+++ b/src/Lucene.Net.Queries/TermsFilter.cs
@@ -67,6 +67,11 @@ namespace Lucene.Net.Queries
 
             public FieldAndTermEnumAnonymousInnerClassHelper(List<Term> terms)
             {
+                if (!terms.Any())
+                {
+                    throw new ArgumentException("no terms provided");
+                }
+
                 this.terms = terms;
                 terms.Sort();
                 iter = terms.GetEnumerator();
@@ -102,6 +107,11 @@ namespace Lucene.Net.Queries
             public FieldAndTermEnumAnonymousInnerClassHelper2(string field, List<BytesRef> terms)
                 : base(field)
             {
+                if (!terms.Any())
+                {
+                    throw new ArgumentException("no terms provided");
+                }
+
                 this.terms = terms;
                 terms.Sort();
                 iter = terms.GetEnumerator();

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/2808ec74/src/Lucene.Net.Tests.Queries/Function/FunctionTestSetup.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Tests.Queries/Function/FunctionTestSetup.cs b/src/Lucene.Net.Tests.Queries/Function/FunctionTestSetup.cs
new file mode 100644
index 0000000..f06b671
--- /dev/null
+++ b/src/Lucene.Net.Tests.Queries/Function/FunctionTestSetup.cs
@@ -0,0 +1,172 @@
+using System;
+using Lucene.Net.Analysis;
+using Lucene.Net.Documents;
+using Lucene.Net.Index;
+using Lucene.Net.Queries.Function;
+using Lucene.Net.Queries.Function.ValueSources;
+using Lucene.Net.Store;
+using Lucene.Net.Util;
+using NUnit.Framework;
+
+namespace Lucene.Net.Tests.Queries.Function
+{
+    /// <summary>
+    /// Setup for function tests
+    /// </summary>
+    public abstract class FunctionTestSetup : LuceneTestCase
+    {
+
+        /// <summary>
+    /// Actual score computation order is slightly different than assumptions
+        /// this allows for a small amount of variation
+        /// </summary>
+        protected internal static float TEST_SCORE_TOLERANCE_DELTA = 0.001f;
+
+        protected internal const int N_DOCS = 17; // select a prime number > 2
+
+        protected internal const string ID_FIELD = "id";
+        protected internal const string TEXT_FIELD = "text";
+        protected internal const string INT_FIELD = "iii";
+        protected internal const string FLOAT_FIELD = "fff";
+
+        protected internal ValueSource BYTE_VALUESOURCE = new ByteFieldSource(INT_FIELD);
+        protected internal ValueSource SHORT_VALUESOURCE = new ShortFieldSource(INT_FIELD);
+        protected internal ValueSource INT_VALUESOURCE = new IntFieldSource(INT_FIELD);
+        protected internal ValueSource INT_AS_FLOAT_VALUESOURCE = new FloatFieldSource(INT_FIELD);
+        protected internal ValueSource FLOAT_VALUESOURCE = new FloatFieldSource(FLOAT_FIELD);
+
+        private static readonly string[] DOC_TEXT_LINES =
+        {
+            @"Well, this is just some plain text we use for creating the ",
+            "test documents. It used to be a text from an online collection ",
+            "devoted to first aid, but if there was there an (online) lawyers ",
+            "first aid collection with legal advices, \"it\" might have quite ",
+            "probably advised one not to include \"it\"'s text or the text of ",
+            "any other online collection in one's code, unless one has money ",
+            "that one don't need and one is happy to donate for lawyers ",
+            "charity. Anyhow at some point, rechecking the usage of this text, ",
+            "it became uncertain that this text is free to use, because ",
+            "the web site in the disclaimer of he eBook containing that text ",
+            "was not responding anymore, and at the same time, in projGut, ",
+            "searching for first aid no longer found that eBook as well. ",
+            "So here we are, with a perhaps much less interesting ",
+            "text for the test, but oh much much safer. "
+        };
+
+        protected internal static Directory dir;
+        protected internal static Analyzer anlzr;
+
+        [TearDown]
+        public override void TearDown()
+        {
+            base.TearDown();
+            dir.Dispose();
+            dir = null;
+            anlzr = null;
+        }
+
+        
+        protected internal static void CreateIndex(bool doMultiSegment)
+        {
+            if (VERBOSE)
+            {
+                Console.WriteLine("TEST: setUp");
+            }
+            // prepare a small index with just a few documents.
+            dir = NewDirectory();
+            anlzr = new MockAnalyzer(Random());
+            IndexWriterConfig iwc = NewIndexWriterConfig(TEST_VERSION_CURRENT, anlzr).SetMergePolicy(NewLogMergePolicy());
+            if (doMultiSegment)
+            {
+                iwc.SetMaxBufferedDocs(TestUtil.NextInt(Random(), 2, 7));
+            }
+            RandomIndexWriter iw = new RandomIndexWriter(Random(), dir, iwc);
+            // add docs not exactly in natural ID order, to verify we do check the order of docs by scores
+            int remaining = N_DOCS;
+            bool[] done = new bool[N_DOCS];
+            int i = 0;
+            while (remaining > 0)
+            {
+                if (done[i])
+                {
+                    throw new Exception("to set this test correctly N_DOCS=" + N_DOCS + " must be primary and greater than 2!");
+                }
+                AddDoc(iw, i);
+                done[i] = true;
+                i = (i + 4) % N_DOCS;
+                remaining--;
+            }
+            if (!doMultiSegment)
+            {
+                if (VERBOSE)
+                {
+                    Console.WriteLine("TEST: setUp full merge");
+                }
+                iw.ForceMerge(1);
+            }
+            iw.Dispose();
+            if (VERBOSE)
+            {
+                Console.WriteLine("TEST: setUp done close");
+            }
+        }
+        
+        private static void AddDoc(RandomIndexWriter iw, int i)
+        {
+            Document d = new Document();
+            Field f;
+            int scoreAndID = i + 1;
+
+            FieldType customType = new FieldType(TextField.TYPE_STORED);
+            customType.Tokenized = false;
+            customType.OmitNorms = true;
+
+            f = NewField(ID_FIELD, Id2String(scoreAndID), customType); // for debug purposes
+            d.Add(f);
+
+            FieldType customType2 = new FieldType(TextField.TYPE_NOT_STORED);
+            customType2.OmitNorms = true;
+            f = NewField(TEXT_FIELD, "text of doc" + scoreAndID + TextLine(i), customType2); // for regular search
+            d.Add(f);
+
+            f = NewField(INT_FIELD, "" + scoreAndID, customType); // for function scoring
+            d.Add(f);
+
+            f = NewField(FLOAT_FIELD, scoreAndID + ".000", customType); // for function scoring
+            d.Add(f);
+
+            iw.AddDocument(d);
+            Log("added: " + d);
+        }
+
+        // 17 --> ID00017
+        protected internal static string Id2String(int scoreAndID)
+        {
+            string s = "000000000" + scoreAndID;
+            int n = ("" + N_DOCS).Length + 3;
+            int k = s.Length - n;
+            return "ID" + s.Substring(k);
+        }
+
+        // some text line for regular search
+        private static string TextLine(int docNum)
+        {
+            return DOC_TEXT_LINES[docNum % DOC_TEXT_LINES.Length];
+        }
+
+        // extract expected doc score from its ID Field: "ID7" --> 7.0
+        protected internal static float ExpectedFieldScore(string docIDFieldVal)
+        {
+            return Convert.ToSingle(docIDFieldVal.Substring(2));
+        }
+
+        // debug messages (change DBG to true for anything to print)
+        protected internal static void Log(object o)
+        {
+            if (VERBOSE)
+            {
+                Console.WriteLine(o.ToString());
+            }
+        }
+    }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/2808ec74/src/Lucene.Net.Tests.Queries/Function/TestBoostedQuery.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Tests.Queries/Function/TestBoostedQuery.cs b/src/Lucene.Net.Tests.Queries/Function/TestBoostedQuery.cs
new file mode 100644
index 0000000..8ddd281
--- /dev/null
+++ b/src/Lucene.Net.Tests.Queries/Function/TestBoostedQuery.cs
@@ -0,0 +1,79 @@
+using Lucene.Net.Analysis;
+using Lucene.Net.Documents;
+using Lucene.Net.Index;
+using Lucene.Net.Queries.Function;
+using Lucene.Net.Queries.Function.ValueSources;
+using Lucene.Net.Search;
+using Lucene.Net.Store;
+using Lucene.Net.Util;
+using NUnit.Framework;
+
+namespace Lucene.Net.Tests.Queries.Function
+{
+    /// <summary>
+    /// Basic tests for <seealso cref="BoostedQuery"/>
+    /// </summary>
+    // TODO: more tests
+    public class TestBoostedQuery : LuceneTestCase
+    {
+        internal static Directory dir;
+        internal static IndexReader ir;
+        internal static IndexSearcher @is;
+        
+        [SetUp]
+        public override void SetUp()
+        {
+            base.SetUp();
+            dir = NewDirectory();
+            IndexWriterConfig iwConfig = NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random()));
+            iwConfig.SetMergePolicy(NewLogMergePolicy());
+            RandomIndexWriter iw = new RandomIndexWriter(Random(), dir, iwConfig);
+            Document document = new Document();
+            Field idField = new StringField("id", "", Field.Store.NO);
+            document.Add(idField);
+            iw.AddDocument(document);
+            ir = iw.Reader;
+            @is = NewSearcher(ir);
+            iw.Dispose();
+        }
+
+        [TearDown]
+        public override void TearDown()
+        {
+            base.TearDown();
+            @is = null;
+            ir.Dispose();
+            ir = null;
+            dir.Dispose();
+            dir = null;
+        }
+        
+        [Test]
+        public virtual void TestBasic()
+        {
+            Query q = new MatchAllDocsQuery();
+            TopDocs docs = @is.Search(q, 10);
+            assertEquals(1, docs.TotalHits);
+            float score = docs.ScoreDocs[0].Score;
+
+            Query boostedQ = new BoostedQuery(q, new ConstValueSource(2.0f));
+            AssertHits(boostedQ, new float[] { score * 2 });
+        }
+
+
+        private void AssertHits(Query q, float[] scores)
+        {
+            ScoreDoc[] expected = new ScoreDoc[scores.Length];
+            int[] expectedDocs = new int[scores.Length];
+            for (int i = 0; i < expected.Length; i++)
+            {
+                expectedDocs[i] = i;
+                expected[i] = new ScoreDoc(i, scores[i]);
+            }
+            TopDocs docs = @is.Search(q, 10, new Sort(new SortField("id", SortField.Type_e.STRING)));
+            CheckHits.DoCheckHits(Random(), q, "", @is, expectedDocs);
+            CheckHits.CheckHitsQuery(q, expected, docs.ScoreDocs, expectedDocs);
+            CheckHits.CheckExplanations(q, "", @is);
+        }
+    }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/2808ec74/src/Lucene.Net.Tests.Queries/Function/TestDocValuesFieldSources.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Tests.Queries/Function/TestDocValuesFieldSources.cs b/src/Lucene.Net.Tests.Queries/Function/TestDocValuesFieldSources.cs
new file mode 100644
index 0000000..621c6eb
--- /dev/null
+++ b/src/Lucene.Net.Tests.Queries/Function/TestDocValuesFieldSources.cs
@@ -0,0 +1,149 @@
+using System;
+using Lucene.Net.Analysis;
+using Lucene.Net.Documents;
+using Lucene.Net.Index;
+using Lucene.Net.Queries.Function;
+using Lucene.Net.Queries.Function.ValueSources;
+using Lucene.Net.Randomized.Generators;
+using Lucene.Net.Store;
+using Lucene.Net.Support;
+using Lucene.Net.Util;
+using Lucene.Net.Util.Packed;
+using NUnit.Framework;
+
+namespace Lucene.Net.Tests.Queries.Function
+{
+    // [Util.LuceneTestCase.SuppressCodecs("Lucene3x")]
+    public class TestDocValuesFieldSources : LuceneTestCase
+    {
+        private void DoTest(FieldInfo.DocValuesType_e type)
+        {
+            Directory d = NewDirectory();
+            IndexWriterConfig iwConfig = NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random()));
+            int nDocs = AtLeast(50);
+            Field id = new NumericDocValuesField("id", 0);
+            Field f;
+            switch (type)
+            {
+                case FieldInfo.DocValuesType_e.BINARY:
+                    f = new BinaryDocValuesField("dv", new BytesRef());
+                    break;
+                case FieldInfo.DocValuesType_e.SORTED:
+                    f = new SortedDocValuesField("dv", new BytesRef());
+                    break;
+                case FieldInfo.DocValuesType_e.NUMERIC:
+                    f = new NumericDocValuesField("dv", 0);
+                    break;
+                default:
+                    throw new InvalidOperationException();
+            }
+            Document document = new Document();
+            document.Add(id);
+            document.Add(f);
+
+            object[] vals = new object[nDocs];
+
+            RandomIndexWriter iw = new RandomIndexWriter(Random(), d, iwConfig);
+            for (int i = 0; i < nDocs; ++i)
+            {
+                id.LongValue = i;
+                switch (type)
+                {
+                    case FieldInfo.DocValuesType_e.SORTED:
+                    case FieldInfo.DocValuesType_e.BINARY:
+                        do
+                        {
+                            vals[i] = TestUtil.RandomSimpleString(Random(), 20);
+                        } while (((string)vals[i]).Length == 0);
+                        f.BytesValue = new BytesRef((string)vals[i]);
+                        break;
+                    case FieldInfo.DocValuesType_e.NUMERIC:
+                        int bitsPerValue = Random().NextIntBetween(1, 31); // keep it an int
+                        vals[i] = (long)Random().Next((int)PackedInts.MaxValue(bitsPerValue));
+                        f.LongValue = (long) vals[i];
+                        break;
+                }
+                iw.AddDocument(document);
+                if (Random().NextBoolean() && i % 10 == 9)
+                {
+                    iw.Commit();
+                }
+            }
+            iw.Dispose();
+
+            DirectoryReader rd = DirectoryReader.Open(d);
+            foreach (AtomicReaderContext leave in rd.Leaves)
+            {
+                FunctionValues ids = (new LongFieldSource("id")).GetValues(null, leave);
+                ValueSource vs;
+                switch (type)
+                {
+                    case FieldInfo.DocValuesType_e.BINARY:
+                    case FieldInfo.DocValuesType_e.SORTED:
+                        vs = new BytesRefFieldSource("dv");
+                        break;
+                    case FieldInfo.DocValuesType_e.NUMERIC:
+                        vs = new LongFieldSource("dv");
+                        break;
+                    default:
+                        throw new InvalidOperationException();
+                }
+                FunctionValues values = vs.GetValues(null, leave);
+                BytesRef bytes = new BytesRef();
+                for (int i = 0; i < leave.AtomicReader.MaxDoc; ++i)
+                {
+                    assertTrue(values.Exists(i));
+                    if (vs is BytesRefFieldSource)
+                    {
+                        assertTrue(values.ObjectVal(i) is string);
+                    }
+                    else if (vs is LongFieldSource)
+                    {
+                        assertTrue(values.ObjectVal(i) is long?);
+                        assertTrue(values.BytesVal(i, bytes));
+                    }
+                    else
+                    {
+                        throw new InvalidOperationException();
+                    }
+
+                    object expected = vals[ids.IntVal(i)];
+                    switch (type)
+                    {
+                        case FieldInfo.DocValuesType_e.SORTED:
+                            values.OrdVal(i); // no exception
+                            assertTrue(values.NumOrd() >= 1);
+                            goto case FieldInfo.DocValuesType_e.BINARY;
+                        case FieldInfo.DocValuesType_e.BINARY:
+                            assertEquals(expected, values.ObjectVal(i));
+                            assertEquals(expected, values.StrVal(i));
+                            assertEquals(expected, values.ObjectVal(i));
+                            assertEquals(expected, values.StrVal(i));
+                            assertTrue(values.BytesVal(i, bytes));
+                            assertEquals(new BytesRef((string)expected), bytes);
+                            break;
+                        case FieldInfo.DocValuesType_e.NUMERIC:
+                            assertEquals(Number.ToInt64(expected.ToString()), values.LongVal(i));
+                            break;
+                    }
+                }
+            }
+            rd.Dispose();
+            d.Dispose();
+        }
+        
+        [Test]
+        public void Test()
+        {
+            var values = Enum.GetValues(typeof(FieldInfo.DocValuesType_e));
+            foreach (FieldInfo.DocValuesType_e type in values)
+            {
+                if (type != FieldInfo.DocValuesType_e.SORTED_SET)
+                {
+                    DoTest(type);
+                }
+            }
+        }
+
+    }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/2808ec74/src/Lucene.Net.Tests.Queries/Function/TestFieldScoreQuery.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Tests.Queries/Function/TestFieldScoreQuery.cs b/src/Lucene.Net.Tests.Queries/Function/TestFieldScoreQuery.cs
new file mode 100644
index 0000000..39938bd
--- /dev/null
+++ b/src/Lucene.Net.Tests.Queries/Function/TestFieldScoreQuery.cs
@@ -0,0 +1,162 @@
+using Lucene.Net.Index;
+using Lucene.Net.Queries.Function;
+using Lucene.Net.Search;
+using NUnit.Framework;
+
+namespace Lucene.Net.Tests.Queries.Function
+{
+    /// <summary>
+    /// Test FieldScoreQuery search.
+    /// <p>
+    /// Tests here create an index with a few documents, each having
+    /// an int value indexed  field and a float value indexed field.
+    /// The values of these fields are later used for scoring.
+    /// <p>
+    /// The rank tests use Hits to verify that docs are ordered (by score) as expected.
+    /// <p>
+    /// The exact score tests use TopDocs top to verify the exact score.  
+    /// </summary>
+    public class TestFieldScoreQuery : FunctionTestSetup
+    {
+        [SetUp]
+        public override void SetUp()
+        {
+            base.SetUp();
+            CreateIndex(true);
+        }
+
+        /// <summary>
+        /// Test that FieldScoreQuery of Type.BYTE returns docs in expected order.
+        /// </summary>
+        [Test]
+        public void TestRankByte()
+        {
+            // INT field values are small enough to be parsed as byte
+            DoTestRank(BYTE_VALUESOURCE);
+        }
+
+        /// <summary>
+        /// Test that FieldScoreQuery of Type.SHORT returns docs in expected order.
+        /// </summary>
+        [Test]
+        public void TestRankShort()
+        {
+            // INT field values are small enough to be parsed as short
+            DoTestRank(SHORT_VALUESOURCE);
+        }
+
+        /// <summary>
+        /// Test that FieldScoreQuery of Type.INT returns docs in expected order.
+        /// </summary>
+        [Test]
+        public void TestRankInt()
+        {
+            DoTestRank(INT_VALUESOURCE);
+        }
+
+        /// <summary>
+        /// Test that FieldScoreQuery of Type.FLOAT returns docs in expected order.
+        /// </summary>
+        [Test]
+        public void TestRankFloat()
+        {
+            // INT field can be parsed as float
+            DoTestRank(INT_AS_FLOAT_VALUESOURCE);
+            // same values, but in float format
+            DoTestRank(FLOAT_VALUESOURCE);
+        }
+
+        /// <summary>
+        /// Test that FieldScoreQuery returns docs in expected order.
+        /// </summary>
+        /// <param name="valueSource"></param>
+        private void DoTestRank(ValueSource valueSource)
+        {
+            FunctionQuery functionQuery = new FunctionQuery(valueSource);
+            IndexReader r = DirectoryReader.Open(dir);
+            IndexSearcher s = NewSearcher(r);
+            Log("test: " + functionQuery);
+            QueryUtils.Check(Random(), functionQuery, s);
+            ScoreDoc[] h = s.Search(functionQuery, null, 1000).ScoreDocs;
+            assertEquals("All docs should be matched!", N_DOCS, h.Length);
+            string prevID = "ID" + (N_DOCS + 1); // greater than all ids of docs in this test
+            for (int i = 0; i < h.Length; i++)
+            {
+                string resID = s.Doc(h[i].Doc).Get(ID_FIELD);
+                Log(i + ".   score=" + h[i].Score + "  -  " + resID);
+                Log(s.Explain(functionQuery, h[i].Doc));
+                assertTrue("res id " + resID + " should be < prev res id " + prevID, resID.CompareTo(prevID) < 0);
+                prevID = resID;
+            }
+            r.Dispose();
+        }
+
+        /// <summary>
+        /// Test that FieldScoreQuery of Type.BYTE returns the expected scores. </summary>
+        //JAVA TO C# CONVERTER TODO TASK: Most Java annotations will not have direct .NET equivalent attributes:
+        //ORIGINAL LINE: @Test public void testExactScoreByte() throws Exception
+        //JAVA TO C# CONVERTER WARNING: Method 'throws' clauses are not available in .NET:
+        public virtual void testExactScoreByte()
+        {
+            // INT field values are small enough to be parsed as byte
+            doTestExactScore(BYTE_VALUESOURCE);
+        }
+
+        /// <summary>
+        /// Test that FieldScoreQuery of Type.SHORT returns the expected scores. </summary>
+        //JAVA TO C# CONVERTER TODO TASK: Most Java annotations will not have direct .NET equivalent attributes:
+        //ORIGINAL LINE: @Test public void testExactScoreShort() throws Exception
+        //JAVA TO C# CONVERTER WARNING: Method 'throws' clauses are not available in .NET:
+        public virtual void testExactScoreShort()
+        {
+            // INT field values are small enough to be parsed as short
+            doTestExactScore(SHORT_VALUESOURCE);
+        }
+
+        /// <summary>
+        /// Test that FieldScoreQuery of Type.INT returns the expected scores. </summary>
+        //JAVA TO C# CONVERTER TODO TASK: Most Java annotations will not have direct .NET equivalent attributes:
+        //ORIGINAL LINE: @Test public void testExactScoreInt() throws Exception
+        //JAVA TO C# CONVERTER WARNING: Method 'throws' clauses are not available in .NET:
+        public virtual void testExactScoreInt()
+        {
+            doTestExactScore(INT_VALUESOURCE);
+        }
+
+        /// <summary>
+        /// Test that FieldScoreQuery of Type.FLOAT returns the expected scores. </summary>
+        //JAVA TO C# CONVERTER TODO TASK: Most Java annotations will not have direct .NET equivalent attributes:
+        //ORIGINAL LINE: @Test public void testExactScoreFloat() throws Exception
+        //JAVA TO C# CONVERTER WARNING: Method 'throws' clauses are not available in .NET:
+        public virtual void testExactScoreFloat()
+        {
+            // INT field can be parsed as float
+            doTestExactScore(INT_AS_FLOAT_VALUESOURCE);
+            // same values, but in float format
+            doTestExactScore(FLOAT_VALUESOURCE);
+        }
+
+        // Test that FieldScoreQuery returns docs with expected score.
+        //JAVA TO C# CONVERTER WARNING: Method 'throws' clauses are not available in .NET:
+        //ORIGINAL LINE: private void doTestExactScore(ValueSource valueSource) throws Exception
+        private void doTestExactScore(ValueSource valueSource)
+        {
+            FunctionQuery functionQuery = new FunctionQuery(valueSource);
+            IndexReader r = DirectoryReader.Open(dir);
+            IndexSearcher s = NewSearcher(r);
+            TopDocs td = s.Search(functionQuery, null, 1000);
+            assertEquals("All docs should be matched!", N_DOCS, td.TotalHits);
+            ScoreDoc[] sd = td.ScoreDocs;
+            foreach (ScoreDoc aSd in sd)
+            {
+                float score = aSd.Score;
+                Log(s.Explain(functionQuery, aSd.Doc));
+                string id = s.IndexReader.Document(aSd.Doc).Get(ID_FIELD);
+                float expectedScore = ExpectedFieldScore(id); // "ID7" --> 7.0
+                assertEquals("score of " + id + " shuould be " + expectedScore + " != " + score, expectedScore, score, TEST_SCORE_TOLERANCE_DELTA);
+            }
+            r.Dispose();
+        }
+
+    }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/2808ec74/src/Lucene.Net.Tests.Queries/Function/TestFunctionQuerySort.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Tests.Queries/Function/TestFunctionQuerySort.cs b/src/Lucene.Net.Tests.Queries/Function/TestFunctionQuerySort.cs
new file mode 100644
index 0000000..cbec34c
--- /dev/null
+++ b/src/Lucene.Net.Tests.Queries/Function/TestFunctionQuerySort.cs
@@ -0,0 +1,80 @@
+using System;
+using Lucene.Net.Documents;
+using Lucene.Net.Index;
+using Lucene.Net.Queries.Function.ValueSources;
+using Lucene.Net.Search;
+using Lucene.Net.Store;
+using Lucene.Net.Util;
+using NUnit.Framework;
+
+namespace Lucene.Net.Tests.Queries.Function
+{
+    /// <summary>
+    /// Test that functionquery's getSortField() actually works.
+    /// </summary>
+    public class TestFunctionQuerySort : LuceneTestCase
+    {
+        [Test]
+        public void TestSearchAfterWhenSortingByFunctionValues()
+        {
+            Directory dir = NewDirectory();
+            IndexWriterConfig iwc = NewIndexWriterConfig(TEST_VERSION_CURRENT, null);
+            iwc.SetMergePolicy(NewLogMergePolicy()); // depends on docid order
+            RandomIndexWriter writer = new RandomIndexWriter(Random(), dir, iwc);
+
+            Document doc = new Document();
+            Field field = new StringField("value", "", Field.Store.YES);
+            doc.Add(field);
+
+            // Save docs unsorted (decreasing value n, n-1, ...)
+            const int NUM_VALS = 5;
+            for (int val = NUM_VALS; val > 0; val--)
+            {
+                field.StringValue = Convert.ToString(val);
+                writer.AddDocument(doc);
+            }
+
+            // Open index
+            IndexReader reader = writer.Reader;
+            writer.Dispose();
+            IndexSearcher searcher = NewSearcher(reader);
+
+            // Get ValueSource from FieldCache
+            IntFieldSource src = new IntFieldSource("value");
+            // ...and make it a sort criterion
+            SortField sf = src.GetSortField(false).Rewrite(searcher);
+            Sort orderBy = new Sort(sf);
+
+            // Get hits sorted by our FunctionValues (ascending values)
+            Query q = new MatchAllDocsQuery();
+            TopDocs hits = searcher.Search(q, reader.MaxDoc, orderBy);
+            assertEquals(NUM_VALS, hits.ScoreDocs.Length);
+            // Verify that sorting works in general
+            int i = 0;
+            foreach (ScoreDoc hit in hits.ScoreDocs)
+            {
+                int valueFromDoc = Convert.ToInt32(reader.Document(hit.Doc).Get("value"));
+                assertEquals(++i, valueFromDoc);
+            }
+
+            // Now get hits after hit #2 using IS.searchAfter()
+            int afterIdx = 1;
+            FieldDoc afterHit = (FieldDoc)hits.ScoreDocs[afterIdx];
+            hits = searcher.SearchAfter(afterHit, q, reader.MaxDoc, orderBy);
+
+            // Expected # of hits: NUM_VALS - 2
+            assertEquals(NUM_VALS - (afterIdx + 1), hits.ScoreDocs.Length);
+
+            // Verify that hits are actually "after"
+            int afterValue = (int)((double?)afterHit.Fields[0]);
+            foreach (ScoreDoc hit in hits.ScoreDocs)
+            {
+                int val = Convert.ToInt32(reader.Document(hit.Doc).Get("value"));
+                assertTrue(afterValue <= val);
+                assertFalse(hit.Doc == afterHit.Doc);
+            }
+            reader.Dispose();
+            dir.Dispose();
+        }
+    }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/2808ec74/src/Lucene.Net.Tests.Queries/Function/TestLongNormValueSource.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Tests.Queries/Function/TestLongNormValueSource.cs b/src/Lucene.Net.Tests.Queries/Function/TestLongNormValueSource.cs
new file mode 100644
index 0000000..38ebe7f
--- /dev/null
+++ b/src/Lucene.Net.Tests.Queries/Function/TestLongNormValueSource.cs
@@ -0,0 +1,230 @@
+using System;
+using Lucene.Net.Analysis;
+using Lucene.Net.Documents;
+using Lucene.Net.Index;
+using Lucene.Net.Queries.Function;
+using Lucene.Net.Queries.Function.ValueSources;
+using Lucene.Net.Search;
+using Lucene.Net.Search.Similarities;
+using Lucene.Net.Store;
+using Lucene.Net.Util;
+using NUnit.Framework;
+
+namespace Lucene.Net.Tests.Queries.Function
+{
+    // [Util.LuceneTestCase.SuppressCodecs("Lucene3x")]
+    public class TestLongNormValueSource : LuceneTestCase
+    {
+        internal static Directory dir;
+        internal static IndexReader reader;
+        internal static IndexSearcher searcher;
+        private static Similarity sim = new PreciseDefaultSimilarity();
+        
+        [SetUp]
+        public override void SetUp()
+        {
+            base.SetUp();
+
+            dir = NewDirectory();
+            IndexWriterConfig iwConfig = NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random()));
+            iwConfig.SetMergePolicy(NewLogMergePolicy());
+            iwConfig.SetSimilarity(sim);
+            RandomIndexWriter iw = new RandomIndexWriter(Random(), dir, iwConfig);
+
+            Document doc = new Document();
+            doc.Add(new TextField("text", "this is a test test test", Field.Store.NO));
+            iw.AddDocument(doc);
+
+            doc = new Document();
+            doc.Add(new TextField("text", "second test", Field.Store.NO));
+            iw.AddDocument(doc);
+
+            reader = iw.Reader;
+            searcher = NewSearcher(reader);
+            iw.Dispose();
+        }
+        
+        [TearDown]
+        public override void TearDown()
+        {
+            base.TearDown();
+
+            searcher = null;
+            reader.Dispose();
+            reader = null;
+            dir.Dispose();
+            dir = null;
+        }
+        
+        [Test]
+        public void TestNorm()
+        {
+            Similarity saved = searcher.Similarity;
+            try
+            {
+                // no norm field (so agnostic to indexed similarity)
+                searcher.Similarity = sim;
+                AssertHits(new FunctionQuery(new NormValueSource("text")), new float[] { 0f, 0f });
+            }
+            finally
+            {
+                searcher.Similarity = saved;
+            }
+        }
+        
+        protected virtual void AssertHits(Query q, float[] scores)
+        {
+            ScoreDoc[] expected = new ScoreDoc[scores.Length];
+            int[] expectedDocs = new int[scores.Length];
+            for (int i = 0; i < expected.Length; i++)
+            {
+                expectedDocs[i] = i;
+                expected[i] = new ScoreDoc(i, scores[i]);
+            }
+            TopDocs docs = searcher.Search(q, 2, new Sort(new SortField("id", SortField.Type_e.STRING)));
+
+            /*
+            for (int i=0;i<docs.scoreDocs.length;i++) {
+              System.out.println(searcher.explain(q, docs.scoreDocs[i].doc));
+            }
+            */
+
+            CheckHits.DoCheckHits(Random(), q, "", searcher, expectedDocs);
+            CheckHits.CheckHitsQuery(q, expected, docs.ScoreDocs, expectedDocs);
+            CheckHits.CheckExplanations(q, "", searcher);
+        }
+    }
+
    /// <summary>
    /// TF/IDF similarity that scores exactly like <c>DefaultSimilarity</c> but stores the
    /// norm losslessly: the float norm is kept as the IEEE-754 bit pattern of its double
    /// value (see <see cref="EncodeNormValue(float)"/>) instead of the lossy single-byte
    /// SmallFloat encoding. This lets norm-based tests assert exact values.
    /// </summary>
    internal class PreciseDefaultSimilarity : TFIDFSimilarity
    {
        /// <summary>
        /// Sole constructor: parameter-free </summary>
        public PreciseDefaultSimilarity()
        {
        }

        /// <summary>
        /// Implemented as <code>overlap / maxOverlap</code>. </summary>
        public override float Coord(int overlap, int maxOverlap)
        {
            return overlap / (float)maxOverlap;
        }

        /// <summary>
        /// Implemented as <code>1/sqrt(sumOfSquaredWeights)</code>. </summary>
        public override float QueryNorm(float sumOfSquaredWeights)
        {
            return (float)(1.0 / Math.Sqrt(sumOfSquaredWeights));
        }

        /// <summary>
        /// Encodes a normalization factor for storage in an index.
        /// <para>
        /// Unlike <c>DefaultSimilarity</c> (which compresses the norm into a single byte
        /// via SmallFloat, losing precision), this widens the float to double and returns
        /// its raw 64 bits — a lossless encoding that
        /// <see cref="DecodeNormValue(long)"/> reverses exactly.
        /// </para>
        /// </summary>
        public override long EncodeNormValue(float f)
        {
            return BitConverter.DoubleToInt64Bits(f);
        }

        /// <summary>
        /// Decodes a norm produced by <see cref="EncodeNormValue(float)"/>: reinterprets
        /// the 64 bits as a double and narrows back to float (lossless round-trip).
        /// </summary>
        public override float DecodeNormValue(long norm)
        {
            return (float) BitConverter.Int64BitsToDouble(norm);
        }

        /// <summary>
        /// Implemented as <c>state.Boost * (1 / sqrt(numTerms))</c>, where
        /// <c>numTerms</c> is <c>FieldInvertState.Length</c>, minus
        /// <c>FieldInvertState.NumOverlap</c> when <see cref="DiscountOverlaps"/>
        /// is true (the default).
        ///
        ///  @lucene.experimental
        /// </summary>
        public override float LengthNorm(FieldInvertState state)
        {
            int numTerms;
            if (discountOverlaps)
            {
                numTerms = state.Length - state.NumOverlap;
            }
            else
            {
                numTerms = state.Length;
            }
            return state.Boost * ((float)(1.0 / Math.Sqrt(numTerms)));
        }

        /// <summary>
        /// Implemented as <code>sqrt(freq)</code>. </summary>
        public override float Tf(float freq)
        {
            return (float)Math.Sqrt(freq);
        }

        /// <summary>
        /// Implemented as <code>1 / (distance + 1)</code>. 
        /// </summary>
        public override float SloppyFreq(int distance)
        {
            return 1.0f / (distance + 1);
        }

        /// <summary>
        /// The default implementation returns <code>1</code>
        /// </summary>
        public override float ScorePayload(int doc, int start, int end, BytesRef payload)
        {
            return 1;
        }

        /// <summary>
        /// Implemented as <code>log(numDocs/(docFreq+1)) + 1</code>. 
        /// </summary>
        public override float Idf(long docFreq, long numDocs)
        {
            return (float)(Math.Log(numDocs / (double)(docFreq + 1)) + 1.0);
        }

        /// <summary>
        /// True if overlap tokens (tokens with a position increment of zero) are
        /// discounted from the document's length.
        /// </summary>
        protected internal bool discountOverlaps = true;

        /// <summary>
        /// Determines whether overlap tokens (Tokens with
        ///  0 position increment) are ignored when computing
        ///  norm.  By default this is true, meaning overlap
        ///  tokens do not count when computing norms.
        /// 
        ///  @lucene.experimental
        /// </summary>
        ///  <seealso cref="LengthNorm(FieldInvertState)"/>
        public virtual bool DiscountOverlaps
        {
            set { discountOverlaps = value; }
            get { return discountOverlaps; }
        }

        // NOTE(review): reports itself as "DefaultSimilarity" — presumably so output
        // (e.g. explanations) matches the similarity it imitates; confirm intentional.
        public override string ToString()
        {
            return "DefaultSimilarity";
        }
    }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/2808ec74/src/Lucene.Net.Tests.Queries/Function/TestOrdValues.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Tests.Queries/Function/TestOrdValues.cs b/src/Lucene.Net.Tests.Queries/Function/TestOrdValues.cs
new file mode 100644
index 0000000..ef396ee
--- /dev/null
+++ b/src/Lucene.Net.Tests.Queries/Function/TestOrdValues.cs
@@ -0,0 +1,157 @@
+using Lucene.Net.Index;
+using Lucene.Net.Queries.Function;
+using Lucene.Net.Queries.Function.ValueSources;
+using Lucene.Net.Search;
+using NUnit.Framework;
+
+namespace Lucene.Net.Tests.Queries.Function
+{
+    /// <summary>
+    /// Test search based on OrdFieldSource and ReverseOrdFieldSource.
+    /// <p/>
+    /// Tests here create an index with a few documents, each having
+    /// an indexed "id" field.
+    /// The ord values of this field are later used for scoring.
+    /// <p/>
+    /// The order tests use Hits to verify that docs are ordered as expected.
+    /// <p/>
+    /// The exact score tests use TopDocs top to verify the exact score.
+    /// </summary>
+    public class TestOrdValues : FunctionTestSetup
+    {
+        [SetUp]
+        public override void SetUp()
+        {
+            base.SetUp();
+            CreateIndex(false);
+        }
+
+        /// <summary>
+        /// Test OrdFieldSource
+        /// </summary>
+        [Test]
+        public void TestOrdFieldRank()
+        {
+            DoTestRank(ID_FIELD, true);
+        }
+
+        /// <summary>
+        /// Test ReverseOrdFieldSource
+        /// </summary>
+        [Test]
+        public void TestReverseOrdFieldRank()
+        {
+            DoTestRank(ID_FIELD, false);
+        }
+
+        /// <summary>
+        /// Test that queries based on reverse/ordFieldScore scores correctly
+        /// </summary>
+        /// <param name="field"></param>
+        /// <param name="inOrder"></param>
+        private static void DoTestRank(string field, bool inOrder)
+        {
+            IndexReader r = DirectoryReader.Open(dir);
+            IndexSearcher s = NewSearcher(r);
+            ValueSource vs;
+            if (inOrder)
+            {
+                vs = new OrdFieldSource(field);
+            }
+            else
+            {
+                vs = new ReverseOrdFieldSource(field);
+            }
+
+            Query q = new FunctionQuery(vs);
+            Log("test: " + q);
+            QueryUtils.Check(Random(), q, s);
+            ScoreDoc[] h = s.Search(q, null, 1000).ScoreDocs;
+            assertEquals("All docs should be matched!", N_DOCS, h.Length);
+            string prevID = inOrder ? "IE" : "IC"; // smaller than all ids of docs in this test ("ID0001", etc.) -  greater than all ids of docs in this test ("ID0001", etc.)
+
+            for (int i = 0; i < h.Length; i++)
+            {
+                string resID = s.Doc(h[i].Doc).Get(ID_FIELD);
+                Log(i + ".   score=" + h[i].Score + "  -  " + resID);
+                Log(s.Explain(q, h[i].Doc));
+                if (inOrder)
+                {
+                    assertTrue("res id " + resID + " should be < prev res id " + prevID, resID.CompareTo(prevID) < 0);
+                }
+                else
+                {
+                    assertTrue("res id " + resID + " should be > prev res id " + prevID, resID.CompareTo(prevID) > 0);
+                }
+                prevID = resID;
+            }
+            r.Dispose();
+        }
+
+        /// <summary>
+        /// Test exact score for OrdFieldSource
+        /// </summary>
+        [Test]
+        public void TestOrdFieldExactScore()
+        {
+            DoTestExactScore(ID_FIELD, true);
+        }
+
+        /// <summary>
+        /// Test exact score for ReverseOrdFieldSource
+        /// </summary>
+        [Test]
+        public void TestReverseOrdFieldExactScore()
+        {
+            DoTestExactScore(ID_FIELD, false);
+        }
+
+
+        /// <summary>
+        /// Test that queries based on reverse/ordFieldScore returns docs with expected score.
+        /// </summary>
+        /// <param name="field"></param>
+        /// <param name="inOrder"></param>
+        private void DoTestExactScore(string field, bool inOrder)
+        {
+            IndexReader r = DirectoryReader.Open(dir);
+            IndexSearcher s = NewSearcher(r);
+            ValueSource vs;
+            if (inOrder)
+            {
+                vs = new OrdFieldSource(field);
+            }
+            else
+            {
+                vs = new ReverseOrdFieldSource(field);
+            }
+            Query q = new FunctionQuery(vs);
+            TopDocs td = s.Search(q, null, 1000);
+            assertEquals("All docs should be matched!", N_DOCS, td.TotalHits);
+            ScoreDoc[] sd = td.ScoreDocs;
+            for (int i = 0; i < sd.Length; i++)
+            {
+                float score = sd[i].Score;
+                string id = s.IndexReader.Document(sd[i].Doc).Get(ID_FIELD);
+                Log("-------- " + i + ". Explain doc " + id);
+                Log(s.Explain(q, sd[i].Doc));
+                float expectedScore = N_DOCS - i - 1;
+                assertEquals("score of result " + i + " shuould be " + expectedScore + " != " + score, expectedScore, score, TEST_SCORE_TOLERANCE_DELTA);
+                string expectedId = inOrder ? Id2String(N_DOCS - i) : Id2String(i + 1); // reverse  ==> smaller values first -  in-order ==> larger  values first
+                assertTrue("id of result " + i + " shuould be " + expectedId + " != " + score, expectedId.Equals(id));
+            }
+            r.Dispose();
+        }
+
+        // LUCENE-1250
+        [Test]
+        public void TestEqualsNull()
+        {
+            OrdFieldSource ofs = new OrdFieldSource("f");
+            assertFalse(ofs.Equals(null));
+
+            ReverseOrdFieldSource rofs = new ReverseOrdFieldSource("f");
+            assertFalse(rofs.Equals(null));
+        }
+    }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/2808ec74/src/Lucene.Net.Tests.Queries/Function/TestValueSources.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Tests.Queries/Function/TestValueSources.cs b/src/Lucene.Net.Tests.Queries/Function/TestValueSources.cs
new file mode 100644
index 0000000..d48d41c
--- /dev/null
+++ b/src/Lucene.Net.Tests.Queries/Function/TestValueSources.cs
@@ -0,0 +1,339 @@
+using System;
+using System.Collections.Generic;
+using Lucene.Net.Analysis;
+using Lucene.Net.Codecs;
+using Lucene.Net.Documents;
+using Lucene.Net.Index;
+using Lucene.Net.Queries.Function;
+using Lucene.Net.Queries.Function.ValueSources;
+using Lucene.Net.Search;
+using Lucene.Net.Search.Similarities;
+using Lucene.Net.Store;
+using Lucene.Net.Util;
+using NUnit.Framework;
+
+namespace Lucene.Net.Tests.Queries.Function
+{
+    // TODO: add separate docvalues test
+    /// <summary>
+    /// barebones tests for function queries.
+    /// </summary>
+    public class TestValueSources : LuceneTestCase
+    {
+        internal static Directory dir;
+        internal static IndexReader reader;
+        internal static IndexSearcher searcher;
+
+        internal static readonly IList<string[]> documents = new[]
+        {
+            /*      id,  byte, double, float, int,  long,   short, string, text */
+            new[] { "0", "5",  "3.63", "5.2", "35", "4343", "945", "test", "this is a test test test" },
+            new[] { "1", "12", "5.65", "9.3", "54", "1954", "123", "bar", "second test" }
+        };
+
+        [SetUp]
+        public override void SetUp()
+        {
+            base.SetUp();
+
+            dir = NewDirectory();
+            IndexWriterConfig iwConfig = NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random()));
+            iwConfig.SetMergePolicy(NewLogMergePolicy());
+            RandomIndexWriter iw = new RandomIndexWriter(Random(), dir, iwConfig);
+            Document document = new Document();
+            Field idField = new StringField("id", "", Field.Store.NO);
+            document.Add(idField);
+            Field byteField = new StringField("byte", "", Field.Store.NO);
+            document.Add(byteField);
+            Field doubleField = new StringField("double", "", Field.Store.NO);
+            document.Add(doubleField);
+            Field floatField = new StringField("float", "", Field.Store.NO);
+            document.Add(floatField);
+            Field intField = new StringField("int", "", Field.Store.NO);
+            document.Add(intField);
+            Field longField = new StringField("long", "", Field.Store.NO);
+            document.Add(longField);
+            Field shortField = new StringField("short", "", Field.Store.NO);
+            document.Add(shortField);
+            Field stringField = new StringField("string", "", Field.Store.NO);
+            document.Add(stringField);
+            Field textField = new TextField("text", "", Field.Store.NO);
+            document.Add(textField);
+
+            foreach (string[] doc in documents)
+            {
+                idField.StringValue = doc[0];
+                byteField.StringValue = doc[1];
+                doubleField.StringValue = doc[2];
+                floatField.StringValue = doc[3];
+                intField.StringValue = doc[4];
+                longField.StringValue = doc[5];
+                shortField.StringValue = doc[6];
+                stringField.StringValue = doc[7];
+                textField.StringValue = doc[8];
+                iw.AddDocument(document);
+            }
+
+            reader = iw.Reader;
+            searcher = NewSearcher(reader);
+            iw.Dispose();
+        }
+        
+        [TearDown]
+        public override void TearDown()
+        {
+            base.TearDown();
+
+            searcher = null;
+            reader.Dispose();
+            reader = null;
+            dir.Dispose();
+            dir = null;
+        }
+        
+        [Test]
+        public void TestByte()
+        {
+            AssertHits(new FunctionQuery(new ByteFieldSource("byte")), new[] { 5f, 12f });
+        }
+
+        [Test]
+        public void TestConst()
+        {
+            AssertHits(new FunctionQuery(new ConstValueSource(0.3f)), new[] { 0.3f, 0.3f });
+        }
+
+        [Test]
+        public void TestDiv()
+        {
+            AssertHits(new FunctionQuery(new DivFloatFunction(new ConstValueSource(10f), new ConstValueSource(5f))), new[] { 2f, 2f });
+        }
+
+        [Test]
+        public void TestDocFreq()
+        {
+            AssertHits(new FunctionQuery(new DocFreqValueSource("bogus", "bogus", "text", new BytesRef("test"))), new[] { 2f, 2f });
+        }
+
+        [Test]
+        public void TestDoubleConst()
+        {
+            AssertHits(new FunctionQuery(new DoubleConstValueSource(0.3d)), new[] { 0.3f, 0.3f });
+        }
+
+        [Test]
+        public void TestDouble()
+        {
+            AssertHits(new FunctionQuery(new DoubleFieldSource("double")), new[] { 3.63f, 5.65f });
+        }
+
+        [Test]
+        public void TestFloat()
+        {
+            AssertHits(new FunctionQuery(new FloatFieldSource("float")), new[] { 5.2f, 9.3f });
+        }
+
+        [Test]
+        public void TestIDF()
+        {
+            Similarity saved = searcher.Similarity;
+            try
+            {
+                searcher.Similarity = new DefaultSimilarity();
+                AssertHits(new FunctionQuery(new IDFValueSource("bogus", "bogus", "text", new BytesRef("test"))), new[] { 0.5945349f, 0.5945349f });
+            }
+            finally
+            {
+                searcher.Similarity = saved;
+            }
+        }
+
+        [Test]
+        public void TestIf()
+        {
+            AssertHits(new FunctionQuery(new IfFunction(new BytesRefFieldSource("id"), new ConstValueSource(1.0f), new ConstValueSource(2.0f)
+               )), new[] { 1f, 1f });
+            // true just if a value exists...
+            AssertHits(new FunctionQuery(new IfFunction(new LiteralValueSource("false"), new ConstValueSource(1.0f), new ConstValueSource(2.0f)
+               )), new[] { 1f, 1f });
+        }
+
+        [Test]
+        public void TestInt()
+        {
+            AssertHits(new FunctionQuery(new IntFieldSource("int")), new[] { 35f, 54f });
+        }
+
+        [Test]
+        public void TestJoinDocFreq()
+        {
+            AssertHits(new FunctionQuery(new JoinDocFreqValueSource("string", "text")), new[] { 2f, 0f });
+        }
+
+        [Test]
+        public void TestLinearFloat()
+        {
+            AssertHits(new FunctionQuery(new LinearFloatFunction(new ConstValueSource(2.0f), 3, 1)), new[] { 7f, 7f });
+        }
+
+        [Test]
+        public void TestLong()
+        {
+            AssertHits(new FunctionQuery(new LongFieldSource("long")), new[] { 4343f, 1954f });
+        }
+
+        [Test]
+        public void TestMaxDoc()
+        {
+            AssertHits(new FunctionQuery(new MaxDocValueSource()), new[] { 2f, 2f });
+        }
+
+        [Test]
+        public void TestMaxFloat()
+        {
+            AssertHits(new FunctionQuery(new MaxFloatFunction(new ValueSource[] { new ConstValueSource(1f), new ConstValueSource(2f) })), new[] { 2f, 2f });
+        }
+
+        [Test]
+        public void TestMinFloat()
+        {
+            AssertHits(new FunctionQuery(new MinFloatFunction(new ValueSource[] { new ConstValueSource(1f), new ConstValueSource(2f) })), new[] { 1f, 1f });
+        }
+
+        [Test]
+        public void TestNorm()
+        {
+            Similarity saved = searcher.Similarity;
+            try
+            {
+                // no norm field (so agnostic to indexed similarity)
+                searcher.Similarity = new DefaultSimilarity();
+                AssertHits(new FunctionQuery(new NormValueSource("byte")), new[] { 0f, 0f });
+            }
+            finally
+            {
+                searcher.Similarity = saved;
+            }
+        }
+
+        [Test]
+        public void TestNumDocs()
+        {
+            AssertHits(new FunctionQuery(new NumDocsValueSource()), new[] { 2f, 2f });
+        }
+
+        [Test]
+        public void TestPow()
+        {
+            AssertHits(new FunctionQuery(new PowFloatFunction(new ConstValueSource(2f), new ConstValueSource(3f))), new[] { 8f, 8f });
+        }
+
+        [Test]
+        public void TestProduct()
+        {
+            AssertHits(new FunctionQuery(new ProductFloatFunction(new ValueSource[] { new ConstValueSource(2f), new ConstValueSource(3f) })), new[] { 6f, 6f });
+        }
+
+        [Test]
+        public void TestQuery()
+        {
+            AssertHits(new FunctionQuery(new QueryValueSource(new FunctionQuery(new ConstValueSource(2f)), 0f)), new[] { 2f, 2f });
+        }
+
+        [Test]
+        public void TestRangeMap()
+        {
+            AssertHits(new FunctionQuery(new RangeMapFloatFunction(new FloatFieldSource("float"), 5, 6, 1, 0f)), new[] { 1f, 0f });
+            AssertHits(new FunctionQuery(new RangeMapFloatFunction(new FloatFieldSource("float"), 5, 6, new SumFloatFunction(new ValueSource[] { new ConstValueSource(1f), new ConstValueSource(2f) }), new ConstValueSource(11f))), new[] { 3f, 11f });
+        }
+
+        [Test]
+        public void TestReciprocal()
+        {
+            AssertHits(new FunctionQuery(new ReciprocalFloatFunction(new ConstValueSource(2f), 3, 1, 4)), new[] { 0.1f, 0.1f });
+        }
+
+        [Test]
+        public void TestScale()
+        {
+            AssertHits(new FunctionQuery(new ScaleFloatFunction(new IntFieldSource("int"), 0, 1)), new[] { 0.0f, 1.0f });
+        }
+
+        [Test]
+        public void TestShort()
+        {
+            AssertHits(new FunctionQuery(new ShortFieldSource("short")), new[] { 945f, 123f });
+        }
+
+        [Test]
+        public void TestSumFloat()
+        {
+            AssertHits(new FunctionQuery(new SumFloatFunction(new ValueSource[] { new ConstValueSource(1f), new ConstValueSource(2f) })), new[] { 3f, 3f });
+        }
+
+        [Test]
+        public void TestSumTotalTermFreq()
+        {
+            if (Codec.Default.Name.Equals("Lucene3x"))
+            {
+                AssertHits(new FunctionQuery(new SumTotalTermFreqValueSource("text")), new[] { -1f, -1f });
+            }
+            else
+            {
+                AssertHits(new FunctionQuery(new SumTotalTermFreqValueSource("text")), new[] { 8f, 8f });
+            }
+        }
+
+        [Test]
+        public void TestTermFreq()
+        {
+            AssertHits(new FunctionQuery(new TermFreqValueSource("bogus", "bogus", "text", new BytesRef("test"))), new[] { 3f, 1f });
+            AssertHits(new FunctionQuery(new TermFreqValueSource("bogus", "bogus", "string", new BytesRef("bar"))), new[] { 0f, 1f });
+        }
+
+        [Test]
+        public void TestTF()
+        {
+            Similarity saved = searcher.Similarity;
+            try
+            {
+                // no norm field (so agnostic to indexed similarity)
+                searcher.Similarity = new DefaultSimilarity();
+                AssertHits(new FunctionQuery(new TFValueSource("bogus", "bogus", "text", new BytesRef("test"))), new[] { (float)Math.Sqrt(3d), (float)Math.Sqrt(1d) });
+                AssertHits(new FunctionQuery(new TFValueSource("bogus", "bogus", "string", new BytesRef("bar"))), new[] { 0f, 1f });
+            }
+            finally
+            {
+                searcher.Similarity = saved;
+            }
+        }
+
+        [Test]
+        public void TestTotalTermFreq()
+        {
+            if (Codec.Default.Name.Equals("Lucene3x"))
+            {
+                AssertHits(new FunctionQuery(new TotalTermFreqValueSource("bogus", "bogus", "text", new BytesRef("test"))), new[] { -1f, -1f });
+            }
+            else
+            {
+                AssertHits(new FunctionQuery(new TotalTermFreqValueSource("bogus", "bogus", "text", new BytesRef("test"))), new[] { 4f, 4f });
+            }
+        }
+
+        private static void AssertHits(Query q, float[] scores)
+        {
+            ScoreDoc[] expected = new ScoreDoc[scores.Length];
+            int[] expectedDocs = new int[scores.Length];
+            for (int i = 0; i < expected.Length; i++)
+            {
+                expectedDocs[i] = i;
+                expected[i] = new ScoreDoc(i, scores[i]);
+            }
+            TopDocs docs = searcher.Search(q, null, documents.Count, new Sort(new SortField("id", SortField.Type_e.STRING)), true, false);
+            CheckHits.DoCheckHits(Random(), q, "", searcher, expectedDocs);
+            CheckHits.CheckHitsQuery(q, expected, docs.ScoreDocs, expectedDocs);
+            CheckHits.CheckExplanations(q, "", searcher);
+        }
+    }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/2808ec74/src/Lucene.Net.Tests.Queries/Lucene.Net.Tests.Queries.csproj
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Tests.Queries/Lucene.Net.Tests.Queries.csproj b/src/Lucene.Net.Tests.Queries/Lucene.Net.Tests.Queries.csproj
index 9264faa..125123d 100644
--- a/src/Lucene.Net.Tests.Queries/Lucene.Net.Tests.Queries.csproj
+++ b/src/Lucene.Net.Tests.Queries/Lucene.Net.Tests.Queries.csproj
@@ -46,8 +46,19 @@
     <Compile Include="BoostingQueryTest.cs" />
     <Compile Include="ChainedFilterTest.cs" />
     <Compile Include="CommonTermsQueryTest.cs" />
+    <Compile Include="Function\FunctionTestSetup.cs" />
+    <Compile Include="Function\TestBoostedQuery.cs" />
+    <Compile Include="Function\TestDocValuesFieldSources.cs" />
+    <Compile Include="Function\TestFieldScoreQuery.cs" />
+    <Compile Include="Function\TestFunctionQuerySort.cs" />
+    <Compile Include="Function\TestLongNormValueSource.cs" />
+    <Compile Include="Function\TestOrdValues.cs" />
+    <Compile Include="Function\TestValueSources.cs" />
+    <Compile Include="Mlt\TestMoreLikeThis.cs" />
     <Compile Include="Properties\AssemblyInfo.cs" />
     <Compile Include="TermFilterTest.cs" />
+    <Compile Include="TermsFilterTest.cs" />
+    <Compile Include="TestCustomScoreQuery.cs" />
   </ItemGroup>
   <ItemGroup>
     <ProjectReference Include="..\Lucene.Net.Core\Lucene.Net.csproj">
@@ -66,10 +77,7 @@
   <ItemGroup>
     <None Include="packages.config" />
   </ItemGroup>
-  <ItemGroup>
-    <Folder Include="Function\" />
-    <Folder Include="Mlt\" />
-  </ItemGroup>
+  <ItemGroup />
   <Import Project="$(MSBuildToolsPath)\Microsoft.CSharp.targets" />
   <!-- To modify your build process, add your task inside one of the targets below and uncomment it. 
        Other similar extension points exist, see Microsoft.Common.targets.

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/2808ec74/src/Lucene.Net.Tests.Queries/Mlt/TestMoreLikeThis.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Tests.Queries/Mlt/TestMoreLikeThis.cs b/src/Lucene.Net.Tests.Queries/Mlt/TestMoreLikeThis.cs
new file mode 100644
index 0000000..49811fe
--- /dev/null
+++ b/src/Lucene.Net.Tests.Queries/Mlt/TestMoreLikeThis.cs
@@ -0,0 +1,134 @@
+using System.Collections.Generic;
+using System.IO;
+using Lucene.Net.Analysis;
+using Lucene.Net.Documents;
+using Lucene.Net.Index;
+using Lucene.Net.Queries.Mlt;
+using Lucene.Net.Search;
+using Lucene.Net.Util;
+using NUnit.Framework;
+using Directory = Lucene.Net.Store.Directory;
+
+namespace Lucene.Net.Tests.Queries.Mlt
+{
+    public class TestMoreLikeThis : LuceneTestCase
+    {
+        private Directory directory;
+        private IndexReader reader;
+        private IndexSearcher searcher;
+        
+        [SetUp]
+        public override void SetUp()
+        {
+            base.SetUp();
+            directory = NewDirectory();
+            RandomIndexWriter writer = new RandomIndexWriter(Random(), directory);
+
+            // Add series of docs with specific information for MoreLikeThis
+            AddDoc(writer, "lucene");
+            AddDoc(writer, "lucene release");
+
+            reader = writer.Reader;
+            writer.Dispose();
+            searcher = NewSearcher(reader);
+        }
+        
+        [TearDown]
+        public override void TearDown()
+        {
+            reader.Dispose();
+            directory.Dispose();
+            base.TearDown();
+        }
+        
+        private static void AddDoc(RandomIndexWriter writer, string text)
+        {
+            Document doc = new Document();
+            doc.Add(NewTextField("text", text, Field.Store.YES));
+            writer.AddDocument(doc);
+        }
+        
+        [Test]
+        public void TestBoostFactor()
+        {
+            IDictionary<string, float?> originalValues = OriginalValues;
+
+            MoreLikeThis mlt = new MoreLikeThis(reader);
+            mlt.Analyzer = new MockAnalyzer(Random(), MockTokenizer.WHITESPACE, false);
+            mlt.MinDocFreq = 1;
+            mlt.MinTermFreq = 1;
+            mlt.MinWordLen = 1;
+            mlt.FieldNames = new[] { "text" };
+            mlt.Boost = true;
+
+            // this mean that every term boost factor will be multiplied by this
+            // number
+            float boostFactor = 5;
+            mlt.BoostFactor = boostFactor;
+
+            BooleanQuery query = (BooleanQuery)mlt.Like(new StringReader("lucene release"), "text");
+            IList<BooleanClause> clauses = query.Clauses;
+
+            assertEquals("Expected " + originalValues.Count + " clauses.", originalValues.Count, clauses.Count);
+
+            foreach (BooleanClause clause in clauses)
+            {
+                TermQuery tq = (TermQuery)clause.Query;
+                float? termBoost = originalValues[tq.Term.Text()];
+                assertNotNull("Expected term " + tq.Term.Text(), termBoost);
+
+                float totalBoost = (float) (termBoost * boostFactor);
+                assertEquals("Expected boost of " + totalBoost + " for term '" + tq.Term.Text() + "' got " + tq.Boost, totalBoost, tq.Boost, 0.0001);
+            }
+        }
+        
+        private IDictionary<string, float?> OriginalValues
+        {
+            get
+            {
+                IDictionary<string, float?> originalValues = new Dictionary<string, float?>();
+                MoreLikeThis mlt = new MoreLikeThis(reader);
+                mlt.Analyzer = new MockAnalyzer(Random(), MockTokenizer.WHITESPACE, false);
+                mlt.MinDocFreq = 1;
+                mlt.MinTermFreq = 1;
+                mlt.MinWordLen = 1;
+                mlt.FieldNames = new[] { "text" };
+                mlt.Boost = true;
+                BooleanQuery query = (BooleanQuery)mlt.Like(new StringReader("lucene release"), "text");
+                IList<BooleanClause> clauses = query.Clauses;
+
+                foreach (BooleanClause clause in clauses)
+                {
+                    TermQuery tq = (TermQuery)clause.Query;
+                    originalValues[tq.Term.Text()] = tq.Boost;
+                }
+                return originalValues;
+            }
+        }
+
+        // LUCENE-3326
+        [Test]
+        public void TestMultiFields()
+        {
+            MoreLikeThis mlt = new MoreLikeThis(reader);
+            mlt.Analyzer = new MockAnalyzer(Random(), MockTokenizer.WHITESPACE, false);
+            mlt.MinDocFreq = 1;
+            mlt.MinTermFreq = 1;
+            mlt.MinWordLen = 1;
+            mlt.FieldNames = new[] { "text", "foobar" };
+            mlt.Like(new StringReader("this is a test"), "foobar");
+        }
+
+        /// <summary>
+        /// just basic equals/hashcode etc
+        /// </summary>
+        [Test]
+        public void TestMoreLikeThisQuery()
+        {
+            Query query = new MoreLikeThisQuery("this is a test", new[] { "text" }, new MockAnalyzer(Random()), "text");
+            QueryUtils.Check(Random(), query, searcher);
+        }
+
+        // TODO: add tests for the MoreLikeThisQuery
+    }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/2808ec74/src/Lucene.Net.Tests.Queries/TermsFilterTest.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Tests.Queries/TermsFilterTest.cs b/src/Lucene.Net.Tests.Queries/TermsFilterTest.cs
new file mode 100644
index 0000000..1798e3c
--- /dev/null
+++ b/src/Lucene.Net.Tests.Queries/TermsFilterTest.cs
@@ -0,0 +1,326 @@
+using System;
+using System.Collections.Generic;
+using System.Linq;
+using Lucene.Net.Documents;
+using Lucene.Net.Index;
+using Lucene.Net.Queries;
+using Lucene.Net.Randomized.Generators;
+using Lucene.Net.Search;
+using Lucene.Net.Store;
+using Lucene.Net.Support;
+using Lucene.Net.Util;
+using NUnit.Framework;
+
+namespace Lucene.Net.Tests.Queries
+{
+    public class TermsFilterTest : LuceneTestCase
+    {
+        /// <summary>
+        /// Filters built from the same terms (regardless of order or duplicates)
+        /// must be equal, so a cache keyed on the filter returns a hit for any
+        /// equivalent filter.
+        /// </summary>
+        [Test]
+        public void TestCachability()
+        {
+            TermsFilter first = TermsFilter(Random().NextBoolean(), new Term("field1", "a"), new Term("field1", "b"));
+            var cache = new HashSet<Filter> { first };
+            // Same terms in reverse order hash/compare equal.
+            TermsFilter reversed = TermsFilter(Random().NextBoolean(), new Term("field1", "b"), new Term("field1", "a"));
+            assertTrue("Must be cached", cache.Contains(reversed));
+            // A duplicated term must not affect equality.
+            TermsFilter duplicated = TermsFilter(true, new Term("field1", "a"), new Term("field1", "a"), new Term("field1", "b"));
+            assertTrue("Must be cached", cache.Contains(duplicated));
+            // An extra distinct term makes the filter different.
+            TermsFilter extended = TermsFilter(Random().NextBoolean(), new Term("field1", "a"), new Term("field1", "a"), new Term("field1", "b"), new Term("field1", "v"));
+            assertFalse("Must not be cached", cache.Contains(extended));
+        }
+
+        /// <summary>
+        /// Indexes the terms "0", "10", ..., "990" and verifies that GetDocIdSet
+        /// returns null when no filter term matches, and the correct cardinality
+        /// as matching terms are added alongside misses.
+        /// </summary>
+        [Test]
+        public void TestMissingTerms()
+        {
+            string fieldName = "field1";
+            Directory rd = NewDirectory();
+            RandomIndexWriter w = new RandomIndexWriter(Random(), rd);
+            for (int i = 0; i < 100; i++)
+            {
+                Document doc = new Document();
+                int term = i * 10; //terms are units of 10;
+                doc.Add(NewStringField(fieldName, "" + term, Field.Store.YES));
+                w.AddDocument(doc);
+            }
+            // Collapse to a single-segment view so one GetDocIdSet call covers the index.
+            IndexReader reader = SlowCompositeReaderWrapper.Wrap(w.Reader);
+            assertTrue(reader.Context is AtomicReaderContext);
+            AtomicReaderContext context = (AtomicReaderContext)reader.Context;
+            w.Dispose();
+
+            IList<Term> terms = new List<Term>();
+            terms.Add(new Term(fieldName, "19"));
+            // "19" was never indexed: a filter containing only misses yields a null DocIdSet.
+            FixedBitSet bits = (FixedBitSet)TermsFilter(Random().NextBoolean(), terms).GetDocIdSet(context, context.AtomicReader.LiveDocs);
+            assertNull("Must match nothing", bits);
+
+            // "20" is indexed, so exactly one doc matches despite the miss on "19".
+            terms.Add(new Term(fieldName, "20"));
+            bits = (FixedBitSet)TermsFilter(Random().NextBoolean(), terms).GetDocIdSet(context, context.AtomicReader.LiveDocs);
+            assertEquals("Must match 1", 1, bits.Cardinality());
+
+            terms.Add(new Term(fieldName, "10"));
+            bits = (FixedBitSet)TermsFilter(Random().NextBoolean(), terms).GetDocIdSet(context, context.AtomicReader.LiveDocs);
+            assertEquals("Must match 2", 2, bits.Cardinality());
+
+            // "00" is not indexed (term 0 was written as "0", without a leading zero).
+            terms.Add(new Term(fieldName, "00"));
+            bits = (FixedBitSet)TermsFilter(Random().NextBoolean(), terms).GetDocIdSet(context, context.AtomicReader.LiveDocs);
+            assertEquals("Must match 2", 2, bits.Cardinality());
+
+            reader.Dispose();
+            rd.Dispose();
+        }
+        
+        /// <summary>
+        /// Queries a MultiReader over two single-doc indexes with a term whose
+        /// field exists in only one of them; segments where the term has no
+        /// postings must yield a null DocIdSet rather than throwing.
+        /// </summary>
+        [Test]
+        public void TestMissingField()
+        {
+            // First index: a single doc with field1:content1.
+            string field1 = "field1";
+            Directory dir1 = NewDirectory();
+            RandomIndexWriter writer1 = new RandomIndexWriter(Random(), dir1);
+            var document = new Document();
+            document.Add(NewStringField(field1, "content1", Field.Store.YES));
+            writer1.AddDocument(document);
+            IndexReader reader1 = writer1.Reader;
+            writer1.Dispose();
+
+            // Second index: a single doc with field2:content2.
+            string field2 = "field2";
+            Directory dir2 = NewDirectory();
+            RandomIndexWriter writer2 = new RandomIndexWriter(Random(), dir2);
+            document = new Document();
+            document.Add(NewStringField(field2, "content2", Field.Store.YES));
+            writer2.AddDocument(document);
+            IndexReader reader2 = writer2.Reader;
+            writer2.Dispose();
+
+            // field2:content1 was never indexed anywhere.
+            TermsFilter filter = new TermsFilter(new Term(field2, "content1"));
+            MultiReader multiReader = new MultiReader(reader1, reader2);
+            foreach (AtomicReaderContext leaf in multiReader.Leaves)
+            {
+                DocIdSet docIdSet = filter.GetDocIdSet(leaf, leaf.AtomicReader.LiveDocs);
+                if (leaf.Reader.DocFreq(new Term(field2, "content1")) == 0)
+                {
+                    // Leaf with no postings for the term: no DocIdSet at all.
+                    assertNull(docIdSet);
+                }
+                else
+                {
+                    FixedBitSet bits = (FixedBitSet)docIdSet;
+                    assertTrue("Must be >= 0", bits.Cardinality() >= 0);
+                }
+            }
+            multiReader.Dispose();
+            reader1.Dispose();
+            reader2.Dispose();
+            dir1.Dispose();
+            dir2.Dispose();
+        }
+
+        /// <summary>
+        /// Adds one filter term per field but skips indexing a document for
+        /// exactly one of those fields; the filter must match every indexed doc
+        /// and must not trip over the field that has no postings.
+        /// </summary>
+        [Test]
+        public void TestFieldNotPresent()
+        {
+            Directory dir = NewDirectory();
+            RandomIndexWriter w = new RandomIndexWriter(Random(), dir);
+            int num = AtLeast(3);
+            int skip = Random().Next(num);
+            var terms = new List<Term>();
+            for (int i = 0; i < num; i++)
+            {
+                terms.Add(new Term("field" + i, "content1"));
+                Document doc = new Document();
+                if (skip == i)
+                {
+                    // This field gets a filter term but no indexed document.
+                    continue;
+                }
+                doc.Add(NewStringField("field" + i, "content1", Field.Store.YES));
+                w.AddDocument(doc);
+            }
+
+            // Collapse to a single segment so one GetDocIdSet call covers everything.
+            w.ForceMerge(1);
+            IndexReader reader = w.Reader;
+            w.Dispose();
+            assertEquals(1, reader.Leaves.size());
+
+
+
+            AtomicReaderContext context = reader.Leaves.First();
+            TermsFilter tf = new TermsFilter(terms);
+
+            FixedBitSet bits = (FixedBitSet)tf.GetDocIdSet(context, context.AtomicReader.LiveDocs);
+            assertEquals("Must be num fields - 1 since we skip only one field", num - 1, bits.Cardinality());
+            reader.Dispose();
+            dir.Dispose();
+        }
+
+        /// <summary>
+        /// Indexes one document per randomly-chosen field and then adds extra
+        /// filter-only terms for fields that may have no documents at all;
+        /// every live doc must still be matched exactly once.
+        /// </summary>
+        [Test]
+        public void TestSkipField()
+        {
+            Directory dir = NewDirectory();
+            RandomIndexWriter w = new RandomIndexWriter(Random(), dir);
+            int num = AtLeast(10);
+            var terms = new HashSet<Term>();
+            for (int i = 0; i < num; i++)
+            {
+                string field = "field" + Random().Next(100);
+                terms.Add(new Term(field, "content1"));
+                Document doc = new Document();
+                doc.Add(NewStringField(field, "content1", Field.Store.YES));
+                w.AddDocument(doc);
+            }
+            // Add filter terms for fields that were never indexed above; they must
+            // be skipped harmlessly by the filter.
+            int randomFields = Random().Next(10);
+            for (int i = 0; i < randomFields; i++)
+            {
+                while (true)
+                {
+                    string field = "field" + Random().Next(100);
+                    Term t = new Term(field, "content1");
+                    if (!terms.Contains(t))
+                    {
+                        terms.Add(t);
+                        break;
+                    }
+                }
+            }
+            w.ForceMerge(1);
+            IndexReader reader = w.Reader;
+            w.Dispose();
+            assertEquals(1, reader.Leaves.size());
+            AtomicReaderContext context = reader.Leaves.First();
+            TermsFilter tf = new TermsFilter(terms.ToList());
+
+            // Every live doc carries "content1" in some field covered by the filter.
+            FixedBitSet bits = (FixedBitSet)tf.GetDocIdSet(context, context.AtomicReader.LiveDocs);
+            assertEquals(context.Reader.NumDocs, bits.Cardinality());
+            reader.Dispose();
+            dir.Dispose();
+        }
+
+        /// <summary>
+        /// Cross-checks TermsFilter against an equivalent BooleanQuery of
+        /// TermQuery SHOULD clauses over random terms: both must produce the
+        /// same hit count and the same docs in the same order.
+        /// </summary>
+        [Test]
+        public void TestRandom()
+        {
+            Directory dir = NewDirectory();
+            RandomIndexWriter w = new RandomIndexWriter(Random(), dir);
+            int num = AtLeast(100);
+            bool singleField = Random().NextBoolean();
+            IList<Term> terms = new List<Term>();
+            for (int i = 0; i < num; i++)
+            {
+                string field = "field" + (singleField ? "1" : Random().Next(100).ToString());
+                string @string = TestUtil.RandomRealisticUnicodeString(Random());
+                terms.Add(new Term(field, @string));
+                Document doc = new Document();
+                doc.Add(NewStringField(field, @string, Field.Store.YES));
+                w.AddDocument(doc);
+            }
+            IndexReader reader = w.Reader;
+            w.Dispose();
+
+            IndexSearcher searcher = NewSearcher(reader);
+
+            int numQueries = AtLeast(10);
+            for (int i = 0; i < numQueries; i++)
+            {
+                // Each round queries a random-length prefix of the shuffled term list.
+                CollectionsHelper.Shuffle(terms);
+                int numTerms = 1 + Random().Next(Math.Min(BooleanQuery.MaxClauseCount, terms.Count));
+                BooleanQuery bq = new BooleanQuery();
+                for (int j = 0; j < numTerms; j++)
+                {
+                    bq.Add(new BooleanClause(new TermQuery(terms[j]), BooleanClause.Occur.SHOULD));
+                }
+                // Reference result: constant-score disjunction of the same terms.
+                TopDocs queryResult = searcher.Search(new ConstantScoreQuery(bq), reader.MaxDoc);
+
+                MatchAllDocsQuery matchAll = new MatchAllDocsQuery();
+                TermsFilter filter = TermsFilter(singleField, terms.SubList(0, numTerms));
+                TopDocs filterResult = searcher.Search(matchAll, filter, reader.MaxDoc);
+                assertEquals(filterResult.TotalHits, queryResult.TotalHits);
+                ScoreDoc[] scoreDocs = filterResult.ScoreDocs;
+                for (int j = 0; j < scoreDocs.Length; j++)
+                {
+                    assertEquals(scoreDocs[j].Doc, queryResult.ScoreDocs[j].Doc);
+                }
+            }
+
+            reader.Dispose();
+            dir.Dispose();
+        }
+
+        /// <summary>
+        /// Convenience overload: forwards a params array to the
+        /// <see cref="IEnumerable{T}"/>-based factory.
+        /// </summary>
+        private TermsFilter TermsFilter(bool singleField, params Term[] terms)
+        {
+            return TermsFilter(singleField, new List<Term>(terms));
+        }
+
+        /// <summary>
+        /// Builds a TermsFilter either via the Term-list constructor or, when
+        /// <paramref name="singleField"/> is true, via the (field, BytesRef list)
+        /// constructor — asserting along the way that all terms really do share
+        /// one field name.
+        /// </summary>
+        private TermsFilter TermsFilter(bool singleField, IEnumerable<Term> termList)
+        {
+            if (!singleField)
+            {
+                return new TermsFilter(termList.ToList());
+            }
+            string sharedField = null;
+            var termBytes = new List<BytesRef>();
+            foreach (Term t in termList)
+            {
+                termBytes.Add(t.Bytes);
+                if (sharedField != null)
+                {
+                    // Every term must agree on the field for the single-field ctor.
+                    assertEquals(t.Field, sharedField);
+                }
+                sharedField = t.Field;
+            }
+            assertNotNull(sharedField);
+            return new TermsFilter(sharedField, termBytes);
+        }
+
+        /// <summary>
+        /// Equals/GetHashCode must be insensitive to term order and duplicates,
+        /// while removing any term must break equality.
+        /// </summary>
+        [Test]
+        public void TestHashCodeAndEquals()
+        {
+            int num = AtLeast(100);
+            bool singleField = Random().NextBoolean();
+            IList<Term> terms = new List<Term>();
+            var uniqueTerms = new HashSet<Term>();
+            for (int i = 0; i < num; i++)
+            {
+                string field = "field" + (singleField ? "1" : Random().Next(100).ToString());
+                string @string = TestUtil.RandomRealisticUnicodeString(Random());
+                terms.Add(new Term(field, @string));
+                uniqueTerms.Add(new Term(field, @string));
+                // left is built from the deduplicated set, right from the shuffled
+                // list that may contain duplicates — they must still be equal.
+                TermsFilter left = TermsFilter(singleField && Random().NextBoolean(), uniqueTerms);
+                CollectionsHelper.Shuffle(terms);
+                TermsFilter right = TermsFilter(singleField && Random().NextBoolean(), terms);
+                assertEquals(right, left);
+                assertEquals(right.GetHashCode(), left.GetHashCode());
+                if (uniqueTerms.Count > 1)
+                {
+                    // Dropping one term must make the filter unequal to both.
+                    IList<Term> asList = new List<Term>(uniqueTerms);
+                    asList.RemoveAt(0);
+                    TermsFilter notEqual = TermsFilter(singleField && Random().NextBoolean(), asList);
+                    assertFalse(left.Equals(notEqual));
+                    assertFalse(right.Equals(notEqual));
+                }
+            }
+        }
+
+        /// <summary>
+        /// Terms chosen to collide on hash code (in Java, "AaAaBB" and "BBBBBB"
+        /// hash identically) must still be distinguished by Equals.
+        /// </summary>
+        [Test]
+        public void TestSingleFieldEquals()
+        {
+            // Two terms with the same hash code
+            //assertEquals("AaAaBB".GetHashCode(), "BBBBBB".GetHashCode());
+            TermsFilter first = TermsFilter(true, new Term("id", "AaAaAa"), new Term("id", "AaAaBB"));
+            TermsFilter second = TermsFilter(true, new Term("id", "AaAaAa"), new Term("id", "BBBBBB"));
+            assertFalse(first.Equals(second));
+        }
+
+        /// <summary>
+        /// Every TermsFilter constructor must reject an empty term/bytes
+        /// collection with an ArgumentException.
+        /// </summary>
+        [Test]
+        public void TestNoTerms()
+        {
+            var noTerms = new List<Term>();
+            var noBytes = new List<BytesRef>();
+
+            Assert.Throws<ArgumentException>(() => new TermsFilter(noTerms));
+            Assert.Throws<ArgumentException>(() => new TermsFilter(noTerms.ToArray()));
+            Assert.Throws<ArgumentException>(() => new TermsFilter(null, noBytes.ToArray()));
+            Assert.Throws<ArgumentException>(() => new TermsFilter(null, noBytes));
+        }
+
+        /// <summary>
+        /// ToString renders each term as "field:text", space separated.
+        /// </summary>
+        [Test]
+        public void TestToString()
+        {
+            var filter = new TermsFilter(new Term("field1", "a"), new Term("field1", "b"), new Term("field1", "c"));
+            assertEquals("field1:a field1:b field1:c", filter.ToString());
+        }
+    }
+}
\ No newline at end of file