Posted to commits@lucenenet.apache.org by mh...@apache.org on 2013/09/24 20:32:37 UTC

[01/50] [abbrv] git commit: Port: a bunch of Util test classes

Updated Branches:
  refs/heads/branch_4x ea1dc3c67 -> a8ad47860


Port: a bunch of Util test classes


Project: http://git-wip-us.apache.org/repos/asf/lucenenet/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucenenet/commit/d3c00f5a
Tree: http://git-wip-us.apache.org/repos/asf/lucenenet/tree/d3c00f5a
Diff: http://git-wip-us.apache.org/repos/asf/lucenenet/diff/d3c00f5a

Branch: refs/heads/branch_4x
Commit: d3c00f5a0b79338cb9fdaac7abfdfb622b948eb3
Parents: d4b0864
Author: James Blair <jm...@gmail.com>
Authored: Wed Jul 10 00:16:37 2013 -0400
Committer: James Blair <jm...@gmail.com>
Committed: Wed Jul 10 00:16:37 2013 -0400

----------------------------------------------------------------------
 test/core/Lucene.Net.Test.csproj           |  19 +-
 test/core/Support/RandomExtensions.cs      |  47 +++
 test/core/Util/ArrayUtilTest.cs            |  95 ------
 test/core/Util/StressRamUsageEstimator.cs  | 153 ++++++++++
 test/core/Util/Test2BPagedBytes.cs         |  62 ++++
 test/core/Util/TestArrayUtil.cs            | 390 ++++++++++++++++++++++++
 test/core/Util/TestAttributeSource.cs      | 120 +++-----
 test/core/Util/TestBitVector.cs            | 311 -------------------
 test/core/Util/TestByteBlockPool.cs        |  59 ++++
 test/core/Util/TestBytesRef.cs             |  68 +++++
 test/core/Util/TestBytesRefHash.cs         | 187 ++++++++++++
 test/core/Util/TestCharsRef.cs             | 147 +++++++++
 test/core/Util/TestCloseableThreadLocal.cs |  37 +--
 test/core/Util/TestCollectionUtil.cs       | 186 +++++++++++
 test/core/Util/TestDoubleBarrelLRUCache.cs | 212 +++++++++++++
 15 files changed, 1583 insertions(+), 510 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucenenet/blob/d3c00f5a/test/core/Lucene.Net.Test.csproj
----------------------------------------------------------------------
diff --git a/test/core/Lucene.Net.Test.csproj b/test/core/Lucene.Net.Test.csproj
index 212dbe8..1426b60 100644
--- a/test/core/Lucene.Net.Test.csproj
+++ b/test/core/Lucene.Net.Test.csproj
@@ -501,6 +501,7 @@
     <Compile Include="SupportClassException.cs" />
     <Compile Include="Support\BigObject.cs" />
     <Compile Include="Support\CollisionTester.cs" />
+    <Compile Include="Support\RandomExtensions.cs" />
     <Compile Include="Support\TestWeakDictionaryBehavior.cs" />
     <Compile Include="Support\TestWeakDictionary.cs" />
     <Compile Include="Support\SmallObject.cs" />
@@ -526,18 +527,23 @@
       <SubType>Code</SubType>
     </Compile>
     <Compile Include="Support\TestSupportClass.cs" />
-    <Compile Include="Util\ArrayUtilTest.cs" />
     <Compile Include="Util\Cache\TestSimpleLRUCache.cs" />
     <Compile Include="Util\English.cs">
       <SubType>Code</SubType>
     </Compile>
     <Compile Include="Util\LocalizedTestCase.cs" />
     <Compile Include="Util\Paths.cs" />
+    <Compile Include="Util\StressRamUsageEstimator.cs" />
+    <Compile Include="Util\Test2BPagedBytes.cs" />
+    <Compile Include="Util\TestArrayUtil.cs" />
     <Compile Include="Util\TestAttributeSource.cs" />
-    <Compile Include="Util\TestBitVector.cs">
-      <SubType>Code</SubType>
-    </Compile>
+    <Compile Include="Util\TestByteBlockPool.cs" />
+    <Compile Include="Util\TestBytesRef.cs" />
+    <Compile Include="Util\TestBytesRefHash.cs" />
+    <Compile Include="Util\TestCharsRef.cs" />
     <Compile Include="Util\TestCloseableThreadLocal.cs" />
+    <Compile Include="Util\TestCollectionUtil.cs" />
+    <Compile Include="Util\TestDoubleBarrelLRUCache.cs" />
     <Compile Include="Util\TestFieldCacheSanityChecker.cs" />
     <Compile Include="Util\TestIndexableBinaryStringTools.cs" />
     <Compile Include="Util\TestNumericUtils.cs" />
@@ -622,6 +628,11 @@
   <ItemGroup>
     <Content Include="UpdatedTests.txt" />
   </ItemGroup>
+  <ItemGroup>
+    <Folder Include="Util\Automaton\" />
+    <Folder Include="Util\Fst\" />
+    <Folder Include="Util\Packed\" />
+  </ItemGroup>
   <Import Project="$(MSBuildBinPath)\Microsoft.CSharp.targets" />
   <PropertyGroup>
     <PreBuildEvent>

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/d3c00f5a/test/core/Support/RandomExtensions.cs
----------------------------------------------------------------------
diff --git a/test/core/Support/RandomExtensions.cs b/test/core/Support/RandomExtensions.cs
new file mode 100644
index 0000000..49625e5
--- /dev/null
+++ b/test/core/Support/RandomExtensions.cs
@@ -0,0 +1,47 @@
+using System;
+
+namespace Lucene.Net.Test.Support
+{
+    public static class RandomExtensions
+    {
+        private static bool BoolTieBreak = false;
+
+        public static long NextLong(this Random random)
+        {
+            return random.NextLong(long.MaxValue);
+        }
+
+        public static long NextLong(this Random random, long max)
+        {
+            return random.NextLong(0, max);
+        }
+
+        public static long NextLong(this Random random, long min, long max)
+        {
+            // Scale NextDouble into [min, max) so results never fall below min.
+            return min + (long)(random.NextDouble() * (max - min));
+        }
+
+        public static bool NextBool(this Random random)
+        {
+            var randInt = random.Next();
+            var adjusted = randInt - (int.MaxValue/2);
+            if (adjusted == 0)
+            {
+                BoolTieBreak = !BoolTieBreak;
+                return BoolTieBreak;
+            }
+            return adjusted > 0;
+        }
+
+        public static void NextBytes(this Random random, sbyte[] bytes)
+        {
+            var length = bytes.Length;
+            var randBytes = new byte[length];
+            random.NextBytes(randBytes);
+            for (var i = 0; i < bytes.Length; i++)
+            {
+                bytes[i] = (sbyte) randBytes[i];
+            }
+        }
+    }
+}
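
These extensions emulate java.util.Random methods (nextLong, nextBoolean,
nextBytes(byte[])) that System.Random lacks. A minimal usage sketch follows;
the demo class is hypothetical and not part of this commit:

    using System;
    using Lucene.Net.Test.Support;

    internal static class RandomExtensionsDemo
    {
        private static void Main()
        {
            var rnd = new Random(42);
            long l = rnd.NextLong(10, 100);  // value in [10, 100)
            bool b = rnd.NextBool();         // roughly fair coin flip
            var buf = new sbyte[16];
            rnd.NextBytes(buf);              // sbyte overload defined above
            Console.WriteLine("{0} {1} {2}", l, b, buf[0]);
        }
    }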

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/d3c00f5a/test/core/Util/ArrayUtilTest.cs
----------------------------------------------------------------------
diff --git a/test/core/Util/ArrayUtilTest.cs b/test/core/Util/ArrayUtilTest.cs
deleted file mode 100644
index 8d312aa..0000000
--- a/test/core/Util/ArrayUtilTest.cs
+++ /dev/null
@@ -1,95 +0,0 @@
-/* 
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- * 
- * http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-using System;
-
-using NUnit.Framework;
-
-namespace Lucene.Net.Util
-{
-	
-	
-	/// <summary> 
-	/// 
-	/// 
-	/// </summary>
-    [TestFixture]
-	public class ArrayUtilTest:LuceneTestCase
-	{
-		
-        [Test]
-		public virtual void  TestParseInt()
-		{
-			int test;
-			try
-			{
-				test = ArrayUtil.ParseInt("".ToCharArray());
-				Assert.IsTrue(false);
-			}
-			catch (System.FormatException)
-			{
-				//expected
-			}
-			try
-			{
-				test = ArrayUtil.ParseInt("foo".ToCharArray());
-				Assert.IsTrue(false);
-			}
-			catch (System.FormatException)
-			{
-				//expected
-			}
-			try
-			{
-				test = ArrayUtil.ParseInt(System.Convert.ToString(System.Int64.MaxValue).ToCharArray());
-				Assert.IsTrue(false);
-			}
-			catch (System.FormatException)
-			{
-				//expected
-			}
-			try
-			{
-				test = ArrayUtil.ParseInt("0.34".ToCharArray());
-				Assert.IsTrue(false);
-			}
-			catch (System.FormatException)
-			{
-				//expected
-			}
-			
-			try
-			{
-				test = ArrayUtil.ParseInt("1".ToCharArray());
-				Assert.IsTrue(test == 1, test + " does not equal: " + 1);
-				test = ArrayUtil.ParseInt("-10000".ToCharArray());
-				Assert.IsTrue(test == - 10000, test + " does not equal: " + (- 10000));
-				test = ArrayUtil.ParseInt("1923".ToCharArray());
-				Assert.IsTrue(test == 1923, test + " does not equal: " + 1923);
-				test = ArrayUtil.ParseInt("-1".ToCharArray());
-				Assert.IsTrue(test == - 1, test + " does not equal: " + (- 1));
-				test = ArrayUtil.ParseInt("foo 1923 bar".ToCharArray(), 4, 4);
-				Assert.IsTrue(test == 1923, test + " does not equal: " + 1923);
-			}
-			catch (System.FormatException e)
-			{
-				System.Console.Error.WriteLine(e.StackTrace);
-				Assert.IsTrue(false);
-			}
-		}
-	}
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/d3c00f5a/test/core/Util/StressRamUsageEstimator.cs
----------------------------------------------------------------------
diff --git a/test/core/Util/StressRamUsageEstimator.cs b/test/core/Util/StressRamUsageEstimator.cs
new file mode 100644
index 0000000..8472fca
--- /dev/null
+++ b/test/core/Util/StressRamUsageEstimator.cs
@@ -0,0 +1,153 @@
+using System;
+using System.Collections.Generic;
+using System.Linq;
+using Lucene.Net.Support;
+using Lucene.Net.Util;
+using NUnit.Framework;
+
+namespace Lucene.Net.Test.Util
+{
+    public class StressRamUsageEstimator : LuceneTestCase
+    {
+        internal class Entry
+        {
+            internal object o;
+            internal Entry next;
+
+            public Entry CreateNext(object o)
+            {
+                var e = new Entry {o = o, next = next};
+                this.next = e;
+                return e;
+            }
+        }
+
+        [Ignore]
+        public void TestChainedEstimation()
+        {
+            MemoryMXBean memoryMXBean = ManagementFactory.GetMemoryMXBean();
+
+            var rnd = new Random();
+            var first = new Entry();
+
+            try
+            {
+                while (true)
+                {
+                    // Check the current memory consumption and provide the estimate
+                    var jvmUsed = memoryMXBean.GetHeapMemoryUsage().GetUsed();
+                    var estimated = RamUsageEstimator.SizeOf(first);
+                    Console.WriteLine("{0}, {1}", jvmUsed, estimated);
+
+                    // Make a batch of objects
+                    for (var i = 0; i < 5000; i++)
+                    {
+                        first.CreateNext(new sbyte[rnd.Next(1024)]);
+                    }
+                }
+            }
+            catch (OutOfMemoryException)
+            {
+                // Release and quit
+            }
+        }
+
+        internal volatile object guard;
+
+        // This shows an easy stack overflow because we're counting recursively.
+        public void TestLargeSetOfByteArrays()
+        {
+            MemoryMXBean memoryMXBean = ManagementFactory.GetMemoryMXBean();
+
+            CauseGc();
+            var before = memoryMXBean.GetHeapMemoryUsage().GetUsed();
+            var all = new object[1000000];
+            for (var i = 0; i < all.Length; i++)
+            {
+                all[i] = new byte[new Random().Next(3)];
+            }
+            CauseGc();
+            var after = memoryMXBean.GetHeapMemoryUsage().GetUsed();
+            Console.WriteLine("mx:  " + RamUsageEstimator.HumanReadableUnits(after - before));
+            Console.WriteLine("rue: " + RamUsageEstimator.HumanReadableUnits(ShallowSizeOf(all)));
+
+            guard = all;
+        }
+
+        private long ShallowSizeOf(object[] all)
+        {
+            return RamUsageEstimator.ShallowSizeOf(all)
+                + all.Sum(o => RamUsageEstimator.ShallowSizeOf(o));
+        }
+
+        private long ShallowSizeOf(object[][] all)
+        {
+            var s = RamUsageEstimator.ShallowSizeOf(all);
+            foreach (var o in all)
+            {
+                s += RamUsageEstimator.ShallowSizeOf(o);
+                s += o.Sum(o2 => RamUsageEstimator.ShallowSizeOf(o2));
+            }
+            return s;
+        }
+
+        public void TestSimpleByteArrays()
+        {
+            MemoryMXBean memoryMXBean = ManagementFactory.GetMemoryMXBean();
+
+            var all = new object[0][];
+            try
+            {
+                while (true)
+                {
+                    // Check the current memory consumption and provide the estimate.
+                    CauseGc();
+                    var mu = memoryMXBean.GetHeapMemoryUsage();
+                    var estimated = ShallowSizeOf(all);
+                    if (estimated > 50 * RamUsageEstimator.ONE_MB)
+                    {
+                        break;
+                    }
+
+                    Console.WriteLine("{0}\t{1}\t{2}",
+                        RamUsageEstimator.HumanReadableUnits(mu.GetUsed()),
+                        RamUsageEstimator.HumanReadableUnits(mu.GetMax()),
+                        RamUsageEstimator.HumanReadableUnits(estimated));
+
+                    // Make another batch of objects.
+                    var seg = new object[10000];
+                    all = Arrays.CopyOf(all, all.Length + 1);
+                    all[all.Length - 1] = seg;
+                    for (var i = 0; i < seg.Length; i++)
+                    {
+                        seg[i] = new byte[new Random().Next(7)];
+                    }
+                }
+            }
+            catch (OutOfMemoryException)
+            {
+                // Release and quit.
+            }
+        }
+
+        private void CauseGc()
+        {
+            var garbageCollectorMXBeans = ManagementFactory.GetGarbageCollectorMXBeans();
+            var ccounts = new List<long>();
+            foreach (var g in garbageCollectorMXBeans)
+            {
+                ccounts.Add(g.GetCollectionCount());
+            }
+            var ccounts2 = new List<long>();
+            do
+            {
+                GC.Collect();
+                ccounts2.Clear();
+                foreach (var g in garbageCollectorMXBeans)
+                {
+                    ccounts2.Add(g.GetCollectionCount());
+                }
+            } while (ccounts2.SequenceEqual(ccounts)); // loop until the collection counts actually change
+        }
+    }
+}
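
The ManagementFactory/MemoryMXBean calls above are unported
java.lang.management APIs, so this class will not compile until a
Support-layer shim exists. A minimal sketch of such a shim over the CLR's
GC API (all names below are assumptions, not code from this commit):

    using System;
    using System.Collections.Generic;

    namespace Lucene.Net.Test.Support
    {
        public class MemoryUsage
        {
            private readonly long used;
            public MemoryUsage(long used) { this.used = used; }
            public long GetUsed() { return used; }
            // The CLR exposes no hard heap cap through GC, so report "unbounded".
            public long GetMax() { return long.MaxValue; }
        }

        public class MemoryMXBean
        {
            public MemoryUsage GetHeapMemoryUsage()
            {
                // false = cheap snapshot, do not force a full collection
                return new MemoryUsage(GC.GetTotalMemory(false));
            }
        }

        public class GarbageCollectorMXBean
        {
            private readonly int generation;
            public GarbageCollectorMXBean(int generation) { this.generation = generation; }
            public long GetCollectionCount() { return GC.CollectionCount(generation); }
        }

        public static class ManagementFactory
        {
            public static MemoryMXBean GetMemoryMXBean() { return new MemoryMXBean(); }

            public static IList<GarbageCollectorMXBean> GetGarbageCollectorMXBeans()
            {
                var beans = new List<GarbageCollectorMXBean>();
                for (var gen = 0; gen <= GC.MaxGeneration; gen++)
                {
                    beans.Add(new GarbageCollectorMXBean(gen));
                }
                return beans;
            }
        }
    }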

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/d3c00f5a/test/core/Util/Test2BPagedBytes.cs
----------------------------------------------------------------------
diff --git a/test/core/Util/Test2BPagedBytes.cs b/test/core/Util/Test2BPagedBytes.cs
new file mode 100644
index 0000000..31fca25
--- /dev/null
+++ b/test/core/Util/Test2BPagedBytes.cs
@@ -0,0 +1,62 @@
+using System;
+using Lucene.Net.Store;
+using Lucene.Net.Test.Support;
+using Lucene.Net.Util;
+using NUnit.Framework;
+
+namespace Lucene.Net.Test.Util
+{
+    [Ignore("You must increase heap to > 2 G to run this")]
+    [TestFixture]
+    public class Test2BPagedBytes : LuceneTestCase
+    {
+        [Test]
+        public void Test()
+        {
+            BaseDirectoryWrapper dir = NewFSDirectory(_TestUtil.GetTempDir("test2BPagedBytes"));
+            if (dir is MockDirectoryWrapper)
+            {
+                ((MockDirectoryWrapper) dir).SetThrottling(MockDirectoryWrapper.Throttling.NEVER);
+            }
+            var pb = new PagedBytes(15);
+            IndexOutput dataOutput = dir.CreateOutput("foo", IOContext.DEFAULT);
+            long netBytes = 0;
+            var seed = new Random().Next();
+            long lastFP = 0;
+            var r2 = new Random(seed);
+            while (netBytes < 1.1*int.MaxValue)
+            {
+                var numBytes = _TestUtil.NextInt(r2, 1, 32768);
+                var bytes = new sbyte[numBytes];
+                r2.NextBytes(bytes);
+                dataOutput.WriteBytes(bytes, bytes.Length);
+                var fp = dataOutput.FilePointer;
+                // assert fp == lastFP + numBytes;
+                lastFP = fp;
+                netBytes += numBytes;
+            }
+            dataOutput.Dispose();
+            IndexInput input = dir.OpenInput("foo", IOContext.DEFAULT);
+            pb.Copy(input, input.Length);
+            input.Dispose();
+            var reader = pb.Freeze(true);
+
+            r2 = new Random(seed);
+            netBytes = 0;
+            while (netBytes < 1.1 * int.MaxValue)
+            {
+                int numBytes = _TestUtil.NextInt(r2, 1, 32768);
+                var bytes = new sbyte[numBytes];
+                r2.NextBytes(bytes);
+                var expected = new BytesRef(bytes);
+
+                var actual = new BytesRef();
+                reader.FillSlice(actual, netBytes, numBytes);
+                assertEquals(expected, actual);
+
+                netBytes += numBytes;
+            }
+            dir.Close();
+        }
+    }
+}
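
Worth noting: rather than buffering more than 2 GB for verification, the
test records a seed, writes a Random-generated byte stream, then re-seeds a
second Random to replay the identical sequence while reading back. The
pattern in isolation (a standalone sketch, not code from this commit):

    using System;

    internal static class SeedReplayDemo
    {
        private static void Main()
        {
            var seed = new Random().Next();

            var writer = new Random(seed);
            var written = new byte[8];
            writer.NextBytes(written);

            // System.Random is deterministic for a fixed seed, so a second
            // instance replays exactly the same bytes for verification.
            var reader = new Random(seed);
            var replayed = new byte[8];
            reader.NextBytes(replayed);

            for (var i = 0; i < written.Length; i++)
            {
                if (written[i] != replayed[i]) throw new Exception("mismatch at " + i);
            }
            Console.WriteLine("replayed sequence matches");
        }
    }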

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/d3c00f5a/test/core/Util/TestArrayUtil.cs
----------------------------------------------------------------------
diff --git a/test/core/Util/TestArrayUtil.cs b/test/core/Util/TestArrayUtil.cs
new file mode 100644
index 0000000..2e1b644
--- /dev/null
+++ b/test/core/Util/TestArrayUtil.cs
@@ -0,0 +1,390 @@
+using System;
+using Lucene.Net.Support;
+using Lucene.Net.Test.Support;
+using Lucene.Net.Util;
+using NUnit.Framework;
+
+namespace Lucene.Net.Test.Util
+{
+    [TestFixture]
+    public class TestArrayUtil : LuceneTestCase
+    {
+        // Ensure ArrayUtil.getNextSize gives linear amortized cost of realloc/copy
+        [Test]
+        public void TestGrowth()
+        {
+            int currentSize = 0;
+            long copyCost = 0;
+
+            // Make sure ArrayUtil hits int.MaxValue, if we insist:
+            while (currentSize != int.MaxValue)
+            {
+                int nextSize = ArrayUtil.Oversize(1 + currentSize, RamUsageEstimator.NUM_BYTES_OBJECT_REF);
+                Assert.IsTrue(nextSize > currentSize);
+                if (currentSize > 0)
+                {
+                    copyCost += currentSize;
+                    var copyCostPerElement = ((double)copyCost) / currentSize;
+                    Assert.IsTrue("cost " + copyCostPerElement, copyCostPerElement < 10.0);
+                }
+                currentSize = nextSize;
+            }
+        }
+
+        [Test]
+        public void TestMaxSize()
+        {
+            // Intentionally pass invalid elemSizes:
+            for (var elemSize = 0; elemSize < 10; elemSize++)
+            {
+                assertEquals(int.MaxValue, ArrayUtil.Oversize(int.MaxValue, elemSize));
+                assertEquals(int.MaxValue, ArrayUtil.Oversize(int.MaxValue - 1, elemSize));
+            }
+        }
+
+        [Test]
+        public void TestInvalidElementSizes()
+        {
+            var rnd = new Random();
+            int num = AtLeast(10000);
+            for (var iter = 0; iter < num; iter++)
+            {
+                var minTargetSize = rnd.Next(int.MaxValue);
+                var elemSize = rnd.Next(11);
+                var v = ArrayUtil.Oversize(minTargetSize, elemSize);
+                Assert.IsTrue(v >= minTargetSize);
+            }
+        }
+
+        [Test]
+        public void TestParseInt()
+        {
+            int test;
+            try
+            {
+                test = ArrayUtil.ParseInt("".ToCharArray());
+                Assert.IsTrue(false);
+            }
+            catch (FormatException e)
+            {
+                //expected
+            }
+            try
+            {
+                test = ArrayUtil.ParseInt("foo".ToCharArray());
+                Assert.IsTrue(false);
+            }
+            catch (FormatException e)
+            {
+                //expected
+            }
+            try
+            {
+                test = ArrayUtil.ParseInt(long.MaxValue.ToString().ToCharArray());
+                Assert.IsTrue(false);
+            }
+            catch (FormatException e)
+            {
+                //expected
+            }
+            try
+            {
+                test = ArrayUtil.ParseInt("0.34".ToCharArray());
+                Assert.IsTrue(false);
+            }
+            catch (FormatException e)
+            {
+                //expected
+            }
+
+            try
+            {
+                test = ArrayUtil.ParseInt("1".ToCharArray());
+                Assert.IsTrue(test == 1, test + " does not equal: " + 1);
+                test = ArrayUtil.ParseInt("-10000".ToCharArray());
+                Assert.IsTrue(test == -10000, test + " does not equal: " + -10000);
+                test = ArrayUtil.ParseInt("1923".ToCharArray());
+                Assert.IsTrue(test == 1923, test + " does not equal: " + 1923);
+                test = ArrayUtil.ParseInt("-1".ToCharArray());
+                Assert.IsTrue(test == -1, test + " does not equal: " + -1);
+                test = ArrayUtil.ParseInt("foo 1923 bar".ToCharArray(), 4, 4);
+                Assert.IsTrue(test == 1923, test + " does not equal: " + 1923);
+            }
+            catch (FormatException e)
+            {
+                Console.WriteLine(e.StackTrace);
+                Assert.IsTrue(false);
+            }
+        }
+
+        [Test]
+        public void TestSliceEquals()
+        {
+            var left = "this is equal";
+            var right = left;
+            var leftChars = left.ToCharArray();
+            var rightChars = right.ToCharArray();
+            Assert.IsTrue(ArrayUtil.Equals(leftChars, 0, rightChars, 0, left.Length), left + " does not equal: " + right);
+
+            Assert.IsFalse(ArrayUtil.Equals(leftChars, 1, rightChars, 0, left.Length), left + " does not equal: " + right);
+            Assert.IsFalse(ArrayUtil.Equals(leftChars, 1, rightChars, 2, left.Length), left + " does not equal: " + right);
+
+            Assert.IsFalse(ArrayUtil.Equals(leftChars, 25, rightChars, 0, left.Length), left + " does not equal: " + right);
+            Assert.IsFalse(ArrayUtil.Equals(leftChars, 12, rightChars, 0, left.Length), left + " does not equal: " + right);
+        }
+
+        private int[] CreateRandomArray(int maxSize)
+        {
+            var rnd = new Random();
+            var a = new int[rnd.Next(maxSize) + 1];
+            for (var i = 0; i < a.Length; i++)
+            {
+                a[i] = rnd.Next(a.Length);
+            }
+            return a;
+        }
+
+        [Test]
+        public virtual void TestQuickSort()
+        {
+            int num = AtLeast(50);
+            for (var i = 0; i < num; i++)
+            {
+                int[] a1 = CreateRandomArray(2000), a2 = (int[])a1.Clone();
+                ArrayUtil.QuickSort(a1);
+                Array.Sort(a2);
+                Assert.AreEqual(a2, a1);
+
+                a1 = CreateRandomArray(2000);
+                a2 = (int[])a1.Clone();
+                ArrayUtil.QuickSort(a1, Collections.ReverseOrder());
+                Array.Sort(a2, Collections.ReverseOrder());
+                Assert.AreEqual(a2, a1);
+                // reverse back, so we can test that completely backwards sorted array (worst case) is working:
+                ArrayUtil.QuickSort(a1);
+                Array.Sort(a2);
+                Assert.AreEqual(a2, a1);
+            }
+        }
+
+        private int[] CreateSparseRandomArray(int maxSize)
+        {
+            var rnd = new Random();
+            var a = new int[rnd.Next(maxSize) + 1];
+            for (var i = 0; i < a.Length; i++)
+            {
+                a[i] = rnd.Next(2);
+            }
+            return a;
+        }
+
+        [Test]
+        public virtual void TestQuickToMergeSortFallback()
+        {
+            int num = AtLeast(50);
+            for (var i = 0; i < num; i++)
+            {
+                int[] a1 = CreateSparseRandomArray(40000), a2 = (int[])a1.Clone();
+                ArrayUtil.QuickSort(a1);
+                Array.Sort(a2);
+                Assert.AreEqual(a2, a1);
+            }
+        }
+
+        [Test]
+        public virtual void TestMergeSort()
+        {
+            int num = AtLeast(50);
+            for (var i = 0; i < num; i++)
+            {
+                int[] a1 = CreateRandomArray(2000), a2 = (int[])a1.Clone();
+                ArrayUtil.MergeSort(a1);
+                Array.Sort(a2);
+                Assert.AreEqual(a2, a1);
+
+                a1 = CreateRandomArray(2000);
+                a2 = (int[])a1.Clone();
+                ArrayUtil.MergeSort(a1, Collections.ReverseOrder());
+                Array.Sort(a2, Collections.ReverseOrder());
+                Assert.AreEqual(a2, a1);
+                // reverse back, so we can test that completely backwards sorted array (worst case) is working:
+                ArrayUtil.MergeSort(a1);
+                Array.Sort(a2);
+                Assert.AreEqual(a2, a1);
+            }
+        }
+
+        [Test]
+        public virtual void TestTimSort()
+        {
+            int num = AtLeast(65);
+            for (var i = 0; i < num; i++)
+            {
+                int[] a1 = CreateRandomArray(2000), a2 = (int[])a1.Clone();
+                ArrayUtil.TimSort(a1);
+                Array.Sort(a2);
+                Assert.AreEqual(a2, a1);
+
+                a1 = CreateRandomArray(2000);
+                a2 = (int[])a1.Clone();
+                ArrayUtil.TimSort(a1, Collections.ReverseOrder());
+                Array.Sort(a2, Collections.ReverseOrder());
+                Assert.AreEqual(a2, a1);
+                // reverse back, so we can test that completely backwards sorted array (worst case) is working:
+                ArrayUtil.TimSort(a1);
+                Array.Sort(a2);
+                Assert.AreEqual(a2, a1);
+            }
+        }
+
+        [Test]
+        public virtual void TestInsertionSort()
+        {
+            for (int i = 0, c = AtLeast(500); i < c; i++)
+            {
+                int[] a1 = CreateRandomArray(30), a2 = (int[])a1.Clone();
+                ArrayUtil.InsertionSort(a1);
+                Array.Sort(a2);
+                Assert.AreEqual(a2, a1);
+
+                a1 = CreateRandomArray(30);
+                a2 = (int[])a1.Clone();
+                ArrayUtil.InsertionSort(a1, Collections.ReverseOrder());
+                Array.Sort(a2, Collections.ReverseOrder());
+                Assert.AreEqual(a2, a1);
+                // reverse back, so we can test that completely backwards sorted array (worst case) is working:
+                ArrayUtil.InsertionSort(a1);
+                Array.Sort(a2);
+                Assert.AreEqual(a2, a1);
+            }
+        }
+
+        [Test]
+        public virtual void TestBinarySort()
+        {
+            for (int i = 0, c = AtLeast(500); i < c; i++)
+            {
+                int[] a1 = CreateRandomArray(30), a2 = (int[])a1.Clone();
+                ArrayUtil.BinarySort(a1);
+                Array.Sort(a2);
+                Assert.AreEqual(a2, a1);
+
+                a1 = CreateRandomArray(30);
+                a2 = (int[])a1.Clone();
+                ArrayUtil.BinarySort(a1, Collections.ReverseOrder());
+                Array.Sort(a2, Collections.ReverseOrder());
+                Assert.AreEqual(a2, a1);
+                // reverse back, so we can test that completely backwards sorted array (worst case) is working:
+                ArrayUtil.BinarySort(a1);
+                Array.Sort(a2);
+                Assert.AreEqual(a2, a1);
+            }
+        }
+
+        internal class Item : IComparable<Item>
+        {
+            int val, order;
+
+            internal Item(int val, int order)
+            {
+                this.val = val;
+                this.order = order;
+            }
+
+            public int CompareTo(Item other)
+            {
+                return this.order - other.order;
+            }
+
+            public override string ToString()
+            {
+                return val.ToString();
+            }
+        }
+
+        [Test]
+        public virtual void TestMergeSortStability()
+        {
+            var rnd = new Random();
+            var items = new Item[100];
+            for (var i = 0; i < items.Length; i++)
+            {
+                // Half of the items have a distinct value but the same order; their values
+                // are increasing, so they should still be in that order after sorting.
+                // The other half have a defined order but no value (-1); they should appear
+                // after all of the above when sorted.
+                var equal = rnd.NextBool();
+                items[i] = new Item(equal ? (i + 1) : -1, equal ? 0 : (rnd.Next(1000) + 1));
+            }
+
+            if (VERBOSE) Console.WriteLine("Before: " + Arrays.ToString(items));
+            // if you replace this with ArrayUtil.quickSort(), test should fail:
+            ArrayUtil.MergeSort(items);
+            if (VERBOSE) Console.WriteLine("Sorted: " + Arrays.ToString(items));
+
+            var last = items[0];
+            for (var i = 1; i < items.Length; i++)
+            {
+                var act = items[i];
+                if (act.Order == 0)
+                {
+                    // order of "equal" items should be not mixed up
+                    Assert.IsTrue(act.Val > last.Val);
+                }
+                Assert.IsTrue(act.Order >= last.Order);
+                last = act;
+            }
+        }
+
+        [Test]
+        public virtual void TestTimSortStability()
+        {
+            var rnd = new Random();
+            var items = new Item[100];
+            for (var i = 0; i < items.Length; i++)
+            {
+                // Half of the items have a distinct value but the same order; their values
+                // are increasing, so they should still be in that order after sorting.
+                // The other half have a defined order but no value (-1); they should appear
+                // after all of the above when sorted.
+                var equal = rnd.NextBool();
+                items[i] = new Item(equal ? (i + 1) : -1, equal ? 0 : (rnd.Next(1000) + 1));
+            }
+
+            if (VERBOSE) Console.WriteLine("Before: " + Arrays.ToString(items));
+            // if you replace this with ArrayUtil.quickSort(), test should fail:
+            ArrayUtil.TimSort(items);
+            if (VERBOSE) Console.WriteLine("Sorted: " + Arrays.ToString(items));
+
+            var last = items[0];
+            for (var i = 1; i < items.Length; i++)
+            {
+                var act = items[i];
+                if (act.Order == 0)
+                {
+                    // order of "equal" items should be not mixed up
+                    Assert.IsTrue(act.Val > last.Val);
+                }
+                Assert.IsTrue(act.Order >= last.Order);
+                last = act;
+            }
+        }
+
+        // Should produce no exceptions
+        [Test]
+        public virtual void TestEmptyArraySort()
+        {
+            var a = new int[0];
+            ArrayUtil.QuickSort(a);
+            ArrayUtil.MergeSort(a);
+            ArrayUtil.InsertionSort(a);
+            ArrayUtil.BinarySort(a);
+            ArrayUtil.TimSort(a);
+            ArrayUtil.QuickSort(a, Collections.ReverseOrder());
+            ArrayUtil.MergeSort(a, Collections.ReverseOrder());
+            ArrayUtil.TimSort(a, Collections.ReverseOrder());
+            ArrayUtil.InsertionSort(a, Collections.ReverseOrder());
+            ArrayUtil.BinarySort(a, Collections.ReverseOrder());
+        }
+    }
+}
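
The sort tests above lean on a Collections.ReverseOrder() helper from
Lucene.Net.Support standing in for java.util.Collections.reverseOrder().
If that helper were missing, a minimal sketch would look like the
following (a guess at the Support API, not code from this commit):

    using System;
    using System.Collections.Generic;

    namespace Lucene.Net.Support
    {
        public static class Collections
        {
            private sealed class ReverseComparer<T> : IComparer<T>
                where T : IComparable<T>
            {
                public int Compare(T x, T y)
                {
                    // Invert the natural ordering, like java.util.Collections.reverseOrder().
                    return y.CompareTo(x);
                }
            }

            public static IComparer<T> ReverseOrder<T>() where T : IComparable<T>
            {
                return new ReverseComparer<T>();
            }
        }
    }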

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/d3c00f5a/test/core/Util/TestAttributeSource.cs
----------------------------------------------------------------------
diff --git a/test/core/Util/TestAttributeSource.cs b/test/core/Util/TestAttributeSource.cs
index 395ac16..b9d8595 100644
--- a/test/core/Util/TestAttributeSource.cs
+++ b/test/core/Util/TestAttributeSource.cs
@@ -16,7 +16,7 @@
  */
 
 using System;
-
+using System.Collections;
 using NUnit.Framework;
 
 using Token = Lucene.Net.Analysis.Token;
@@ -34,48 +34,48 @@ namespace Lucene.Net.Util
         public virtual void TestCaptureState()
         {
             // init a first instance
-            AttributeSource src = new AttributeSource();
-            ITermAttribute termAtt = src.AddAttribute<ITermAttribute>();
-            ITypeAttribute typeAtt = src.AddAttribute<ITypeAttribute>();
-            termAtt.SetTermBuffer("TestTerm");
+            var src = new AttributeSource();
+            var termAtt = src.AddAttribute<ICharTermAttribute>();
+            var typeAtt = src.AddAttribute<ITypeAttribute>();
+            termAtt.Append("TestTerm");
             typeAtt.Type = "TestType";
-            int hashCode = src.GetHashCode();
+            var hashCode = src.GetHashCode();
 
-            AttributeSource.State state = src.CaptureState();
+            var state = src.CaptureState();
 
             // modify the attributes
-            termAtt.SetTermBuffer("AnotherTestTerm");
+            termAtt.SetEmpty().Append("AnotherTestTerm");
             typeAtt.Type = "AnotherTestType";
             Assert.IsTrue(hashCode != src.GetHashCode(), "Hash code should be different");
 
             src.RestoreState(state);
-            Assert.AreEqual("TestTerm", termAtt.Term);
+            Assert.AreEqual("TestTerm", termAtt.ToString());
             Assert.AreEqual("TestType", typeAtt.Type);
             Assert.AreEqual(hashCode, src.GetHashCode(), "Hash code should be equal after restore");
 
             // restore into an exact configured copy
-            AttributeSource copy = new AttributeSource();
-            copy.AddAttribute<ITermAttribute>();
+            var copy = new AttributeSource();
+            copy.AddAttribute<ICharTermAttribute>();
             copy.AddAttribute<ITypeAttribute>();
             copy.RestoreState(state);
             Assert.AreEqual(src.GetHashCode(), copy.GetHashCode(), "Both AttributeSources should have same hashCode after restore");
             Assert.AreEqual(src, copy, "Both AttributeSources should be equal after restore");
 
             // init a second instance (with attributes in different order and one additional attribute)
-            AttributeSource src2 = new AttributeSource();
+            var src2 = new AttributeSource();
             typeAtt = src2.AddAttribute<ITypeAttribute>();
-            IFlagsAttribute flagsAtt = src2.AddAttribute<IFlagsAttribute>();
-            termAtt = src2.AddAttribute<ITermAttribute>();
+            var flagsAtt = src2.AddAttribute<IFlagsAttribute>();
+            termAtt = src2.AddAttribute<ICharTermAttribute>();
             flagsAtt.Flags = 12345;
 
             src2.RestoreState(state);
-            Assert.AreEqual("TestTerm", termAtt.Term);
+            Assert.AreEqual("TestTerm", termAtt.ToString());
             Assert.AreEqual("TestType", typeAtt.Type);
             Assert.AreEqual(12345, flagsAtt.Flags, "FlagsAttribute should not be touched");
 
             // init a third instance missing one Attribute
-            AttributeSource src3 = new AttributeSource();
-            termAtt = src3.AddAttribute<ITermAttribute>();
+            var src3 = new AttributeSource();
+            termAtt = src3.AddAttribute<ICharTermAttribute>();
 
             Assert.Throws<ArgumentException>(() => src3.RestoreState(state),
                                              "The third instance is missing the TypeAttribute, so restoreState() should throw IllegalArgumentException");
@@ -84,66 +84,35 @@ namespace Lucene.Net.Util
         [Test]
         public virtual void TestCloneAttributes()
         {
-            AttributeSource src = new AttributeSource();
-            ITermAttribute termAtt = src.AddAttribute<ITermAttribute>();
-            ITypeAttribute typeAtt = src.AddAttribute<ITypeAttribute>();
-            termAtt.SetTermBuffer("TestTerm");
+            var src = new AttributeSource();
+            var flagsAtt = src.AddAttribute<IFlagsAttribute>();
+            var typeAtt = src.AddAttribute<ITypeAttribute>();
+            flagsAtt.Flags = 1234;
             typeAtt.Type = "TestType";
 
-            AttributeSource clone = src.CloneAttributes();
-            System.Collections.Generic.IEnumerator<Type> it = clone.GetAttributeTypesIterator().GetEnumerator();
+            var clone = src.CloneAttributes();
+            var it = clone.GetAttributeTypesIterator().GetEnumerator();
             Assert.IsTrue(it.MoveNext());
-            Assert.AreEqual(typeof(ITermAttribute), it.Current, "TermAttribute must be the first attribute");
+            Assert.AreEqual(typeof(IFlagsAttribute), it.Current, "FlagsAttribute must be the first attribute");
             Assert.IsTrue(it.MoveNext());
             Assert.AreEqual(typeof(ITypeAttribute), it.Current, "TypeAttribute must be the second attribute");
             Assert.IsFalse(it.MoveNext(), "No more attributes");
 
-            ITermAttribute termAtt2 = clone.GetAttribute<ITermAttribute>();
-            ITypeAttribute typeAtt2 = clone.GetAttribute<ITypeAttribute>();
-            Assert.IsFalse(ReferenceEquals(termAtt2, termAtt), "TermAttribute of original and clone must be different instances");
-            Assert.IsFalse(ReferenceEquals(typeAtt2, typeAtt), "TypeAttribute of original and clone must be different instances");
-            Assert.AreEqual(termAtt2, termAtt, "TermAttribute of original and clone must be equal");
+            var flagsAtt2 = clone.GetAttribute<IFlagsAttribute>();
+            var typeAtt2 = clone.GetAttribute<ITypeAttribute>();
+            Assert.That(flagsAtt2 != flagsAtt, "FlagsAttribute of original and clone must be different instances");
+            Assert.That(typeAtt2 != typeAtt, "TypeAttribute of original and clone must be different instances");
+            Assert.AreEqual(flagsAtt2, flagsAtt, "FlagsAttribute of original and clone must be equal");
             Assert.AreEqual(typeAtt2, typeAtt, "TypeAttribute of original and clone must be equal");
         }
 
         [Test]
-        public virtual void TestToStringAndMultiAttributeImplementations()
-        {
-            AttributeSource src = new AttributeSource();
-            ITermAttribute termAtt = src.AddAttribute<ITermAttribute>();
-            ITypeAttribute typeAtt = src.AddAttribute<ITypeAttribute>();
-            termAtt.SetTermBuffer("TestTerm");
-            typeAtt.Type = "TestType";
-            Assert.AreEqual("(" + termAtt.ToString() + "," + typeAtt.ToString() + ")", src.ToString(), "Attributes should appear in original order");
-            System.Collections.Generic.IEnumerator<Attribute> it = src.GetAttributeImplsIterator().GetEnumerator();
-            Assert.IsTrue(it.MoveNext(), "Iterator should have 2 attributes left");
-            Assert.AreSame(termAtt, it.Current, "First AttributeImpl from iterator should be termAtt");
-            Assert.IsTrue(it.MoveNext(), "Iterator should have 1 attributes left");
-            Assert.AreSame(typeAtt, it.Current, "Second AttributeImpl from iterator should be typeAtt");
-            Assert.IsFalse(it.MoveNext(), "Iterator should have 0 attributes left");
-
-            src = new AttributeSource();
-            src.AddAttributeImpl(new Token());
-            // this should not add a new attribute as Token implements TermAttribute, too
-            termAtt = src.AddAttribute<ITermAttribute>();
-            Assert.IsTrue(termAtt is Token, "TermAttribute should be implemented by Token");
-            // get the Token attribute and check, that it is the only one
-            it = src.GetAttributeImplsIterator().GetEnumerator();
-            Assert.IsTrue(it.MoveNext());
-            Token tok = (Token)it.Current;
-            Assert.IsFalse(it.MoveNext(), "There should be only one attribute implementation instance");
-
-            termAtt.SetTermBuffer("TestTerm");
-            Assert.AreEqual("(" + tok.ToString() + ")", src.ToString(), "Token should only printed once");
-        }
-
-        [Test]
         public void TestDefaultAttributeFactory()
         {
-            AttributeSource src = new AttributeSource();
+            var src = new AttributeSource();
 
-            Assert.IsTrue(src.AddAttribute<ITermAttribute>() is TermAttribute,
-                          "TermAttribute is not implemented by TermAttributeImpl");
+            Assert.IsTrue(src.AddAttribute<ICharTermAttribute>() is CharTermAttribute,
+                          "CharTermAttribute is not implemented by CharTermAttributeImpl");
             Assert.IsTrue(src.AddAttribute<IOffsetAttribute>() is OffsetAttribute,
                           "OffsetAttribute is not implemented by OffsetAttributeImpl");
             Assert.IsTrue(src.AddAttribute<IFlagsAttribute>() is FlagsAttribute,
@@ -162,16 +131,25 @@ namespace Lucene.Net.Util
             var src = new AttributeSource();
             Assert.Throws<ArgumentException>(() => src.AddAttribute<Token>(), "Should throw ArgumentException");
 
-            src = new AttributeSource();
+            src = new AttributeSource(Token.TOKEN_ATTRIBUTE_FACTORY);
             Assert.Throws<ArgumentException>(() => src.AddAttribute<Token>(), "Should throw ArgumentException");
+        
+            src = new AttributeSource();
+            // TODO: how to fix this??
+            // original Java is: src.addAttribute((Class) Iterator.class);  // break this by unsafe cast
+            Assert.Throws<ArgumentException>(() => src.AddAttribute<IEnumerator>(), "Should throw ArgumentException");
+        }
 
-            //try
-            //{
-            //    AttributeSource src = new AttributeSource();
-            //    src.AddAttribute<System.Collections.IEnumerator>(); //Doesn't compile.
-            //    Assert.Fail("Should throw IllegalArgumentException");
-            //}
-            //catch (ArgumentException iae) { }
+        [Test]
+        public void TestLUCENE_3042()
+        {
+            var src1 = new AttributeSource();
+            src1.AddAttribute<ICharTermAttribute>().Append("foo");
+            var hash1 = src1.GetHashCode(); // this triggers a cached state
+            var src2 = new AttributeSource(src1);
+            src2.AddAttribute<ITypeAttribute>().Type = "bar";
+            Assert.True(hash1 != src1.GetHashCode(), "The hashCode is identical, so the captured state was preserved.");
+            Assert.AreEqual(src2.GetHashCode(), src1.GetHashCode());
         }
     }
     

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/d3c00f5a/test/core/Util/TestBitVector.cs
----------------------------------------------------------------------
diff --git a/test/core/Util/TestBitVector.cs b/test/core/Util/TestBitVector.cs
deleted file mode 100644
index 69798f3..0000000
--- a/test/core/Util/TestBitVector.cs
+++ /dev/null
@@ -1,311 +0,0 @@
-/* 
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- * 
- * http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-using System;
-
-using NUnit.Framework;
-
-using Directory = Lucene.Net.Store.Directory;
-using RAMDirectory = Lucene.Net.Store.RAMDirectory;
-
-namespace Lucene.Net.Util
-{
-	
-	/// <summary> <c>TestBitVector</c> tests the <c>BitVector</c>, obviously.
-	/// 
-	/// 
-	/// </summary>
-	/// <version>  $Id: TestBitVector.java 765649 2009-04-16 14:29:26Z mikemccand $
-	/// </version>
-	[TestFixture]
-	public class TestBitVector:LuceneTestCase
-	{
-		
-		/// <summary> Test the default constructor on BitVectors of various sizes.</summary>
-		/// <throws>  Exception </throws>
-		[Test]
-		public virtual void  TestConstructSize()
-		{
-			DoTestConstructOfSize(8);
-			DoTestConstructOfSize(20);
-			DoTestConstructOfSize(100);
-			DoTestConstructOfSize(1000);
-		}
-		
-		private void  DoTestConstructOfSize(int n)
-		{
-			BitVector bv = new BitVector(n);
-			Assert.AreEqual(n, bv.Size());
-		}
-		
-		/// <summary> Test the get() and set() methods on BitVectors of various sizes.</summary>
-		/// <throws>  Exception </throws>
-		[Test]
-		public virtual void  TestGetSet()
-		{
-			DoTestGetSetVectorOfSize(8);
-			DoTestGetSetVectorOfSize(20);
-			DoTestGetSetVectorOfSize(100);
-			DoTestGetSetVectorOfSize(1000);
-		}
-		
-		private void  DoTestGetSetVectorOfSize(int n)
-		{
-			BitVector bv = new BitVector(n);
-			for (int i = 0; i < bv.Size(); i++)
-			{
-				// ensure a set bit can be git'
-				Assert.IsFalse(bv.Get(i));
-				bv.Set(i);
-				Assert.IsTrue(bv.Get(i));
-			}
-		}
-		
-		/// <summary> Test the clear() method on BitVectors of various sizes.</summary>
-		/// <throws>  Exception </throws>
-		[Test]
-		public virtual void  TestClear()
-		{
-			DoTestClearVectorOfSize(8);
-			DoTestClearVectorOfSize(20);
-			DoTestClearVectorOfSize(100);
-			DoTestClearVectorOfSize(1000);
-		}
-		
-		private void  DoTestClearVectorOfSize(int n)
-		{
-			BitVector bv = new BitVector(n);
-			for (int i = 0; i < bv.Size(); i++)
-			{
-				// ensure a set bit is cleared
-				Assert.IsFalse(bv.Get(i));
-				bv.Set(i);
-				Assert.IsTrue(bv.Get(i));
-				bv.Clear(i);
-				Assert.IsFalse(bv.Get(i));
-			}
-		}
-		
-		/// <summary> Test the count() method on BitVectors of various sizes.</summary>
-		/// <throws>  Exception </throws>
-		[Test]
-		public virtual void  TestCount()
-		{
-			DoTestCountVectorOfSize(8);
-			DoTestCountVectorOfSize(20);
-			DoTestCountVectorOfSize(100);
-			DoTestCountVectorOfSize(1000);
-		}
-		
-		private void  DoTestCountVectorOfSize(int n)
-		{
-			BitVector bv = new BitVector(n);
-			// test count when incrementally setting bits
-			for (int i = 0; i < bv.Size(); i++)
-			{
-				Assert.IsFalse(bv.Get(i));
-				Assert.AreEqual(i, bv.Count());
-				bv.Set(i);
-				Assert.IsTrue(bv.Get(i));
-				Assert.AreEqual(i + 1, bv.Count());
-			}
-			
-			bv = new BitVector(n);
-			// test count when setting then clearing bits
-			for (int i = 0; i < bv.Size(); i++)
-			{
-				Assert.IsFalse(bv.Get(i));
-				Assert.AreEqual(0, bv.Count());
-				bv.Set(i);
-				Assert.IsTrue(bv.Get(i));
-				Assert.AreEqual(1, bv.Count());
-				bv.Clear(i);
-				Assert.IsFalse(bv.Get(i));
-				Assert.AreEqual(0, bv.Count());
-			}
-		}
-		
-		/// <summary> Test writing and construction to/from Directory.</summary>
-		/// <throws>  Exception </throws>
-		[Test]
-		public virtual void  TestWriteRead()
-		{
-			DoTestWriteRead(8);
-			DoTestWriteRead(20);
-			DoTestWriteRead(100);
-			DoTestWriteRead(1000);
-		}
-		
-		private void  DoTestWriteRead(int n)
-		{
-			Directory d = new RAMDirectory();
-			
-			BitVector bv = new BitVector(n);
-			// test count when incrementally setting bits
-			for (int i = 0; i < bv.Size(); i++)
-			{
-				Assert.IsFalse(bv.Get(i));
-				Assert.AreEqual(i, bv.Count());
-				bv.Set(i);
-				Assert.IsTrue(bv.Get(i));
-				Assert.AreEqual(i + 1, bv.Count());
-				bv.Write(d, "TESTBV");
-				BitVector compare = new BitVector(d, "TESTBV");
-				// compare bit vectors with bits set incrementally
-				Assert.IsTrue(DoCompare(bv, compare));
-			}
-		}
-		
-		/// <summary> Test r/w when size/count cause switching between bit-set and d-gaps file formats.  </summary>
-		/// <throws>  Exception </throws>
-		[Test]
-		public virtual void  TestDgaps()
-		{
-			DoTestDgaps(1, 0, 1);
-			DoTestDgaps(10, 0, 1);
-			DoTestDgaps(100, 0, 1);
-			DoTestDgaps(1000, 4, 7);
-			DoTestDgaps(10000, 40, 43);
-			DoTestDgaps(100000, 415, 418);
-			DoTestDgaps(1000000, 3123, 3126);
-		}
-		
-		private void  DoTestDgaps(int size, int count1, int count2)
-		{
-			Directory d = new RAMDirectory();
-			BitVector bv = new BitVector(size);
-			for (int i = 0; i < count1; i++)
-			{
-				bv.Set(i);
-				Assert.AreEqual(i + 1, bv.Count());
-			}
-			bv.Write(d, "TESTBV");
-			// gradually increase number of set bits
-			for (int i = count1; i < count2; i++)
-			{
-				BitVector bv2 = new BitVector(d, "TESTBV");
-				Assert.IsTrue(DoCompare(bv, bv2));
-				bv = bv2;
-				bv.Set(i);
-				Assert.AreEqual(i + 1, bv.Count());
-				bv.Write(d, "TESTBV");
-			}
-			// now start decreasing number of set bits
-			for (int i = count2 - 1; i >= count1; i--)
-			{
-				BitVector bv2 = new BitVector(d, "TESTBV");
-				Assert.IsTrue(DoCompare(bv, bv2));
-				bv = bv2;
-				bv.Clear(i);
-				Assert.AreEqual(i, bv.Count());
-				bv.Write(d, "TESTBV");
-			}
-		}
-		/// <summary> Compare two BitVectors.
-		/// This should really be an equals method on the BitVector itself.
-		/// </summary>
-		/// <param name="bv">One bit vector
-		/// </param>
-		/// <param name="compare">The second to compare
-		/// </param>
-		private bool DoCompare(BitVector bv, BitVector compare)
-		{
-			bool equal = true;
-			for (int i = 0; i < bv.Size(); i++)
-			{
-				// bits must be equal
-				if (bv.Get(i) != compare.Get(i))
-				{
-					equal = false;
-					break;
-				}
-			}
-			return equal;
-		}
-		
-		private static int[] subsetPattern = new int[]{1, 1, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1};
-		
-		/// <summary> Tests BitVector.subset() against the above pattern</summary>
-		[Test]
-		public virtual void  TestSubset()
-		{
-			DoTestSubset(0, 0);
-			DoTestSubset(0, 20);
-			DoTestSubset(0, 7);
-			DoTestSubset(0, 8);
-			DoTestSubset(0, 9);
-			DoTestSubset(0, 15);
-			DoTestSubset(0, 16);
-			DoTestSubset(0, 17);
-			DoTestSubset(1, 7);
-			DoTestSubset(1, 8);
-			DoTestSubset(1, 9);
-			DoTestSubset(1, 15);
-			DoTestSubset(1, 16);
-			DoTestSubset(1, 17);
-			DoTestSubset(2, 20);
-			DoTestSubset(3, 20);
-			DoTestSubset(4, 20);
-			DoTestSubset(5, 20);
-			DoTestSubset(6, 20);
-			DoTestSubset(7, 14);
-			DoTestSubset(7, 15);
-			DoTestSubset(7, 16);
-			DoTestSubset(8, 15);
-			DoTestSubset(9, 20);
-			DoTestSubset(10, 20);
-			DoTestSubset(11, 20);
-			DoTestSubset(12, 20);
-			DoTestSubset(13, 20);
-		}
-		
-		/// <summary> Compare a subset against the corresponding portion of the test pattern</summary>
-		private void  DoTestSubset(int start, int end)
-		{
-			BitVector full = CreateSubsetTestVector();
-			BitVector subset = full.Subset(start, end);
-			Assert.AreEqual(end - start, subset.Size());
-			int count = 0;
-			for (int i = start, j = 0; i < end; i++, j++)
-			{
-				if (subsetPattern[i] == 1)
-				{
-					count++;
-					Assert.IsTrue(subset.Get(j));
-				}
-				else
-				{
-					Assert.IsFalse(subset.Get(j));
-				}
-			}
-			Assert.AreEqual(count, subset.Count());
-		}
-		
-		private BitVector CreateSubsetTestVector()
-		{
-			BitVector bv = new BitVector(subsetPattern.Length);
-			for (int i = 0; i < subsetPattern.Length; i++)
-			{
-				if (subsetPattern[i] == 1)
-				{
-					bv.Set(i);
-				}
-			}
-			return bv;
-		}
-	}
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/d3c00f5a/test/core/Util/TestByteBlockPool.cs
----------------------------------------------------------------------
diff --git a/test/core/Util/TestByteBlockPool.cs b/test/core/Util/TestByteBlockPool.cs
new file mode 100644
index 0000000..cef6110
--- /dev/null
+++ b/test/core/Util/TestByteBlockPool.cs
@@ -0,0 +1,59 @@
+using System;
+using System.Collections.Generic;
+using Lucene.Net.Test.Support;
+using Lucene.Net.Util;
+using NUnit.Framework;
+
+namespace Lucene.Net.Test.Util
+{
+    [TestFixture]
+    public class TestByteBlockPool : LuceneTestCase
+    {
+        [Test]
+        public void TestReadAndWrite()
+        {
+            var random = new Random();
+
+            Counter bytesUsed = Counter.NewCounter();
+            var pool = new ByteBlockPool(new ByteBlockPool.DirectTrackingAllocator(bytesUsed));
+            pool.NextBuffer();
+            var reuseFirst = random.NextBool();
+            for (var j = 0; j < 2; j++)
+            {
+
+                var list = new List<BytesRef>();
+                int maxLength = AtLeast(500);
+                int numValues = AtLeast(100);
+                var bytesRef = new BytesRef();
+                for (var i = 0; i < numValues; i++)
+                {
+                    string value = _TestUtil.RandomRealisticUnicodeString(random,
+                       maxLength);
+                    list.Add(new BytesRef(value));
+                    bytesRef.CopyChars(value);
+                    pool.Append(bytesRef);
+                }
+                // verify
+                long position = 0;
+                foreach (var expected in list)
+                {
+                    bytesRef.Grow(expected.length);
+                    bytesRef.length = expected.length;
+                    pool.ReadBytes(position, bytesRef.bytes, bytesRef.offset, bytesRef.length);
+                    assertEquals(expected, bytesRef);
+                    position += bytesRef.length;
+                }
+                pool.Reset(random.NextBool(), reuseFirst);
+                if (reuseFirst)
+                {
+                    assertEquals(ByteBlockPool.BYTE_BLOCK_SIZE, bytesUsed.Get());
+                }
+                else
+                {
+                    assertEquals(0, bytesUsed.Get());
+                    pool.NextBuffer(); // prepare for next iter
+                }
+            }
+        }
+    }
+}

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/d3c00f5a/test/core/Util/TestBytesRef.cs
----------------------------------------------------------------------
diff --git a/test/core/Util/TestBytesRef.cs b/test/core/Util/TestBytesRef.cs
new file mode 100644
index 0000000..4e8b0b9
--- /dev/null
+++ b/test/core/Util/TestBytesRef.cs
@@ -0,0 +1,68 @@
+using System;
+using Lucene.Net.Test.Support;
+using Lucene.Net.Util;
+using NUnit.Framework;
+
+namespace Lucene.Net.Test.Util
+{
+    [TestFixture]
+    public class TestBytesRef : LuceneTestCase
+    {
+        [Test]
+        public void TestEmpty()
+        {
+            var b = new BytesRef();
+            Assert.AreEqual(BytesRef.EMPTY_BYTES, b.bytes);
+            Assert.AreEqual(0, b.offset);
+            Assert.AreEqual(0, b.length);
+        }
+
+        [Test]
+        public void TestFromBytes()
+        {
+            var bytes = new sbyte[] { (sbyte)'a', (sbyte)'b', (sbyte)'c', (sbyte)'d' };
+            var b = new BytesRef(bytes);
+            Assert.AreEqual(bytes, b.bytes);
+            Assert.AreEqual(0, b.offset);
+            Assert.AreEqual(4, b.length);
+
+            var b2 = new BytesRef(bytes, 1, 3);
+            Assert.AreEqual("bcd", b2.Utf8ToString());
+
+            Assert.IsFalse(b.Equals(b2));
+        }
+
+        [Test]
+        public void TestFromChars()
+        {
+            for (var i = 0; i < 100; i++)
+            {
+                string s = _TestUtil.RandomUnicodeString(new Random());
+                string s2 = new BytesRef(s).Utf8ToString();
+                Assert.AreEqual(s, s2);
+            }
+
+            // only for 4.x
+            Assert.Equals("\uFFFF", new BytesRef("\uFFFF").Utf8ToString());
+        }
+
+        // LUCENE-3590, AIOOBE if you append to a bytesref with offset != 0
+        [Test]
+        public void TestAppend()
+        {
+            var bytes = new sbyte[] { (sbyte)'a', (sbyte)'b', (sbyte)'c', (sbyte)'d' };
+            var b = new BytesRef(bytes, 1, 3); // bcd
+            b.Append(new BytesRef("e"));
+            Assert.Equals("bcde", b.Utf8ToString());
+        }
+
+        // LUCENE-3590, AIOOBE if you copy to a bytesref with offset != 0
+        [Test]
+        public void TestCopyBytes()
+        {
+            var bytes = new sbyte[] { (sbyte)'a', (sbyte)'b', (sbyte)'c', (sbyte)'d' };
+            var b = new BytesRef(bytes, 1, 3); // bcd
+            b.CopyBytes(new BytesRef("bcde"));
+            Assert.Equals("bcde", b.Utf8ToString());
+        }
+    }
+}

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/d3c00f5a/test/core/Util/TestBytesRefHash.cs
----------------------------------------------------------------------
diff --git a/test/core/Util/TestBytesRefHash.cs b/test/core/Util/TestBytesRefHash.cs
new file mode 100644
index 0000000..a950da6
--- /dev/null
+++ b/test/core/Util/TestBytesRefHash.cs
@@ -0,0 +1,189 @@
+using System;
+using System.Collections.Generic;
+using Lucene.Net.Support;
+using Lucene.Net.Util;
+using NUnit.Framework;
+using System.Linq;
+
+namespace Lucene.Net.Test.Util
+{
+    [TestFixture]
+    public class TestBytesRefHash : LuceneTestCase
+    {
+        private IList<int> CreateRandomList(int maxSize)
+        {
+            var rnd = new Random();
+            var a = new int[rnd.Next(maxSize) + 1];
+            for (var i = 0; i < a.Length; i++)
+            {
+                a[i] = rnd.Next(a.Length);
+            }
+            return a.ToList();
+        }
+
+        [Test]
+        public void TestQuickSort()
+        {
+            for (int i = 0, c = AtLeast(500); i < c; i++)
+            {
+                var list1 = CreateRandomList(2000);
+                var list2 = new List<int>(list1);
+                CollectionUtil.QuickSort(list1);
+                Collections.Sort(list2);
+                assertEquals(list2, list1);
+
+                list1 = CreateRandomList(2000);
+                list2 = new List<int>(list1);
+                CollectionUtil.QuickSort(list1, Collections.ReverseOrder());
+                Collections.Sort(list2, Collections.ReverseOrder());
+                assertEquals(list2, list1);
+                // reverse back, so we can test that completely backwards sorted array (worst case) is working:
+                CollectionUtil.QuickSort(list1);
+                Collections.Sort(list2);
+                assertEquals(list2, list1);
+            }
+        }
+
+        [Test]
+        public void TestMergeSort()
+        {
+            for (int i = 0, c = AtLeast(500); i < c; i++)
+            {
+                var list1 = CreateRandomList(2000);
+                var list2 = new List<int>(list1);
+                CollectionUtil.MergeSort(list1);
+                Collections.Sort(list2);
+                assertEquals(list2, list1);
+
+                list1 = CreateRandomList(2000);
+                list2 = new List<int>(list1);
+                CollectionUtil.MergeSort(list1, Collections.ReverseOrder());
+                Collections.Sort(list2, Collections.ReverseOrder());
+                assertEquals(list2, list1);
+                // reverse back, so we can test that completely backwards sorted array (worst case) is working:
+                CollectionUtil.MergeSort(list1);
+                Collections.Sort(list2);
+                assertEquals(list2, list1);
+            }
+        }
+
+        [Test]
+        public void TestTimSort()
+        {
+            for (int i = 0, c = AtLeast(500); i < c; i++)
+            {
+                var list1 = CreateRandomList(2000);
+                var list2 = new List<int>(list1);
+                CollectionUtil.TimSort(list1);
+                Collections.Sort(list2);
+                assertEquals(list2, list1);
+
+                list1 = CreateRandomList(2000);
+                list2 = new List<int>(list1);
+                CollectionUtil.TimSort(list1, Collections.ReverseOrder());
+                Collections.Sort(list2, Collections.ReverseOrder());
+                assertEquals(list2, list1);
+                // reverse back, so we can test that completely backwards sorted array (worst case) is working:
+                CollectionUtil.TimSort(list1);
+                Collections.Sort(list2);
+                assertEquals(list2, list1);
+            }
+        }
+
+        [Test]
+        public void TestInsertionSort()
+        {
+            for (int i = 0, c = AtLeast(500); i < c; i++)
+            {
+                var list1 = CreateRandomList(30);
+                var list2 = new List<int>(list1);
+                CollectionUtil.InsertionSort(list1);
+                Collections.Sort(list2);
+                assertEquals(list2, list1);
+
+                list1 = CreateRandomList(30);
+                list2 = new List<int>(list1);
+                CollectionUtil.InsertionSort(list1, Collections.ReverseOrder());
+                Collections.Sort(list2, Collections.ReverseOrder());
+                assertEquals(list2, list1);
+                // reverse back, so we can test that completely backwards sorted array (worst case) is working:
+                CollectionUtil.InsertionSort(list1);
+                Collections.Sort(list2);
+                assertEquals(list2, list1);
+            }
+        }
+
+        [Test]
+        public void TestBinarySort()
+        {
+            for (int i = 0, c = AtLeast(500); i < c; i++)
+            {
+                var list1 = CreateRandomList(30);
+                var list2 = new List<int>(list1);
+                CollectionUtil.BinarySort(list1);
+                Collections.Sort(list2);
+                assertEquals(list2, list1);
+
+                list1 = CreateRandomList(30);
+                list2 = new List<int>(list1);
+                CollectionUtil.BinarySort(list1, Collections.ReverseOrder());
+                Collections.Sort(list2, Collections.ReverseOrder());
+                assertEquals(list2, list1);
+                // reverse back, so we can test that completely backwards sorted array (worst case) is working:
+                CollectionUtil.BinarySort(list1);
+                Collections.Sort(list2);
+                assertEquals(list2, list1);
+            }
+        }
+
+        [Test]
+        public void TestEmptyListSort()
+        {
+            // should produce no exceptions
+            IList<int> list = new int[0].ToList(); // LUCENE-2989
+            CollectionUtil.QuickSort(list);
+            CollectionUtil.MergeSort(list);
+            CollectionUtil.TimSort(list);
+            CollectionUtil.InsertionSort(list);
+            CollectionUtil.BinarySort(list);
+            CollectionUtil.QuickSort(list, Collections.ReverseOrder());
+            CollectionUtil.MergeSort(list, Collections.ReverseOrder());
+            CollectionUtil.TimSort(list, Collections.ReverseOrder());
+            CollectionUtil.InsertionSort(list, Collections.ReverseOrder());
+            CollectionUtil.BinarySort(list, Collections.ReverseOrder());
+
+            // check that empty non-random access lists pass sorting without ex (as sorting is not needed)
+            // .NET Port: LinkedList<T> does not implement IList<T>, so an empty List<int> stands in
+            list = new List<int>();
+            CollectionUtil.QuickSort(list);
+            CollectionUtil.MergeSort(list);
+            CollectionUtil.TimSort(list);
+            CollectionUtil.InsertionSort(list);
+            CollectionUtil.BinarySort(list);
+            CollectionUtil.QuickSort(list, Collections.ReverseOrder());
+            CollectionUtil.MergeSort(list, Collections.ReverseOrder());
+            CollectionUtil.TimSort(list, Collections.ReverseOrder());
+            CollectionUtil.InsertionSort(list, Collections.ReverseOrder());
+            CollectionUtil.BinarySort(list, Collections.ReverseOrder());
+        }
+
+        [Test]
+        public void TestOneElementListSort()
+        {
+            // check that one-element non-random access lists pass sorting without ex (as sorting is not needed)
+            // .NET Port: LinkedList<T> does not implement IList<T>, so a one-element List<int> stands in
+            IList<int> list = new List<int>();
+            list.Add(1);
+            CollectionUtil.QuickSort(list);
+            CollectionUtil.MergeSort(list);
+            CollectionUtil.TimSort(list);
+            CollectionUtil.InsertionSort(list);
+            CollectionUtil.BinarySort(list);
+            CollectionUtil.QuickSort(list, Collections.ReverseOrder());
+            CollectionUtil.MergeSort(list, Collections.ReverseOrder());
+            CollectionUtil.TimSort(list, Collections.ReverseOrder());
+            CollectionUtil.InsertionSort(list, Collections.ReverseOrder());
+            CollectionUtil.BinarySort(list, Collections.ReverseOrder());
+        }
+    }
+}
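
All six sort tests above share one randomized-oracle shape: sort a random list
with the implementation under test, sort a copy with a trusted reference sort,
and require equality. Distilled into a single helper (the name and delegate are
illustrative, not part of the port):

    // oracle harness: CollectionUtil.* is under test, List<int>.Sort is trusted
    private void AssertSortsLikeReference(Action<List<int>> sortUnderTest)
    {
        var rnd = new Random();
        var input = Enumerable.Range(0, rnd.Next(2000) + 1)
                              .Select(_ => rnd.Next(2000))
                              .ToList();
        var expected = new List<int>(input);
        expected.Sort();              // reference result
        sortUnderTest(input);         // implementation being verified
        Assert.AreEqual(expected, input);
    }

    // e.g. AssertSortsLikeReference(list => CollectionUtil.QuickSort(list));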

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/d3c00f5a/test/core/Util/TestCharsRef.cs
----------------------------------------------------------------------
diff --git a/test/core/Util/TestCharsRef.cs b/test/core/Util/TestCharsRef.cs
new file mode 100644
index 0000000..e6da517
--- /dev/null
+++ b/test/core/Util/TestCharsRef.cs
@@ -0,0 +1,150 @@
+using System;
+using System.Text;
+using Lucene.Net.Util;
+using NUnit.Framework;
+
+namespace Lucene.Net.Test.Util
+{
+    [TestFixture]
+    public class TestCharsRef : LuceneTestCase
+    {
+        [Test]
+        public void TestUTF16InUTF8Order()
+        {
+            int numStrings = AtLeast(1000);
+            var utf8 = new BytesRef[numStrings];
+            var utf16 = new CharsRef[numStrings];
+
+            for (var i = 0; i < numStrings; i++)
+            {
+                string s = _TestUtil.RandomUnicodeString(new Random());
+                utf8[i] = new BytesRef(s);
+                utf16[i] = new CharsRef(s);
+            }
+
+            Array.Sort(utf8);
+            Array.Sort(utf16, CharsRef.GetUTF16SortedAsUTF8Comparator());
+
+            for (var i = 0; i < numStrings; i++)
+            {
+                Assert.AreEqual(utf8[i].Utf8ToString(), utf16[i].ToString());
+            }
+        }
+
+        [Test]
+        public void TestAppend()
+        {
+            var charsRef = new CharsRef();
+            var builder = new StringBuilder();
+            int numStrings = AtLeast(10);
+            var random = new Random();
+            for (var i = 0; i < numStrings; i++)
+            {
+                char[] charArray = _TestUtil.RandomRealisticUnicodeString(random, 1, 100).ToCharArray();
+                var offset = random.Next(charArray.Length);
+                var length = charArray.Length - offset;
+                builder.Append(charArray, offset, length);
+                charsRef.Append(charArray, offset, length);
+            }
+
+            Assert.AreEqual(builder.ToString(), charsRef.ToString());
+        }
+
+        [Test]
+        public void TestCopy()
+        {
+            int numIters = AtLeast(10);
+            for (var i = 0; i < numIters; i++)
+            {
+                var charsRef = new CharsRef();
+                char[] charArray = _TestUtil.RandomRealisticUnicodeString(new Random(), 1, 100).ToCharArray();
+                var offset = new Random().Next(charArray.Length);
+                var Length = charArray.Length - offset;
+                var str = new string(charArray, offset, Length);
+                charsRef.CopyChars(charArray, offset, Length);
+                Assert.Equals(str, charsRef.ToString());
+            }
+        }
+
+        // LUCENE-3590, AIOOBE if you Append to a charsref with offset != 0
+        [Test]
+        public void TestAppendChars()
+        {
+            var chars = new char[] { 'a', 'b', 'c', 'd' };
+            var c = new CharsRef(chars, 1, 3); // bcd
+            c.Append(new char[] { 'e' }, 0, 1);
+            Assert.Equals("bcde", c.ToString());
+        }
+
+        // LUCENE-3590, AIOOBE if you copy to a charsref with offset != 0
+        [Test]
+        public void TestCopyChars()
+        {
+            var chars = new char[] { 'a', 'b', 'c', 'd' };
+            var c = new CharsRef(chars, 1, 3); // bcd
+            var otherchars = new char[] { 'b', 'c', 'd', 'e' };
+            c.CopyChars(otherchars, 0, 4);
+            Assert.Equals("bcde", c.ToString());
+        }
+
+        // LUCENE-3590, AIOOBE if you copy to a charsref with offset != 0
+        [Test]
+        public void TestCopyCharsRef()
+        {
+            var chars = new char[] { 'a', 'b', 'c', 'd' };
+            var c = new CharsRef(chars, 1, 3); // bcd
+            var otherchars = new char[] { 'b', 'c', 'd', 'e' };
+            c.CopyChars(new CharsRef(otherchars, 0, 4));
+            Assert.AreEqual("bcde", c.ToString());
+        }
+
+        // LUCENE-3590: fix charsequence to fully obey interface
+        [Test]
+        public void TestCharSequenceCharAt()
+        {
+            var c = new CharsRef("abc");
+
+            Assert.AreEqual('b', c.CharAt(1));
+
+            Assert.Throws<IndexOutOfRangeException>(() => c.CharAt(-1));
+            Assert.Throws<IndexOutOfRangeException>(() => c.CharAt(3));
+        }
+
+        // LUCENE-3590: fix off-by-one in subsequence, and fully obey interface
+        // LUCENE-4671: fix SubSequence
+        [Test]
+        public void TestCharSequenceSubSequence()
+        {
+            var sequences = new[] {
+                new CharsRef("abc"),
+                new CharsRef("0abc".ToCharArray(), 1, 3),
+                new CharsRef("abc0".ToCharArray(), 0, 3),
+                new CharsRef("0abc0".ToCharArray(), 1, 3)
+            };
+
+            foreach (var c in sequences)
+            {
+                DoTestSequence(c);
+            }
+        }
+
+        private void DoTestSequence(ICharSequence c)
+        {
+            // slice
+            Assert.AreEqual("a", c.SubSequence(0, 1).ToString());
+            // mid subsequence
+            Assert.AreEqual("b", c.SubSequence(1, 2).ToString());
+            // end subsequence
+            Assert.AreEqual("bc", c.SubSequence(1, 3).ToString());
+            // empty subsequence
+            Assert.AreEqual("", c.SubSequence(0, 0).ToString());
+
+            Assert.Throws<IndexOutOfRangeException>(() => c.SubSequence(-1, 1));
+            Assert.Throws<IndexOutOfRangeException>(() => c.SubSequence(0, -1));
+            Assert.Throws<IndexOutOfRangeException>(() => c.SubSequence(0, 4));
+            Assert.Throws<IndexOutOfRangeException>(() => c.SubSequence(2, 1));
+        }
+    }
+}
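
The four differently padded CharsRef instances in TestCharSequenceSubSequence
behave identically because both CharAt and SubSequence index relative to the
ref's offset, with an exclusive end index (the LUCENE-4671 fix). The contract
the test pins down, in miniature:

    var c = new CharsRef("0abc0".ToCharArray(), 1, 3);  // view over "abc"
    // c.CharAt(1)          -> 'b'   relative to offset, not the array
    // c.SubSequence(1, 3)  -> "bc"  end index is exclusive
    // c.SubSequence(0, 0)  -> ""    empty slice is legal
    // c.SubSequence(2, 1)  -> IndexOutOfRangeException (start > end)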

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/d3c00f5a/test/core/Util/TestCloseableThreadLocal.cs
----------------------------------------------------------------------
diff --git a/test/core/Util/TestCloseableThreadLocal.cs b/test/core/Util/TestCloseableThreadLocal.cs
index 0cc4fbd..f7b55a9 100644
--- a/test/core/Util/TestCloseableThreadLocal.cs
+++ b/test/core/Util/TestCloseableThreadLocal.cs
@@ -15,23 +15,20 @@
  * limitations under the License.
  */
 
-using System;
-
 using NUnit.Framework;
 
 namespace Lucene.Net.Util
 {
-	
     [TestFixture]
-	public class TestCloseableThreadLocal:LuceneTestCase
+	public class TestCloseableThreadLocal : LuceneTestCase
 	{
-		public const System.String TEST_VALUE = "initvaluetest";
+		public const string TEST_VALUE = "initvaluetest";
 		
         [Test]
 		public virtual void  TestInitValue()
 		{
-			InitValueThreadLocal tl = new InitValueThreadLocal(this);
-			System.String str = (System.String) tl.Get();
+			var tl = new InitValueThreadLocal();
+			var str = (string) tl.Get();
 			Assert.AreEqual(TEST_VALUE, str);
 		}
 		
@@ -40,7 +37,7 @@ namespace Lucene.Net.Util
 		{
 			// Tests that null can be set as a valid value (LUCENE-1805). This
 			// previously failed in get().
-            CloseableThreadLocal<object> ctl = new CloseableThreadLocal<object>();
+            var ctl = new CloseableThreadLocal<object>();
 			ctl.Set(null);
 			Assert.IsNull(ctl.Get());
 		}
@@ -50,33 +47,16 @@
 		{
 			// LUCENE-1805: make sure default get returns null,
 			// twice in a row
-            CloseableThreadLocal<object> ctl = new CloseableThreadLocal<object>();
-			Assert.IsNull(ctl.Get());
+            var ctl = new CloseableThreadLocal<object>();
+			Assert.IsNull(ctl.Get());
 			Assert.IsNull(ctl.Get());
 		}
 
         public class InitValueThreadLocal : CloseableThreadLocal<object>
 		{
-			public InitValueThreadLocal(TestCloseableThreadLocal enclosingInstance)
-			{
-				InitBlock(enclosingInstance);
-			}
-			private void  InitBlock(TestCloseableThreadLocal enclosingInstance)
-			{
-				this.enclosingInstance = enclosingInstance;
-			}
-			private TestCloseableThreadLocal enclosingInstance;
-			public TestCloseableThreadLocal Enclosing_Instance
-			{
-				get
-				{
-					return enclosingInstance;
-				}
-				
-			}
-			public /*protected internal*/ override System.Object InitialValue()
+			public /*protected internal*/ override object InitialValue()
 			{
-				return Lucene.Net.Util.TestCloseableThreadLocal.TEST_VALUE;
+				return TEST_VALUE;
 			}
 		}
 	}
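
With the enclosing-instance plumbing removed, overriding InitialValue is the
whole extension surface of CloseableThreadLocal. A hedged usage sketch modeled
on the test's InitValueThreadLocal (the StringBuilder payload is illustrative):

    // each thread lazily receives its own value on its first Get()
    public class ScratchLocal : CloseableThreadLocal<object>
    {
        public override object InitialValue()
        {
            return new System.Text.StringBuilder();  // created once per thread
        }
    }

    // var tl = new ScratchLocal();
    // var sb = (System.Text.StringBuilder) tl.Get();  // this thread's instance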

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/d3c00f5a/test/core/Util/TestCollectionUtil.cs
----------------------------------------------------------------------
diff --git a/test/core/Util/TestCollectionUtil.cs b/test/core/Util/TestCollectionUtil.cs
new file mode 100644
index 0000000..baa7bca
--- /dev/null
+++ b/test/core/Util/TestCollectionUtil.cs
@@ -0,0 +1,189 @@
+using System;
+using System.Collections.Generic;
+using System.Linq;
+using Lucene.Net.Support;
+using Lucene.Net.Util;
+using NUnit.Framework;
+
+namespace Lucene.Net.Test.Util
+{
+    [TestFixture]
+    public class TestCollectionUtil : LuceneTestCase
+    {
+        private List<int> CreateRandomList(int maxSize)
+        {
+            Random rnd = new Random();
+            int[] a = new int[rnd.Next(maxSize) + 1];
+            for (int i = 0; i < a.Length; i++)
+            {
+                a[i] = rnd.Next(a.Length);
+            }
+            return a.ToList();
+        }
+
+        [Test]
+        public void TestQuickSort()
+        {
+            for (int i = 0, c = AtLeast(500); i < c; i++)
+            {
+                var list1 = CreateRandomList(2000);
+                var list2 = new List<int>(list1);
+                CollectionUtil.QuickSort(list1);
+                Collections.Sort(list2);
+                assertEquals(list2, list1);
+
+                list1 = CreateRandomList(2000);
+                list2 = new List<int>(list1);
+                CollectionUtil.QuickSort(list1, Collections.ReverseOrder());
+                Collections.Sort(list2, Collections.ReverseOrder());
+                assertEquals(list2, list1);
+                // reverse back, so we can test that completely backwards sorted array (worst case) is working:
+                CollectionUtil.QuickSort(list1);
+                Collections.Sort(list2);
+                assertEquals(list2, list1);
+            }
+        }
+
+        [Test]
+        public void TestMergeSort()
+        {
+            for (int i = 0, c = AtLeast(500); i < c; i++)
+            {
+                var list1 = CreateRandomList(2000); 
+                var list2 = new List<int>(list1);
+                CollectionUtil.MergeSort(list1);
+                Collections.Sort(list2);
+                assertEquals(list2, list1);
+
+                list1 = CreateRandomList(2000);
+                list2 = new List<int>(list1);
+                CollectionUtil.MergeSort(list1, Collections.ReverseOrder());
+                Collections.Sort(list2, Collections.ReverseOrder());
+                assertEquals(list2, list1);
+                // reverse back, so we can test that completely backwards sorted array (worst case) is working:
+                CollectionUtil.MergeSort(list1);
+                Collections.Sort(list2);
+                assertEquals(list2, list1);
+            }
+        }
+
+        [Test]
+        public void TestTimSort()
+        {
+            for (int i = 0, c = AtLeast(500); i < c; i++)
+            {
+                var list1 = CreateRandomList(2000);
+                var list2 = new List<int>(list1);
+                CollectionUtil.TimSort(list1);
+                Collections.Sort(list2);
+                assertEquals(list2, list1);
+
+                list1 = CreateRandomList(2000);
+                list2 = new List<int>(list1);
+                CollectionUtil.TimSort(list1, Collections.ReverseOrder());
+                Collections.Sort(list2, Collections.ReverseOrder());
+                assertEquals(list2, list1);
+                // reverse back, so we can test that completely backwards sorted array (worst case) is working:
+                CollectionUtil.TimSort(list1);
+                Collections.Sort(list2);
+                assertEquals(list2, list1);
+            }
+        }
+
+        [Test]
+        public void TestInsertionSort()
+        {
+            for (int i = 0, c = AtLeast(500); i < c; i++)
+            {
+                var list1 = CreateRandomList(30);
+                var list2 = new List<int>(list1);
+                CollectionUtil.InsertionSort(list1);
+                Collections.Sort(list2);
+                assertEquals(list2, list1);
+
+                list1 = CreateRandomList(30);
+                list2 = new List<int>(list1);
+                CollectionUtil.InsertionSort(list1, Collections.ReverseOrder());
+                Collections.Sort(list2, Collections.ReverseOrder());
+                assertEquals(list2, list1);
+                // reverse back, so we can test that completely backwards sorted array (worst case) is working:
+                CollectionUtil.InsertionSort(list1);
+                Collections.Sort(list2);
+                assertEquals(list2, list1);
+            }
+        }
+
+        [Test]
+        public void TestBinarySort()
+        {
+            for (int i = 0, c = AtLeast(500); i < c; i++)
+            {
+                List<int> list1 = CreateRandomList(30), list2 = new List<int>(list1);
+                CollectionUtil.BinarySort(list1);
+                Collections.Sort(list2);
+                assertEquals(list2, list1);
+
+                list1 = CreateRandomList(30);
+                list2 = new List<int>(list1);
+                CollectionUtil.BinarySort(list1, Collections.ReverseOrder());
+                Collections.Sort(list2, Collections.ReverseOrder());
+                assertEquals(list2, list1);
+                // reverse back, so we can test that completely backwards sorted array (worst case) is working:
+                CollectionUtil.BinarySort(list1);
+                Collections.Sort(list2);
+                assertEquals(list2, list1);
+            }
+        }
+
+        [Test]
+        public void TestEmptyListSort()
+        {
+            // should produce no exceptions
+            List<int> list = new List<int>(); // LUCENE-2989
+            CollectionUtil.QuickSort(list);
+            CollectionUtil.MergeSort(list);
+            CollectionUtil.TimSort(list);
+            CollectionUtil.InsertionSort(list);
+            CollectionUtil.BinarySort(list);
+            CollectionUtil.QuickSort(list, Collections.ReverseOrder());
+            CollectionUtil.MergeSort(list, Collections.ReverseOrder());
+            CollectionUtil.TimSort(list, Collections.ReverseOrder());
+            CollectionUtil.InsertionSort(list, Collections.ReverseOrder());
+            CollectionUtil.BinarySort(list, Collections.ReverseOrder());
+
+            // check that empty non-random access lists pass sorting without ex (as sorting is not needed)
+            // .NET Port: LinkedList<T> does not implement IList<T>, so an empty List<int> stands in
+            list = new List<int>();
+            CollectionUtil.QuickSort(list);
+            CollectionUtil.MergeSort(list);
+            CollectionUtil.TimSort(list);
+            CollectionUtil.InsertionSort(list);
+            CollectionUtil.BinarySort(list);
+            CollectionUtil.QuickSort(list, Collections.ReverseOrder());
+            CollectionUtil.MergeSort(list, Collections.ReverseOrder());
+            CollectionUtil.TimSort(list, Collections.ReverseOrder());
+            CollectionUtil.InsertionSort(list, Collections.ReverseOrder());
+            CollectionUtil.BinarySort(list, Collections.ReverseOrder());
+        }
+
+        [Test]
+        public void TestOneElementListSort()
+        {
+            // check that one-element non-random access lists pass sorting without ex (as sorting is not needed)
+            // .NET Port: LinkedList<T> does not implement IList<T>, so a one-element List<int> stands in
+            List<int> list = new List<int>();
+            list.Add(1);
+            CollectionUtil.QuickSort(list);
+            CollectionUtil.MergeSort(list);
+            CollectionUtil.TimSort(list);
+            CollectionUtil.InsertionSort(list);
+            CollectionUtil.BinarySort(list);
+            CollectionUtil.QuickSort(list, Collections.ReverseOrder());
+            CollectionUtil.MergeSort(list, Collections.ReverseOrder());
+            CollectionUtil.TimSort(list, Collections.ReverseOrder());
+            CollectionUtil.InsertionSort(list, Collections.ReverseOrder());
+            CollectionUtil.BinarySort(list, Collections.ReverseOrder());
+        }
+    }
+}
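
Collections.ReverseOrder() above plays the role of Java's reverse-order
Comparator. Assuming the support helper behaves like the Java original, it is
equivalent to this inverted-comparison IComparer (a sketch, not the port's
actual source):

    // invert the natural ordering by swapping the operands
    public class ReverseOrderComparer<T> : IComparer<T> where T : IComparable<T>
    {
        public int Compare(T x, T y)
        {
            return y.CompareTo(x);
        }
    }

    // CollectionUtil.TimSort(list, new ReverseOrderComparer<int>());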

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/d3c00f5a/test/core/Util/TestDoubleBarrelLRUCache.cs
----------------------------------------------------------------------
diff --git a/test/core/Util/TestDoubleBarrelLRUCache.cs b/test/core/Util/TestDoubleBarrelLRUCache.cs
new file mode 100644
index 0000000..fabe4ab
--- /dev/null
+++ b/test/core/Util/TestDoubleBarrelLRUCache.cs
@@ -0,0 +1,215 @@
+using System;
+using Lucene.Net.Support;
+using Lucene.Net.Util;
+using NUnit.Framework;
+
+namespace Lucene.Net.Test.Util
+{
+    [TestFixture]
+    public class TestDoubleBarrelLRUCache : LuceneTestCase
+    {
+        // helper driven by TestLRUCache below; not itself an NUnit test
+        private void TestCache(DoubleBarrelLRUCache<CloneableInteger, object> cache, int n)
+        {
+            var dummy = new object();
+
+            for (var i = 0; i < n; i++)
+            {
+                cache[new CloneableInteger(i)] = dummy;
+            }
+
+            // access every 2nd item in cache
+            for (var i = 0; i < n; i += 2)
+            {
+                assertNotNull(cache[new CloneableInteger(i)]);
+            }
+
+            // add n/2 elements to cache, the ones that weren't
+            // touched in the previous loop should now be thrown away
+            for (var i = n; i < n + (n / 2); i++)
+            {
+                cache[new CloneableInteger(i)] = dummy;
+            }
+
+            // access every 4th item in cache
+            for (var i = 0; i < n; i += 4)
+            {
+                assertNotNull(cache[new CloneableInteger(i)]);
+            }
+
+            // add 3/4n elements to cache, the ones that weren't
+            // touched in the previous loops should now be thrown away
+            for (var i = n; i < n + (n * 3 / 4); i++)
+            {
+                cache[new CloneableInteger(i)] = dummy;
+            }
+
+            // access every 4th item in cache
+            for (var i = 0; i < n; i += 4)
+            {
+                assertNotNull(cache[new CloneableInteger(i)]);
+            }
+        }
+
+        [Test]
+        public void TestLRUCache()
+        {
+            var n = 100;
+            TestCache(new DoubleBarrelLRUCache<CloneableInteger, object>(n), n);
+        }
+
+        private class CacheThread : ThreadClass
+        {
+            private TestDoubleBarrelLRUCache parent;
+
+            private CloneableObject[] objs;
+            private DoubleBarrelLRUCache<CloneableObject, object> c;
+            private DateTime endTime;
+            public volatile bool failed;
+
+            public CacheThread(TestDoubleBarrelLRUCache parent, DoubleBarrelLRUCache<CloneableObject, object> c,
+                               CloneableObject[] objs, long endTime)
+            {
+                this.parent = parent;
+
+                this.c = c;
+                this.objs = objs;
+                this.endTime = new DateTime(1970, 1, 1).AddMilliseconds(endTime);
+            }
+
+            public override void Run()
+            {
+                try
+                {
+                    long count = 0;
+                    long miss = 0;
+                    long hit = 0;
+                    var limit = objs.Length;
+
+                    while (true)
+                    {
+                        var obj = objs[(int)((count / 2) % limit)];
+                        var v = c[obj];
+                        if (v == null)
+                        {
+                            c[new CloneableObject(obj)] = obj;
+                            //c.Put(new CloneableObject(obj), obj);
+                            miss++;
+                        }
+                        else
+                        {
+                            //assert obj == v;
+                            hit++;
+                        }
+                        if ((++count % 10000) == 0)
+                        {
+                            if (DateTime.Now >= endTime)
+                            {
+                                break;
+                            }
+                        }
+                    }
+
+                    parent.AddResults(miss, hit);
+                }
+                catch (Exception)
+                {
+                    failed = true;
+                    throw;
+                }
+            }
+        }
+
+        long totMiss, totHit;
+        void AddResults(long miss, long hit)
+        {
+            lock (this) // the Java original is synchronized; threads report concurrently
+            {
+                totMiss += miss;
+                totHit += hit;
+            }
+        }
+
+        [Test]
+        public void TestThreadCorrectness()
+        {
+            var NUM_THREADS = 4;
+            var CACHE_SIZE = 512;
+            var OBJ_COUNT = 3 * CACHE_SIZE;
+
+            var c = new DoubleBarrelLRUCache<CloneableObject, object>(1024);
+
+            var objs = new CloneableObject[OBJ_COUNT];
+            for (var i = 0; i < OBJ_COUNT; i++)
+            {
+                objs[i] = new CloneableObject(new object());
+            }
+
+            var threads = new CacheThread[NUM_THREADS];
+            var endTime = (long)((DateTime.Now.Subtract(new DateTime(1970, 1, 1))
+                .Add(TimeSpan.FromMilliseconds(1000))).TotalMilliseconds);
+            //long endTime = System.currentTimeMillis() + 1000L;
+            for (var i = 0; i < NUM_THREADS; i++)
+            {
+                threads[i] = new CacheThread(this, c, objs, endTime);
+                threads[i].Start();
+            }
+            for (var i = 0; i < NUM_THREADS; i++)
+            {
+                threads[i].Join();
+                Assert.IsFalse(threads[i].failed);
+            }
+            //Console.WriteLine("hits=" + totHit + " misses=" + totMiss);
+        }
+
+        public class CloneableObject : DoubleBarrelLRUCache.CloneableKey
+        {
+            private object value;
+
+            public CloneableObject(object value)
+            {
+                this.value = value;
+            }
+
+            public override bool Equals(object other)
+            {
+                return this.value.Equals(((CloneableObject)other).value);
+            }
+
+            public override int GetHashCode()
+            {
+                return value.GetHashCode();
+            }
+
+            public override DoubleBarrelLRUCache.CloneableKey Clone()
+            {
+                return new CloneableObject(value);
+            }
+        }
+
+        public class CloneableInteger : DoubleBarrelLRUCache.CloneableKey
+        {
+            private int value;
+
+            public CloneableInteger(int value)
+            {
+                this.value = value;
+            }
+
+            public override bool Equals(object other)
+            {
+                return this.value.Equals(((CloneableInteger)other).value);
+            }
+
+            public override int GetHashCode()
+            {
+                return value.GetHashCode();
+            }
+
+            public override DoubleBarrelLRUCache.CloneableKey Clone()
+            {
+                return new CloneableInteger(value);
+            }
+        }
+    }
+}
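
The two key classes at the bottom encode the cache's key contract: the cache
may clone and retain a key, so Clone() must produce a copy that Equals() and
GetHashCode() treat as identical to the original. What that buys, sketched with
the CloneableInteger type defined above:

    // any Equals()-identical key finds the entry, not just the stored instance
    var cache = new DoubleBarrelLRUCache<CloneableInteger, object>(8);
    cache[new CloneableInteger(42)] = "answer";
    object hit = cache[new CloneableInteger(42)];   // "answer"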


[03/50] [abbrv] git commit: resolve merge conflict

Posted by mh...@apache.org.
resolve merge conflict


Project: http://git-wip-us.apache.org/repos/asf/lucenenet/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucenenet/commit/e47e6637
Tree: http://git-wip-us.apache.org/repos/asf/lucenenet/tree/e47e6637
Diff: http://git-wip-us.apache.org/repos/asf/lucenenet/diff/e47e6637

Branch: refs/heads/branch_4x
Commit: e47e6637572c7b58b2a366caee7f3e7f1a308274
Parents: 64c13f3 d4b0864
Author: Mike Potts <mi...@feature23.com>
Authored: Wed Jul 10 23:24:34 2013 -0400
Committer: Mike Potts <mi...@feature23.com>
Committed: Wed Jul 10 23:24:34 2013 -0400

----------------------------------------------------------------------
 build/vs2012/test/Lucene.Net.Test.sln           |  58 +-
 src/contrib/Analyzers/Contrib.Analyzers.csproj  |  10 +
 src/contrib/Analyzers/Core/KeywordAnalyzer.cs   |  15 +
 src/contrib/Analyzers/Core/KeywordTokenizer.cs  |  87 +++
 .../Analyzers/Core/KeywordTokenizerFactory.cs   |  12 +
 .../Analyzers/Support/StringExtensions.cs       |  15 +
 .../Analyzers/Util/AbstractAnalysisFactory.cs   | 355 +++++++++
 src/contrib/Analyzers/Util/CharArrayMap.cs      | 480 ++++++++++++
 src/contrib/Analyzers/Util/CharArraySet.cs      |  15 +
 src/contrib/Analyzers/Util/CharacterUtils.cs    | 130 ++++
 src/contrib/Analyzers/Util/IResourceLoader.cs   |  17 +
 src/contrib/Analyzers/Util/TokenizerFactory.cs  |  11 +
 .../CompressingStoredFieldsFormat.cs            | 168 +++--
 .../CompressingStoredFieldsReader.cs            | 744 ++++++++++---------
 src/core/Codecs/Compressing/CompressionMode.cs  | 446 +++++------
 src/core/Codecs/Compressing/LZ4.cs              |   2 +-
 src/core/Index/IndexWriter.cs                   |   2 +-
 src/core/Support/Character.cs                   |  19 +
 src/core/Util/Version.cs                        |   2 +-
 test/core/Analysis/BaseTokenStreamTestCase.cs   | 257 -------
 test/core/Lucene.Net.Test.csproj                |  10 +-
 test/core/Util/LuceneTestCase.cs                | 297 --------
 .../Analysis/BaseTokenStreamTestCase.cs         | 532 +++++++++++++
 .../Lucene.Net.TestFramework.csproj             |  70 ++
 test/test-framework/Properties/AssemblyInfo.cs  |  36 +
 test/test-framework/Support/RandomizedTest.cs   |  41 +
 test/test-framework/Support/SystemProperties.cs |  26 +
 test/test-framework/Util/LuceneTestCase.cs      | 469 ++++++++++++
 28 files changed, 3100 insertions(+), 1226 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucenenet/blob/e47e6637/src/core/Codecs/Compressing/CompressingStoredFieldsReader.cs
----------------------------------------------------------------------
diff --cc src/core/Codecs/Compressing/CompressingStoredFieldsReader.cs
index 9c55e07,748efab..a4d7f7d
--- a/src/core/Codecs/Compressing/CompressingStoredFieldsReader.cs
+++ b/src/core/Codecs/Compressing/CompressingStoredFieldsReader.cs
@@@ -20,357 -20,426 +20,427 @@@ using Lucene.Net.Store
  using Lucene.Net.Support;
  using Lucene.Net.Util;
  using Lucene.Net.Util.Packed;
+ using System;
+ 
  namespace Lucene.Net.Codecs.Compressing
  {
- /**
-  * {@link StoredFieldsReader} impl for {@link CompressingStoredFieldsFormat}.
-  * @lucene.experimental
-  */
- public sealed class CompressingStoredFieldsReader: StoredFieldsReader {
- 
-   private FieldInfos fieldInfos;
-   private CompressingStoredFieldsIndexReader indexReader;
-   private IndexInput fieldsStream;
-   private int packedIntsVersion;
-   private CompressionMode compressionMode;
-   private Decompressor decompressor;
-   private BytesRef bytes;
-   private int numDocs;
-   private bool closed;
- 
-   // used by clone
-   private CompressingStoredFieldsReader(CompressingStoredFieldsReader reader) {
-     this.fieldInfos = reader.fieldInfos;
-     this.fieldsStream = (IndexInput)reader.fieldsStream.Clone();
-     this.indexReader = reader.indexReader.clone();
-     this.packedIntsVersion = reader.packedIntsVersion;
-     this.compressionMode = reader.compressionMode;
-     this.decompressor = (Decompressor)reader.decompressor.Clone();
-     this.numDocs = reader.numDocs;
-     this.bytes = new BytesRef(reader.bytes.bytes.Length);
-     this.closed = false;
-   }
- 
-   /** Sole constructor. */
-   public CompressingStoredFieldsReader(Directory d, SegmentInfo si, string segmentSuffix, FieldInfos fn,
-       IOContext context, string formatName, CompressionMode compressionMode) 
-   {
-     this.compressionMode = compressionMode;
-     string segment = si.name;
-     bool success = false;
-     fieldInfos = fn;
-     numDocs = si.DocCount;
-     IndexInput indexStream = null;
-     try {
-       fieldsStream = d.OpenInput(IndexFileNames.SegmentFileName(segment, segmentSuffix, FIELDS_EXTENSION), context);
-       string indexStreamFN = IndexFileNames.SegmentFileName(segment, segmentSuffix, FIELDS_INDEX_EXTENSION);
-       indexStream = d.OpenInput(indexStreamFN, context);
- 
-       string codecNameIdx = formatName + CODEC_SFX_IDX;
-       string codecNameDat = formatName + CODEC_SFX_DAT;
-       CodecUtil.CheckHeader(indexStream, codecNameIdx, VERSION_START, VERSION_CURRENT);
-       CodecUtil.CheckHeader(fieldsStream, codecNameDat, VERSION_START, VERSION_CURRENT);
- 
-       indexReader = new CompressingStoredFieldsIndexReader(indexStream, si);
-       indexStream = null;
- 
-       packedIntsVersion = fieldsStream.ReadVInt();
-       decompressor = compressionMode.newDecompressor();
-       this.bytes = new BytesRef();
- 
-       success = true;
-     } finally {
-       if (!success) {
-         IOUtils.closeWhileHandlingException(this, indexStream);
-       }
-     }
-   }
- 
-   /**
-    * @throws AlreadyClosedException if this FieldsReader is closed
-    */
-   private void ensureOpen() {
-     if (closed) {
-       throw new AlreadyClosedException("this FieldsReader is closed");
-     }
-   }
- 
-   /** 
-    * Close the underlying {@link IndexInput}s.
-    */
-   public override void close() {
-     if (!closed) {
-       IOUtils.Close(fieldsStream, indexReader);
-       closed = true;
-     }
-   }
- 
-   private static void readField(ByteArrayDataInput input, StoredFieldVisitor visitor, FieldInfo info, int bits) {
-     switch (bits & TYPE_MASK) {
-       case BYTE_ARR:
-         int length = input.readVInt();
-         byte[] data = new byte[length];
-         input.readBytes(data, 0, length);
-         visitor.binaryField(info, data);
-         break;
-       case STRING:
-         length = input.readVInt();
-         data = new byte[length];
-         input.readBytes(data, 0, length);
-         visitor.stringField(info, new string(data, IOUtils.CHARSET_UTF_8));
-         break;
-       case NUMERIC_INT:
-         visitor.intField(info, input.readInt());
-         break;
-       case NUMERIC_FLOAT:
-         visitor.floatField(info, Float.intBitsToFloat(input.readInt()));
-         break;
-       case NUMERIC_LONG:
-         visitor.longField(info, input.readLong());
-         break;
-       case NUMERIC_DOUBLE:
-         visitor.doubleField(info, Double.longBitsToDouble(input.readLong()));
-         break;
-       default:
-         throw new AssertionError("Unknown type flag: " + Integer.toHexString(bits));
-     }
-   }
- 
-   private static void skipField(ByteArrayDataInput input, int bits) {
-     switch (bits & TYPE_MASK) {
-       case BYTE_ARR:
-       case STRING:
-         int length = input.readVInt();
-         input.skipBytes(length);
-         break;
-       case NUMERIC_INT:
-       case NUMERIC_FLOAT:
-         input.readInt();
-         break;
-       case NUMERIC_LONG:
-       case NUMERIC_DOUBLE:
-         input.readLong();
-         break;
-       default:
-         throw new AssertionError("Unknown type flag: " + Integer.toHexString(bits));
-     }
-   }
- 
-   public override void visitDocument(int docID, StoredFieldVisitor visitor)
-   {
-     fieldsStream.Seek(indexReader.getStartPointer(docID));
- 
-     int docBase = fieldsStream.ReadVInt();
-     int chunkDocs = fieldsStream.ReadVInt();
-     if (docID < docBase
-         || docID >= docBase + chunkDocs
-         || docBase + chunkDocs > numDocs) {
-       throw new CorruptIndexException("Corrupted: docID=" + docID
-           + ", docBase=" + docBase + ", chunkDocs=" + chunkDocs
-           + ", numDocs=" + numDocs);
-     }
 +
-     int numStoredFields, length, offset, totalLength;
-     if (chunkDocs == 1) {
-       numStoredFields = fieldsStream.ReadVInt();
-       offset = 0;
-       length = fieldsStream.ReadVInt();
-       totalLength = length;
-     } else {
-       int bitsPerStoredFields = fieldsStream.ReadVInt();
-       if (bitsPerStoredFields == 0) {
-         numStoredFields = fieldsStream.ReadVInt();
-       } else if (bitsPerStoredFields > 31) {
-         throw new CorruptIndexException("bitsPerStoredFields=" + bitsPerStoredFields);
-       } else {
-         long filePointer = fieldsStream.getFilePointer();
-         PackedInts.Reader reader = PackedInts.GetDirectReaderNoHeader(fieldsStream, PackedInts.Format.PACKED, packedIntsVersion, chunkDocs, bitsPerStoredFields);
-         numStoredFields = (int) (reader.Get(docID - docBase));
-         fieldsStream.Seek(filePointer + PackedInts.Format.PACKED.ByteCount(packedIntsVersion, chunkDocs, bitsPerStoredFields));
-       }
- 
-       int bitsPerLength = fieldsStream.ReadVInt();
-       if (bitsPerLength == 0) {
-         length = fieldsStream.ReadVInt();
-         offset = (docID - docBase) * length;
-         totalLength = chunkDocs * length;
-       } else if (bitsPerStoredFields > 31) {
-         throw new CorruptIndexException("bitsPerLength=" + bitsPerLength);
-       } else {
-         PackedInts.ReaderIterator it = (PackedInts.ReaderIterator)PackedInts.GetReaderIteratorNoHeader(fieldsStream, PackedInts.Format.PACKED, packedIntsVersion, chunkDocs, bitsPerLength, 1);
-         int off = 0;
-         for (int i = 0; i < docID - docBase; ++i) {
-           //TODO - HACKMP - Paul, this is a point of concern for me, in that everything from this file, and the 
-           //decompressor.Decompress() contract is looking for int.  But, I don't want to simply cast from long to int here.
-           off += it.Next();
+     /**
+      * {@link StoredFieldsReader} impl for {@link CompressingStoredFieldsFormat}.
+      * @lucene.experimental
+      */
+     public sealed class CompressingStoredFieldsReader : StoredFieldsReader
+     {
+ 
+         private FieldInfos fieldInfos;
+         private CompressingStoredFieldsIndexReader indexReader;
+         private IndexInput fieldsStream;
+         private int packedIntsVersion;
+         private CompressionMode compressionMode;
+         private Decompressor decompressor;
+         private BytesRef bytes;
+         private int numDocs;
+         private bool closed;
+ 
+         // used by clone
+         private CompressingStoredFieldsReader(CompressingStoredFieldsReader reader)
+         {
+             this.fieldInfos = reader.fieldInfos;
+             this.fieldsStream = (IndexInput)reader.fieldsStream.Clone();
+             this.indexReader = reader.indexReader.clone();
+             this.packedIntsVersion = reader.packedIntsVersion;
+             this.compressionMode = reader.compressionMode;
+             this.decompressor = (Decompressor)reader.decompressor.Clone();
+             this.numDocs = reader.numDocs;
+             this.bytes = new BytesRef(reader.bytes.bytes.Length);
+             this.closed = false;
          }
-         offset = off;
-         length = (int) it.Next();
-         off += length;
-         for (int i = docID - docBase + 1; i < chunkDocs; ++i) {
-           off += it.Next();
+ 
+         /** Sole constructor. */
+         public CompressingStoredFieldsReader(Directory d, SegmentInfo si, string segmentSuffix, FieldInfos fn,
+             IOContext context, string formatName, CompressionMode compressionMode)
+         {
+             this.compressionMode = compressionMode;
+             string segment = si.name;
+             bool success = false;
+             fieldInfos = fn;
+             numDocs = si.DocCount;
+             IndexInput indexStream = null;
+             try
+             {
+                 fieldsStream = d.OpenInput(IndexFileNames.SegmentFileName(segment, segmentSuffix, FIELDS_EXTENSION), context);
+                 string indexStreamFN = IndexFileNames.SegmentFileName(segment, segmentSuffix, FIELDS_INDEX_EXTENSION);
+                 indexStream = d.OpenInput(indexStreamFN, context);
+ 
+                 string codecNameIdx = formatName + CODEC_SFX_IDX;
+                 string codecNameDat = formatName + CODEC_SFX_DAT;
+                 CodecUtil.CheckHeader(indexStream, codecNameIdx, VERSION_START, VERSION_CURRENT);
+                 CodecUtil.CheckHeader(fieldsStream, codecNameDat, VERSION_START, VERSION_CURRENT);
+ 
+                 indexReader = new CompressingStoredFieldsIndexReader(indexStream, si);
+                 indexStream = null;
+ 
+                 packedIntsVersion = fieldsStream.ReadVInt();
+                 decompressor = compressionMode.newDecompressor();
+                 this.bytes = new BytesRef();
+ 
+                 success = true;
+             }
+             finally
+             {
+                 if (!success)
+                 {
+                     IOUtils.CloseWhileHandlingException((IDisposable)this, indexStream);
+                 }
+             }
          }
-         totalLength = off;
-       }
-     }
  
-     if ((length == 0) != (numStoredFields == 0)) {
-       throw new CorruptIndexException("length=" + length + ", numStoredFields=" + numStoredFields);
-     }
-     if (numStoredFields == 0) {
-       // nothing to do
-       return;
-     }
+         /**
+          * @throws AlreadyClosedException if this FieldsReader is closed
+          */
+         private void ensureOpen()
+         {
+             if (closed)
+             {
+                 throw new AlreadyClosedException("this FieldsReader is closed");
+             }
+         }
  
-     decompressor.Decompress(fieldsStream, totalLength, offset, length, bytes);
- 
-     ByteArrayDataInput documentInput = new ByteArrayDataInput(bytes.bytes, bytes.offset, bytes.length);
-     for (int fieldIDX = 0; fieldIDX < numStoredFields; fieldIDX++) {
-       long infoAndBits = documentInput.ReadVLong();
-       int fieldNumber = Number.URShift(infoAndBits, TYPE_BITS); // (infoAndBits >>> TYPE_BITS);
-       FieldInfo fieldInfo = fieldInfos.FieldInfo(fieldNumber);
- 
-       int bits = (int) (infoAndBits & TYPE_MASK);
- 
-       switch(visitor.NeedsField(fieldInfo)) {
-         case YES:
-           readField(documentInput, visitor, fieldInfo, bits);
-           break;
-         case NO:
-           skipField(documentInput, bits);
-           break;
-         case STOP:
-           return;
-       }
-     }
-   }
- 
-   public override StoredFieldsReader clone() {
-     ensureOpen();
-     return new CompressingStoredFieldsReader(this);
-   }
- 
-   CompressionMode getCompressionMode() {
-     return compressionMode;
-   }
- 
-   ChunkIterator chunkIterator(int startDocID) {
-     ensureOpen();
-     fieldsStream.Seek(indexReader.getStartPointer(startDocID));
-     return new ChunkIterator();
-   }
- 
-   private readonly class ChunkIterator {
- 
-     private IndexInput _indexInput;
-     private CompressingStoredFieldsReader _indexReader;
-     private int _numOfDocs;
-     private int _packedIntsVersion;
-     BytesRef bytes;
-     int docBase;
-     int chunkDocs;
-     int[] numStoredFields;
-     int[] lengths;
- 
-     public ChunkIterator(IndexInput indexInput, CompressingStoredFieldsReader indexReader, 
-                             int numOfDocs, int packedIntsVersion) {
-         _indexInput = indexInput;
-         _indexReader = indexReader;
-         _numOfDocs = numOfDocs;
-         _packedIntsVersion = packedIntsVersion;
-       this.docBase = -1;
-       bytes = new BytesRef();
-       numStoredFields = new int[1];
-       lengths = new int[1];
-     }
+         /** 
+          * Close the underlying {@link IndexInput}s.
+          */
+         public override void Close()
+         {
+             if (!closed)
+             {
+                 IOUtils.Close(fieldsStream, indexReader);
+                 closed = true;
+             }
+         }
  
-     /**
-      * Return the decompressed size of the chunk
-      */
-     private int chunkSize() {
-       int sum = 0;
-       for (int i = 0; i < chunkDocs; ++i) {
-         sum += lengths[i];
-       }
-       return sum;
-     }
+         private static void ReadField(ByteArrayDataInput input, StoredFieldVisitor visitor, FieldInfo info, int bits)
+         {
+             switch (bits & TYPE_MASK)
+             {
+                 case BYTE_ARR:
+                     int length = input.ReadVInt();
+                     byte[] data = new byte[length];
+                     input.ReadBytes(data, 0, length);
+                     visitor.BinaryField(info, data);
+                     break;
+                 case STRING:
+                     length = input.ReadVInt();
+                     data = new byte[length];
+                     input.ReadBytes(data, 0, length);
+                     visitor.StringField(info, IOUtils.CHARSET_UTF_8.GetString(data));
+                     break;
+                 case NUMERIC_INT:
+                     visitor.IntField(info, input.ReadInt());
+                     break;
+                 case NUMERIC_FLOAT:
+                     visitor.FloatField(info, Number.IntBitsToFloat(input.ReadInt()));
+                     break;
+                 case NUMERIC_LONG:
+                     visitor.LongField(info, input.ReadLong());
+                     break;
+                 case NUMERIC_DOUBLE:
+                     visitor.DoubleField(info, BitConverter.Int64BitsToDouble(input.ReadLong()));
+                     break;
+                 default:
+                     throw new InvalidOperationException("Unknown type flag: " + bits.ToString("x"));
+             }
+         }
  
-     /**
-      * Go to the chunk containing the provided doc ID.
-      */
-     void next(int doc) {
-       _indexInput.Seek(_indexReader.getStartPointer(doc));
- 
-       int docBase = _indexInput.ReadVInt();
-       int chunkDocs = _indexInput.ReadVInt();
-       if (docBase < this.docBase + this.chunkDocs
-           || docBase + chunkDocs > _numOfDocs) {
-         throw new CorruptIndexException("Corrupted: current docBase=" + this.docBase
-             + ", current numDocs=" + this.chunkDocs + ", new docBase=" + docBase
-             + ", new numDocs=" + chunkDocs);
-       }
-       this.docBase = docBase;
-       this.chunkDocs = chunkDocs;
- 
-       if (chunkDocs > numStoredFields.Length) {
-         int newLength = ArrayUtil.Oversize(chunkDocs, 4);
-         numStoredFields = new int[newLength];
-         lengths = new int[newLength];
-       }
- 
-       if (chunkDocs == 1) {
-           numStoredFields[0] = _indexInput.ReadVInt();
-           lengths[0] = _indexInput.ReadVInt();
-       } else {
-           int bitsPerStoredFields = _indexInput.ReadVInt();
-         if (bitsPerStoredFields == 0) {
-             Arrays.Fill(numStoredFields, 0, chunkDocs, _indexInput.ReadVInt());
-         } else if (bitsPerStoredFields > 31) {
-           throw new CorruptIndexException("bitsPerStoredFields=" + bitsPerStoredFields);
-         } else {
-             PackedInts.ReaderIterator it = (PackedInts.ReaderIterator)PackedInts.GetReaderIteratorNoHeader(_indexInput, PackedInts.Format.PACKED, _packedIntsVersion, chunkDocs, bitsPerStoredFields, 1);
-           for (int i = 0; i < chunkDocs; ++i) {
-             numStoredFields[i] = (int) it.Next();
-           }
+         private static void SkipField(ByteArrayDataInput input, int bits)
+         {
+             switch (bits & TYPE_MASK)
+             {
+                 case BYTE_ARR:
+                 case STRING:
+                     int length = input.ReadVInt();
+                     input.SkipBytes(length);
+                     break;
+                 case NUMERIC_INT:
+                 case NUMERIC_FLOAT:
+                     input.ReadInt();
+                     break;
+                 case NUMERIC_LONG:
+                 case NUMERIC_DOUBLE:
+                     input.ReadLong();
+                     break;
+                 default:
+                     throw new InvalidOperationException("Unknown type flag: " + bits.ToString("x"));
+             }
+         }
  
-         int bitsPerLength = _indexInput.ReadVInt();
-         if (bitsPerLength == 0) {
-             Arrays.Fill(lengths, 0, chunkDocs, _indexInput.ReadVInt());
-         } else if (bitsPerLength > 31) {
-           throw new CorruptIndexException("bitsPerLength=" + bitsPerLength);
-         } else {
-             PackedInts.ReaderIterator it = (PackedInts.ReaderIterator)PackedInts.GetReaderIteratorNoHeader(_indexInput, PackedInts.Format.PACKED, _packedIntsVersion, chunkDocs, bitsPerLength, 1);
-           for (int i = 0; i < chunkDocs; ++i) {
-             lengths[i] = (int) it.Next();
-           }
+         public override void VisitDocument(int docID, StoredFieldVisitor visitor)
+         {
+             fieldsStream.Seek(indexReader.getStartPointer(docID));
+ 
+             int docBase = fieldsStream.ReadVInt();
+             int chunkDocs = fieldsStream.ReadVInt();
+             if (docID < docBase
+                 || docID >= docBase + chunkDocs
+                 || docBase + chunkDocs > numDocs)
+             {
+                 throw new CorruptIndexException("Corrupted: docID=" + docID
+                     + ", docBase=" + docBase + ", chunkDocs=" + chunkDocs
+                     + ", numDocs=" + numDocs);
+             }
+ 
+             int numStoredFields, length, offset, totalLength;
+             if (chunkDocs == 1)
+             {
+                 numStoredFields = fieldsStream.ReadVInt();
+                 offset = 0;
+                 length = fieldsStream.ReadVInt();
+                 totalLength = length;
+             }
+             else
+             {
+                 int bitsPerStoredFields = fieldsStream.ReadVInt();
+                 if (bitsPerStoredFields == 0)
+                 {
+                     numStoredFields = fieldsStream.ReadVInt();
+                 }
+                 else if (bitsPerStoredFields > 31)
+                 {
+                     throw new CorruptIndexException("bitsPerStoredFields=" + bitsPerStoredFields);
+                 }
+                 else
+                 {
+                     long filePointer = fieldsStream.FilePointer;
+                     PackedInts.Reader reader = PackedInts.GetDirectReaderNoHeader(fieldsStream, PackedInts.Format.PACKED, packedIntsVersion, chunkDocs, bitsPerStoredFields);
+                     numStoredFields = (int)(reader.Get(docID - docBase));
+                     fieldsStream.Seek(filePointer + PackedInts.Format.PACKED.ByteCount(packedIntsVersion, chunkDocs, bitsPerStoredFields));
+                 }
+ 
+                 int bitsPerLength = fieldsStream.ReadVInt();
+                 if (bitsPerLength == 0)
+                 {
+                     length = fieldsStream.ReadVInt();
+                     offset = (docID - docBase) * length;
+                     totalLength = chunkDocs * length;
+                 }
+                 else if (bitsPerStoredFields > 31)
+                 {
+                     throw new CorruptIndexException("bitsPerLength=" + bitsPerLength);
+                 }
+                 else
+                 {
+                     PackedInts.ReaderIterator it = (PackedInts.ReaderIterator)PackedInts.GetReaderIteratorNoHeader(fieldsStream, PackedInts.Format.PACKED, packedIntsVersion, chunkDocs, bitsPerLength, 1);
+                     int off = 0;
+                     for (int i = 0; i < docID - docBase; ++i)
+                     {
+                         //TODO - HACKMP - Paul, this is a point of concern for me, in that everything from this file, and the 
+                         //decompressor.Decompress() contract is looking for int.  But, I don't want to simply cast from long to int here.
+                         off += (int)it.Next();
+                     }
+                     offset = off;
+                     length = (int)it.Next();
+                     off += length;
+                     for (int i = docID - docBase + 1; i < chunkDocs; ++i)
+                     {
+                         off += (int)it.Next();
+                     }
+                     totalLength = off;
+                 }
+             }
+ 
+             if ((length == 0) != (numStoredFields == 0))
+             {
+                 throw new CorruptIndexException("length=" + length + ", numStoredFields=" + numStoredFields);
+             }
+             if (numStoredFields == 0)
+             {
+                 // nothing to do
+                 return;
+             }
+ 
+             decompressor.Decompress(fieldsStream, totalLength, offset, length, bytes);
+ 
+             ByteArrayDataInput documentInput = new ByteArrayDataInput((byte[])(Array)bytes.bytes, bytes.offset, bytes.length);
+             for (int fieldIDX = 0; fieldIDX < numStoredFields; fieldIDX++)
+             {
+                 long infoAndBits = documentInput.ReadVLong();
+                 int fieldNumber = (int)Number.URShift(infoAndBits, TYPE_BITS); // (infoAndBits >>> TYPE_BITS);
+                 FieldInfo fieldInfo = fieldInfos.FieldInfo(fieldNumber);
+ 
+                 int bits = (int)(infoAndBits & TYPE_MASK);
+ 
+                 switch (visitor.NeedsField(fieldInfo))
+                 {
+                     case StoredFieldVisitor.Status.YES:
+                         ReadField(documentInput, visitor, fieldInfo, bits);
+                         break;
+                     case StoredFieldVisitor.Status.NO:
+                         SkipField(documentInput, bits);
+                         break;
+                     case StoredFieldVisitor.Status.STOP:
+                         return;
+                 }
+             }
          }
-       }
-     }
  
-     /**
-      * Decompress the chunk.
-      */
-     void decompress(){
-       // decompress data
-       int chunkSize = this.chunkSize();
-       decompressor.Decompress(_indexInput, chunkSize, 0, chunkSize, bytes);
-       if (bytes.length != chunkSize) {
-         throw new CorruptIndexException("Corrupted: expected chunk size = " + this.chunkSize() + ", got " + bytes.length);
-       }
-     }
+         public override StoredFieldsReader Clone()
+         {
+             ensureOpen();
+             return new CompressingStoredFieldsReader(this);
+         }
  
-     /**
-      * Copy compressed data.
-      */
-     void copyCompressedData(DataOutput output){
-       long chunkEnd = docBase + chunkDocs == numDocs
-           ? fieldsStream.length()
-           : indexReader.getStartPointer(docBase + chunkDocs);
-       output.copyBytes(fieldsStream, chunkEnd - fieldsStream.getFilePointer());
-     }
+         public CompressionMode CompressionMode
+         {
+             get
+             {
+                 return compressionMode;
+             }
+         }
  
-   }
+         // .NET Port: renamed to GetChunkIterator to avoid conflict with nested type.
+         internal ChunkIterator GetChunkIterator(int startDocID)
+         {
+             ensureOpen();
+             fieldsStream.Seek(indexReader.getStartPointer(startDocID));
+             return new ChunkIterator(fieldsStream, indexReader, numDocs, packedIntsVersion, decompressor);
+         }
  
- }
+         internal sealed class ChunkIterator
+         {
+             private IndexInput _fieldsStream;
+             private CompressingStoredFieldsReader _indexReader;
+             private Decompressor _decompressor;
+             private int _numOfDocs;
+             private int _packedIntsVersion;
+             BytesRef bytes;
+             int docBase;
+             int chunkDocs;
+             int[] numStoredFields;
+             int[] lengths;
+ 
+             public ChunkIterator(IndexInput fieldsStream, CompressingStoredFieldsReader indexReader,
+                                     int numOfDocs, int packedIntsVersion, Decompressor decompressor)
+             {
+                 _indexReader = indexReader;
+                 _numOfDocs = numOfDocs;
+                 _packedIntsVersion = packedIntsVersion;
+                 _decompressor = decompressor;
+                 _fieldsStream = fieldsStream;
+                 this.docBase = -1;
+                 bytes = new BytesRef();
+                 numStoredFields = new int[1];
+                 lengths = new int[1];
+             }
+ 
+             /// <summary>
+             /// Return the decompressed size of the chunk.
+             /// </summary>
+             public int ChunkSize()
+             {
+                 int sum = 0;
+                 for (int i = 0; i < chunkDocs; ++i)
+                 {
+                     sum += lengths[i];
+                 }
+                 return sum;
+             }
+ 
+             /// <summary>
+             /// Go to the chunk containing the provided doc ID.
+             /// </summary>
+             public void Next(int doc)
+             {
+                 _fieldsStream.Seek(_indexReader.getStartPointer(doc));
+ 
+                 int docBase = _fieldsStream.ReadVInt();
+                 int chunkDocs = _fieldsStream.ReadVInt();
+                 if (docBase < this.docBase + this.chunkDocs
+                     || docBase + chunkDocs > _numOfDocs)
+                 {
+                     throw new CorruptIndexException("Corrupted: current docBase=" + this.docBase
+                         + ", current numDocs=" + this.chunkDocs + ", new docBase=" + docBase
+                         + ", new numDocs=" + chunkDocs);
+                 }
+                 this.docBase = docBase;
+                 this.chunkDocs = chunkDocs;
+ 
+                 if (chunkDocs > numStoredFields.Length)
+                 {
+                     int newLength = ArrayUtil.Oversize(chunkDocs, 4);
+                     numStoredFields = new int[newLength];
+                     lengths = new int[newLength];
+                 }
+ 
+                 if (chunkDocs == 1)
+                 {
+                     numStoredFields[0] = _fieldsStream.ReadVInt();
+                     lengths[0] = _fieldsStream.ReadVInt();
+                 }
+                 else
+                 {
+                     int bitsPerStoredFields = _fieldsStream.ReadVInt();
+                     if (bitsPerStoredFields == 0)
+                     {
+                         Arrays.Fill(numStoredFields, 0, chunkDocs, _fieldsStream.ReadVInt());
+                     }
+                     else if (bitsPerStoredFields > 31)
+                     {
+                         throw new CorruptIndexException("bitsPerStoredFields=" + bitsPerStoredFields);
+                     }
+                     else
+                     {
+                         PackedInts.ReaderIterator it = (PackedInts.ReaderIterator)PackedInts.GetReaderIteratorNoHeader(_fieldsStream, PackedInts.Format.PACKED, _packedIntsVersion, chunkDocs, bitsPerStoredFields, 1);
+                         for (int i = 0; i < chunkDocs; ++i)
+                         {
+                             numStoredFields[i] = (int)it.Next();
+                         }
+                     }
+ 
+                     int bitsPerLength = _fieldsStream.ReadVInt();
+                     if (bitsPerLength == 0)
+                     {
+                         Arrays.Fill(lengths, 0, chunkDocs, _fieldsStream.ReadVInt());
+                     }
+                     else if (bitsPerLength > 31)
+                     {
+                         throw new CorruptIndexException("bitsPerLength=" + bitsPerLength);
+                     }
+                     else
+                     {
+                         PackedInts.ReaderIterator it = (PackedInts.ReaderIterator)PackedInts.GetReaderIteratorNoHeader(_fieldsStream, PackedInts.Format.PACKED, _packedIntsVersion, chunkDocs, bitsPerLength, 1);
+                         for (int i = 0; i < chunkDocs; ++i)
+                         {
+                             lengths[i] = (int)it.Next();
+                         }
+                     }
+                 }
+             }
+ 
+             /// <summary>
+             /// Decompress the chunk.
+             /// </summary>
+             public void Decompress()
+             {
+                 // decompress data
+                 int chunkSize = this.ChunkSize();
+                 _decompressor.Decompress(_fieldsStream, chunkSize, 0, chunkSize, bytes);
+                 if (bytes.length != chunkSize)
+                 {
+                     throw new CorruptIndexException("Corrupted: expected chunk size = " + this.ChunkSize() + ", got " + bytes.length);
+                 }
+             }
+ 
+             /// <summary>
+             /// Copy compressed data.
+             /// </summary>
+             public void CopyCompressedData(DataOutput output)
+             {
+                 long chunkEnd = docBase + chunkDocs == _numOfDocs
+                     ? _fieldsStream.Length
+                     : _indexReader.getStartPointer(docBase + chunkDocs);
+                 output.CopyBytes(_fieldsStream, chunkEnd - _fieldsStream.FilePointer);
+             }
+ 
+         }
+ 
+     }
  }
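
A note on the uniform-length fast path in the chunk-reading logic above: when bitsPerLength == 0, every document in the chunk shares one VInt-encoded length, so per-document offsets fall out of plain multiplication and no PackedInts iterator is needed. A small worked fragment with hypothetical values:

    // Hypothetical chunk: 4 docs starting at docBase 100, each 250 bytes when decompressed.
    int docBase = 100, chunkDocs = 4, docID = 102;
    int uniformLength = 250;                        // the single fieldsStream.ReadVInt() value
    int offset = (docID - docBase) * uniformLength; // 500: where doc 102 starts in the chunk
    int totalLength = chunkDocs * uniformLength;    // 1000: decompressed size of the whole chunk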


[35/50] [abbrv] git commit: Some work on Analyzers library

Posted by mh...@apache.org.
Some work on Analyzers library


Project: http://git-wip-us.apache.org/repos/asf/lucenenet/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucenenet/commit/4cc8ff0e
Tree: http://git-wip-us.apache.org/repos/asf/lucenenet/tree/4cc8ff0e
Diff: http://git-wip-us.apache.org/repos/asf/lucenenet/diff/4cc8ff0e

Branch: refs/heads/branch_4x
Commit: 4cc8ff0e445b942ee9eb6e0b4db5be60745d047e
Parents: 401752b
Author: Paul Irwin <pa...@gmail.com>
Authored: Wed Aug 7 13:38:11 2013 -0400
Committer: Paul Irwin <pa...@gmail.com>
Committed: Wed Aug 7 13:38:11 2013 -0400

----------------------------------------------------------------------
 src/contrib/Analyzers/Contrib.Analyzers.csproj  |   7 +
 .../Analyzers/Core/KeywordTokenizerFactory.cs   |  15 +-
 src/contrib/Analyzers/Core/LetterTokenizer.cs   |  28 ++
 .../Analyzers/Core/LetterTokenizerFactory.cs    |  27 ++
 src/contrib/Analyzers/Support/AbstractSet.cs    | 120 +++++++
 src/contrib/Analyzers/Util/AnalysisSPILoader.cs | 115 +++++++
 src/contrib/Analyzers/Util/CharArrayMap.cs      | 311 ++++++++++++++++---
 src/contrib/Analyzers/Util/CharArraySet.cs      | 122 +++++++-
 src/contrib/Analyzers/Util/CharTokenizer.cs     | 124 ++++++++
 .../Analyzers/Util/StopwordAnalyzerBase.cs      |  80 +++++
 src/contrib/Analyzers/Util/TokenizerFactory.cs  |  38 +++
 src/contrib/Analyzers/Util/WordlistLoader.cs    | 155 +++++++++
 12 files changed, 1094 insertions(+), 48 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucenenet/blob/4cc8ff0e/src/contrib/Analyzers/Contrib.Analyzers.csproj
----------------------------------------------------------------------
diff --git a/src/contrib/Analyzers/Contrib.Analyzers.csproj b/src/contrib/Analyzers/Contrib.Analyzers.csproj
index 8b01198..8613c88 100644
--- a/src/contrib/Analyzers/Contrib.Analyzers.csproj
+++ b/src/contrib/Analyzers/Contrib.Analyzers.csproj
@@ -132,6 +132,8 @@
     <Compile Include="Core\KeywordAnalyzer.cs" />
     <Compile Include="Core\KeywordTokenizer.cs" />
     <Compile Include="Core\KeywordTokenizerFactory.cs" />
+    <Compile Include="Core\LetterTokenizer.cs" />
+    <Compile Include="Core\LetterTokenizerFactory.cs" />
     <Compile Include="Cz\CzechAnalyzer.cs" />
     <Compile Include="De\GermanAnalyzer.cs" />
     <Compile Include="De\GermanStemFilter.cs" />
@@ -199,15 +201,20 @@
     <Compile Include="Sinks\DateRecognizerSinkFilter.cs" />
     <Compile Include="Sinks\TokenRangeSinkFilter.cs" />
     <Compile Include="Sinks\TokenTypeSinkFilter.cs" />
+    <Compile Include="Support\AbstractSet.cs" />
     <Compile Include="Support\StringExtensions.cs" />
     <Compile Include="Th\ThaiAnalyzer.cs" />
     <Compile Include="Th\ThaiWordFilter.cs" />
     <Compile Include="Util\AbstractAnalysisFactory.cs" />
+    <Compile Include="Util\AnalysisSPILoader.cs" />
     <Compile Include="Util\CharacterUtils.cs" />
     <Compile Include="Util\CharArrayMap.cs" />
     <Compile Include="Util\CharArraySet.cs" />
+    <Compile Include="Util\CharTokenizer.cs" />
     <Compile Include="Util\IResourceLoader.cs" />
+    <Compile Include="Util\StopwordAnalyzerBase.cs" />
     <Compile Include="Util\TokenizerFactory.cs" />
+    <Compile Include="Util\WordlistLoader.cs" />
     <Compile Include="WordlistLoader.cs" />
   </ItemGroup>
   <ItemGroup>

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/4cc8ff0e/src/contrib/Analyzers/Core/KeywordTokenizerFactory.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Analyzers/Core/KeywordTokenizerFactory.cs b/src/contrib/Analyzers/Core/KeywordTokenizerFactory.cs
index 24f810c..ac85065 100644
--- a/src/contrib/Analyzers/Core/KeywordTokenizerFactory.cs
+++ b/src/contrib/Analyzers/Core/KeywordTokenizerFactory.cs
@@ -1,4 +1,5 @@
-using System;
+using Lucene.Net.Analysis.Util;
+using System;
 using System.Collections.Generic;
 using System.Linq;
 using System.Text;
@@ -7,6 +8,18 @@ namespace Lucene.Net.Analysis.Core
 {
     public class KeywordTokenizerFactory : TokenizerFactory
     {
+        public KeywordTokenizerFactory(IDictionary<String, String> args)
+            : base(args)
+        {
+            if (args.Count > 0)
+            {
+                throw new ArgumentException("Unknown parameters: " + string.Join(", ", args.Keys));
+            }
+        }
 
+        public override Tokenizer Create(Net.Util.AttributeSource.AttributeFactory factory, System.IO.TextReader input)
+        {
+            return new KeywordTokenizer(factory, input, KeywordTokenizer.DEFAULT_BUFFER_SIZE);
+        }
     }
 }
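
A hypothetical caller of the factory above (fragment; assumes the usual System.Collections.Generic and System.IO usings, plus the Create(TextReader) convenience overload added to TokenizerFactory in this commit):

    var factory = new KeywordTokenizerFactory(new Dictionary<string, string>());
    // The constructor rejects leftover args, so pass an empty dictionary.
    Tokenizer tok = factory.Create(new StringReader("some literal value"));
    // KeywordTokenizer emits the entire input as a single token.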

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/4cc8ff0e/src/contrib/Analyzers/Core/LetterTokenizer.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Analyzers/Core/LetterTokenizer.cs b/src/contrib/Analyzers/Core/LetterTokenizer.cs
new file mode 100644
index 0000000..669d8dc
--- /dev/null
+++ b/src/contrib/Analyzers/Core/LetterTokenizer.cs
@@ -0,0 +1,28 @@
+using Lucene.Net.Analysis.Util;
+using System;
+using System.Collections.Generic;
+using System.IO;
+using System.Linq;
+using System.Text;
+using Version = Lucene.Net.Util.Version;
+
+namespace Lucene.Net.Analysis.Core
+{
+    public class LetterTokenizer : CharTokenizer
+    {
+        public LetterTokenizer(Version matchVersion, TextReader input)
+            : base(matchVersion, input)
+        {
+        }
+
+        public LetterTokenizer(Version matchVersion, AttributeFactory factory, TextReader input)
+            : base(matchVersion, factory, input)
+        {
+        }
+
+        protected override bool IsTokenChar(int c)
+        {
+            return char.IsLetter((char)c);
+        }
+    }
+}
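
A minimal end-to-end sketch of the tokenizer above; hypothetical driver code, assuming ICharTermAttribute and AddAttribute<T>() behave as in the rest of the port:

    using System;
    using System.IO;
    using Lucene.Net.Analysis.Core;
    using Lucene.Net.Analysis.Tokenattributes;
    using Version = Lucene.Net.Util.Version;

    internal static class LetterTokenizerDemo
    {
        private static void Main()
        {
            var tokenizer = new LetterTokenizer(Version.LUCENE_CURRENT,
                                                new StringReader("ports, 2013-era code!"));
            var termAtt = tokenizer.AddAttribute<ICharTermAttribute>();
            tokenizer.Reset(); // CharTokenizer expects Reset() before the first IncrementToken()
            while (tokenizer.IncrementToken())
            {
                // Digits and punctuation are not token chars: prints "ports", "era", "code"
                Console.WriteLine(termAtt.ToString());
            }
        }
    }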

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/4cc8ff0e/src/contrib/Analyzers/Core/LetterTokenizerFactory.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Analyzers/Core/LetterTokenizerFactory.cs b/src/contrib/Analyzers/Core/LetterTokenizerFactory.cs
new file mode 100644
index 0000000..c07a8b6
--- /dev/null
+++ b/src/contrib/Analyzers/Core/LetterTokenizerFactory.cs
@@ -0,0 +1,27 @@
+using Lucene.Net.Analysis.Util;
+using System;
+using System.Collections.Generic;
+using System.Linq;
+using System.Text;
+
+namespace Lucene.Net.Analysis.Core
+{
+    public class LetterTokenizerFactory : TokenizerFactory
+    {
+        public LetterTokenizerFactory(IDictionary<String, String> args)
+            : base(args)
+        {
+            AssureMatchVersion();
+
+            if (args.Count > 0)
+            {
+                throw new ArgumentException("Unknown parameters: " + string.Join(", ", args.Keys));
+            }
+        }
+
+        public override Tokenizer Create(Net.Util.AttributeSource.AttributeFactory factory, System.IO.TextReader input)
+        {
+            return new LetterTokenizer(luceneMatchVersion, factory, input);
+        }
+    }
+}

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/4cc8ff0e/src/contrib/Analyzers/Support/AbstractSet.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Analyzers/Support/AbstractSet.cs b/src/contrib/Analyzers/Support/AbstractSet.cs
new file mode 100644
index 0000000..f732d08
--- /dev/null
+++ b/src/contrib/Analyzers/Support/AbstractSet.cs
@@ -0,0 +1,120 @@
+using System;
+using System.Collections.Generic;
+using System.Linq;
+using System.Text;
+
+namespace Lucene.Net.Analysis.Support
+{
+    public abstract class AbstractSet<T> : ISet<T>
+    {
+        public virtual bool Add(T item)
+        {
+            return false;
+        }
+
+        public void ExceptWith(IEnumerable<T> other)
+        {
+            foreach (var item in other)
+            {
+                this.Remove(item);
+            }
+        }
+
+        public void IntersectWith(IEnumerable<T> other)
+        {
+            var set = new HashSet<T>(other);
+
+            foreach (var item in this.ToList())
+            {
+                if (!set.Contains(item))
+                    this.Remove(item);
+            }
+        }
+
+        public bool IsProperSubsetOf(IEnumerable<T> other)
+        {
+            throw new NotImplementedException();
+        }
+
+        public bool IsProperSupersetOf(IEnumerable<T> other)
+        {
+            throw new NotImplementedException();
+        }
+
+        public bool IsSubsetOf(IEnumerable<T> other)
+        {
+            throw new NotImplementedException();
+        }
+
+        public bool IsSupersetOf(IEnumerable<T> other)
+        {
+            throw new NotImplementedException();
+        }
+
+        public bool Overlaps(IEnumerable<T> other)
+        {
+            throw new NotImplementedException();
+        }
+
+        public bool SetEquals(IEnumerable<T> other)
+        {
+            throw new NotImplementedException();
+        }
+
+        public void SymmetricExceptWith(IEnumerable<T> other)
+        {
+            throw new NotImplementedException();
+        }
+
+        public void UnionWith(IEnumerable<T> other)
+        {
+            foreach (var item in other)
+            {
+                this.Add(item);
+            }
+        }
+
+        void ICollection<T>.Add(T item)
+        {
+            Add(item);
+        }
+
+        public abstract void Clear();
+
+        public abstract bool Contains(T item);
+
+        public void CopyTo(T[] array, int arrayIndex)
+        {
+            var enumerator = GetEnumerator();
+
+            for (int i = arrayIndex; i < array.Length; i++)
+            {
+                if (!enumerator.MoveNext())
+                    break;
+
+                array[i] = enumerator.Current;
+            }
+        }
+
+        public abstract int Count { get; }
+
+        public bool IsReadOnly
+        {
+            get { return false; }
+        }
+
+        public abstract bool Remove(T item);
+
+        public abstract IEnumerator<T> GetEnumerator();
+
+        System.Collections.IEnumerator System.Collections.IEnumerable.GetEnumerator()
+        {
+            return GetEnumerator();
+        }
+
+        public void AddAll(IEnumerable<T> values)
+        {
+            this.UnionWith(values);
+        }
+    }
+}
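
Because AbstractSet implements ExceptWith/IntersectWith/UnionWith on top of the abstract members, a concrete subclass only has to supply storage. A hypothetical minimal subclass:

    using System.Collections.Generic;
    using Lucene.Net.Analysis.Support;

    internal sealed class ListBackedSet<T> : AbstractSet<T>
    {
        private readonly List<T> items = new List<T>();

        public override bool Add(T item)
        {
            if (items.Contains(item)) return false; // preserve set semantics
            items.Add(item);
            return true;
        }

        public override void Clear() { items.Clear(); }
        public override bool Contains(T item) { return items.Contains(item); }
        public override bool Remove(T item) { return items.Remove(item); }
        public override int Count { get { return items.Count; } }
        public override IEnumerator<T> GetEnumerator() { return items.GetEnumerator(); }
    }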

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/4cc8ff0e/src/contrib/Analyzers/Util/AnalysisSPILoader.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Analyzers/Util/AnalysisSPILoader.cs b/src/contrib/Analyzers/Util/AnalysisSPILoader.cs
new file mode 100644
index 0000000..d06a57c
--- /dev/null
+++ b/src/contrib/Analyzers/Util/AnalysisSPILoader.cs
@@ -0,0 +1,115 @@
+using Lucene.Net.Support;
+using Lucene.Net.Util;
+using System;
+using System.Collections.Generic;
+using System.Linq;
+using System.Text;
+using System.Threading;
+
+namespace Lucene.Net.Analysis.Util
+{
+    internal sealed class AnalysisSPILoader<S>
+        where S : AbstractAnalysisFactory
+    {
+        private volatile IDictionary<string, Type> services = new HashMap<string, Type>();
+        private readonly Type clazz;
+        private readonly string[] suffixes;
+
+        public AnalysisSPILoader(Type clazz)
+            : this(clazz, new string[] { clazz.Name })
+        {
+        }
+
+        public AnalysisSPILoader(Type clazz, string[] suffixes)
+        {
+            this.clazz = clazz;
+            this.suffixes = suffixes;
+            // if clazz' classloader is not a parent of the given one, we scan clazz's classloader, too:
+            //final ClassLoader clazzClassloader = clazz.getClassLoader();
+            //if (clazzClassloader != null && !SPIClassIterator.isParentClassLoader(clazzClassloader, classloader)) {
+            //  reload(clazzClassloader);
+            //}
+            Reload();
+        }
+
+        public void Reload()
+        {
+            lock (this)
+            {
+                HashMap<String, Type> services =
+                  new HashMap<String, Type>(this.services);
+                SPIClassIterator<S> loader = SPIClassIterator<S>.Get();
+                foreach (var service in loader)
+                {
+                    //Class<? extends S> service = loader.next();
+                    String clazzName = service.Name;
+                    String name = null;
+                    foreach (String suffix in suffixes)
+                    {
+                        if (clazzName.EndsWith(suffix))
+                        {
+                            name = clazzName.Substring(0, clazzName.Length - suffix.Length).ToLowerInvariant();
+                            break;
+                        }
+                    }
+                    if (name == null)
+                    {
+                        throw new InvalidOperationException("The class name " + service.FullName +
+                          " has a wrong suffix; allowed suffixes are: " + Arrays.ToString(suffixes));
+                    }
+                    // only add the first one for each name, later services will be ignored
+                    // this allows to place services before others in classpath to make 
+                    // them used instead of others
+                    //
+                    // TODO: Should we disallow duplicate names here?
+                    // Allowing it may get confusing on collisions, as different packages
+                    // could contain same factory class, which is a naming bug!
+                    // When changing this be careful to allow reload()!
+                    if (!services.ContainsKey(name))
+                    {
+                        services[name] = service;
+                    }
+                }
+                //this.services = Collections.unmodifiableMap(services);
+            }
+        }
+
+        public S NewInstance(string name, IDictionary<string, string> args)
+        {
+            Type service = LookupClass(name);
+            try
+            {
+                //var ctor = service.GetConstructor(new[] { typeof(IDictionary<string, string>) });
+                return (S)Activator.CreateInstance(service, args);
+            }
+            catch (Exception e)
+            {
+                throw new ArgumentException("SPI class of type " + clazz.FullName + " with name '" + name + "' cannot be instantiated. " +
+                      "This is likely due to a misconfiguration of the class '" + service.FullName + "': ", e);
+            }
+        }
+
+        public Type LookupClass(String name)
+        {
+            Type service = services[name.ToLowerInvariant()];
+            if (service != null)
+            {
+                return service;
+            }
+            else
+            {
+                throw new ArgumentException("A SPI class of type " + clazz.FullName + " with name '" + name + "' does not exist. " +
+                    "You need to reference an assembly that supports this SPI. " +
+                    "The currently available names are: " + Arrays.ToString(AvailableServices));
+            }
+        }
+
+        public ICollection<String> AvailableServices
+        {
+            get
+            {
+                return services.Keys;
+            }
+        }
+    }
+}
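
The registry key for each factory falls out of the suffix-stripping loop in Reload(); for example:

    // Mirrors the derivation in Reload(): strip the matching suffix, lowercase the rest.
    string clazzName = "KeywordTokenizerFactory";
    string suffix = "TokenizerFactory";
    string name = clazzName.Substring(0, clazzName.Length - suffix.Length).ToLowerInvariant();
    // name == "keyword" -- the key later accepted by LookupClass()/NewInstance()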

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/4cc8ff0e/src/contrib/Analyzers/Util/CharArrayMap.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Analyzers/Util/CharArrayMap.cs b/src/contrib/Analyzers/Util/CharArrayMap.cs
index 7297855..e124451 100644
--- a/src/contrib/Analyzers/Util/CharArrayMap.cs
+++ b/src/contrib/Analyzers/Util/CharArrayMap.cs
@@ -1,4 +1,5 @@
-using Lucene.Net.Support;
+using Lucene.Net.Analysis.Support;
+using Lucene.Net.Support;
 using System;
 using System.Collections.Generic;
 using System.Linq;
@@ -9,7 +10,7 @@ namespace Lucene.Net.Analysis.Util
     public class CharArrayMap<V> : IDictionary<object, V>
     {
         // private only because missing generics
-        private static readonly CharArrayMap<V> EMPTY_MAP = new EmptyCharArrayMap<Object>();
+        internal static readonly CharArrayMap<V> EMPTY_MAP = new CharArrayMap.EmptyCharArrayMap<V>();
 
         private const int INIT_SIZE = 8;
         private readonly CharacterUtils charUtils;
@@ -34,10 +35,13 @@ namespace Lucene.Net.Analysis.Util
         public CharArrayMap(Lucene.Net.Util.Version matchVersion, IDictionary<object, V> c, bool ignoreCase)
             : this(matchVersion, c.Count, ignoreCase)
         {
-            PutAll(c);
+            foreach (var kvp in c)
+            {
+                this[kvp.Key] = kvp.Value;
+            }
         }
 
-        private CharArrayMap(CharArrayMap<V> toCopy)
+        internal CharArrayMap(CharArrayMap<V> toCopy)
         {
             this.keys = toCopy.keys;
             this.values = toCopy.values;
@@ -47,24 +51,24 @@ namespace Lucene.Net.Analysis.Util
             this.matchVersion = toCopy.matchVersion;
         }
 
-        public void Clear()
+        public virtual void Clear()
         {
             count = 0;
             Arrays.Fill(keys, null);
             Arrays.Fill(values, default(V));
         }
 
-        public bool ContainsKey(char[] text, int off, int len)
+        public virtual bool ContainsKey(char[] text, int off, int len)
         {
             return keys[GetSlot(text, off, len)] != null;
         }
 
-        public bool ContainsKey(ICharSequence cs)
+        public virtual bool ContainsKey(ICharSequence cs)
         {
             return keys[GetSlot(cs)] != null;
         }
 
-        public bool ContainsKey(Object o)
+        public virtual bool ContainsKey(Object o)
         {
             if (o is char[])
             {
@@ -74,34 +78,35 @@ namespace Lucene.Net.Analysis.Util
             return ContainsKey(o.ToString());
         }
 
-        public V Get(char[] text, int off, int len)
+        public virtual V Get(char[] text, int off, int len)
         {
             return values[GetSlot(text, off, len)];
         }
 
-        public V Get(ICharSequence cs)
+        public virtual V Get(ICharSequence cs)
         {
             return values[GetSlot(cs)];
         }
 
+        public virtual V Get(object o)
+        {
+            if (o is char[])
+            {
+                char[] text = (char[])o;
+                return Get(text, 0, text.Length);
+            }
+            return Get(o.ToString());
+        }
+
         public V this[Object o]
         {
             get
             {
-                if (o is char[])
-                {
-                    char[] text = (char[])o;
-                    return Get(text, 0, text.Length);
-                }
-                return this[o.ToString()];
+                return Get(o); 
             }
             set
             {
-                if (o is char[])
-                {
-                    Put((char[])o, value);
-                }
-                Put(o.ToString(), value);
+                Put(o, value);
             }
         }
 
@@ -141,17 +146,26 @@ namespace Lucene.Net.Analysis.Util
             return pos;
         }
 
-        public V Put(ICharSequence text, V value)
+        public virtual V Put(object o, V value)
+        {
+            if (o is char[])
+            {
+                return Put((char[])o, value);
+            }
+            return Put(o.ToString(), value);
+        }
+
+        public virtual V Put(ICharSequence text, V value)
         {
             return Put(text.ToString(), value); // could be more efficient
         }
 
-        public V Put(string text, V value)
+        public virtual V Put(string text, V value)
         {
             return Put(text.ToCharArray(), value);
         }
 
-        public V Put(char[] text, V value)
+        public virtual V Put(char[] text, V value)
         {
             if (ignoreCase)
             {
@@ -300,7 +314,7 @@ namespace Lucene.Net.Analysis.Util
             return code;
         }
 
-        public void Remove(object key)
+        public virtual void Remove(object key)
         {
             throw new NotSupportedException();
         }
@@ -313,7 +327,7 @@ namespace Lucene.Net.Analysis.Util
         public override string ToString()
         {
             StringBuilder sb = new StringBuilder("{");
-            foreach (KeyValuePair<Object, V> entry in EntrySet)
+            foreach (KeyValuePair<Object, V> entry in this.GetEntrySet())
             {
                 if (sb.Length > 1) sb.Append(", ");
                 sb.Append(entry);
@@ -324,28 +338,25 @@ namespace Lucene.Net.Analysis.Util
         private EntrySet entrySet = null;
         private CharArraySet keySet = null;
 
-        internal EntrySet CreateEntrySet()
+        internal virtual EntrySet CreateEntrySet()
         {
-            return new EntrySet(true);
+            return new EntrySet(this, true);
         }
 
-        public EntrySet EntrySet
+        public EntrySet GetEntrySet()
         {
-            get
+            if (entrySet == null)
             {
-                if (entrySet == null)
-                {
-                    entrySet = CreateEntrySet();
-                }
-                return entrySet;
+                entrySet = CreateEntrySet();
             }
+            return entrySet;
         }
 
         internal ISet<object> OriginalKeySet
         {
             get
             {
-                return Keys;
+                return Keys as ISet<object>;
             }
         }
 
@@ -399,7 +410,7 @@ namespace Lucene.Net.Analysis.Util
             private int lastPos;
             private readonly bool allowModify;
 
-            private KeyValuePair<object, V> current; // .NET Port: need to store current as IEnumerator != Iterator
+            private MapEntry current; // .NET Port: need to store current as IEnumerator != Iterator
 
             public EntryIterator(CharArrayMap<V> parent, bool allowModify)
             {
@@ -423,7 +434,7 @@ namespace Lucene.Net.Analysis.Util
 
                     return true;
                 }
-                current = new MapEntry(lastPos, allowModify);
+                current = new MapEntry(parent, lastPos, allowModify);
                 return false;
             }
 
@@ -454,10 +465,10 @@ namespace Lucene.Net.Analysis.Util
                 parent.values[lastPos] = value;
                 return old;
             }
-            
+
             public KeyValuePair<object, V> Current
             {
-                get { return current; }
+                get { return current.AsKeyValuePair(); }
             }
 
             public void Dispose()
@@ -475,6 +486,224 @@ namespace Lucene.Net.Analysis.Util
             }
         }
 
-        
+        private sealed class MapEntry // : KeyValuePair<object, V> -- this doesn't work in .NET as KVP is a struct, so we wrap it instead
+        {
+            private readonly CharArrayMap<V> parent;
+            private readonly int pos;
+            private readonly bool allowModify;
+
+            public MapEntry(CharArrayMap<V> parent, int pos, bool allowModify)
+            {
+                this.parent = parent;
+                this.pos = pos;
+                this.allowModify = allowModify;
+            }
+
+            public object Key
+            {
+                get
+                {
+                    // we must clone here, as putAll to another CharArrayMap
+                    // with other case sensitivity flag would corrupt the keys
+                    return parent.keys[pos].Clone();
+                }
+            }
+
+            public V Value
+            {
+                get
+                {
+                    return parent.values[pos];
+                }
+                set
+                {
+                    if (!allowModify)
+                        throw new NotSupportedException();
+
+                    parent.values[pos] = value;
+                }
+            }
+
+            public override string ToString()
+            {
+                return new StringBuilder().Append(parent.keys[pos]).Append('=')
+                    .Append((parent.values[pos].Equals(parent)) ? "(this Map)" : parent.values[pos].ToString())
+                    .ToString();
+            }
+
+            public KeyValuePair<object, V> AsKeyValuePair()
+            {
+                return new KeyValuePair<object, V>(Key, Value);
+            }
+        }
+
+        public sealed class EntrySet : AbstractSet<KeyValuePair<object, V>>
+        {
+            private readonly CharArrayMap<V> parent;
+            private readonly bool allowModify;
+
+            public EntrySet(CharArrayMap<V> parent, bool allowModify)
+            {
+                this.parent = parent;
+                this.allowModify = allowModify;
+            }
+
+            public override IEnumerator<KeyValuePair<object, V>> GetEnumerator()
+            {
+                return new EntryIterator(parent, allowModify);
+            }
+
+            public override bool Contains(KeyValuePair<object, V> e)
+            {
+                //if (!(o instanceof Map.Entry))
+                //  return false;
+                //Map.Entry<Object,V> e = (Map.Entry<Object,V>)o;
+                Object key = e.Key;
+                Object val = e.Value;
+                Object v = parent[key];
+                return v == null ? val == null : v.Equals(val);
+            }
+
+            public override bool Remove(KeyValuePair<object, V> item)
+            {
+                throw new NotSupportedException();
+            }
+
+            public override int Count
+            {
+                get { return parent.count; }
+            }
+
+            public override void Clear()
+            {
+                if (!allowModify)
+                    throw new NotSupportedException();
+                parent.Clear();
+            }
+        }
+    }
+
+    // .NET Port: non-generic static class to hold nested types and static methods
+    public static class CharArrayMap
+    {
+        public static CharArrayMap<V> UnmodifiableMap<V>(CharArrayMap<V> map)
+        {
+            if (map == null)
+                throw new NullReferenceException("Given map is null");
+            if (map == EmptyMap<V>() || map.Count == 0)
+                return EmptyMap<V>();
+            if (map is UnmodifiableCharArrayMap<V>)
+                return map;
+            return new UnmodifiableCharArrayMap<V>(map);
+        }
+
+        public static CharArrayMap<V> Copy<V>(Lucene.Net.Util.Version matchVersion, IDictionary<object, V> map)
+        {
+            if (map == CharArrayMap<V>.EMPTY_MAP)
+                return EmptyMap<V>();
+            if (map is CharArrayMap<V>)
+            {
+                CharArrayMap<V> m = (CharArrayMap<V>)map;
+                // use fast path instead of iterating all values
+                // this is even on very small sets ~10 times faster than iterating
+                char[][] keys = new char[m.keys.Length][];
+                Array.Copy(m.keys, 0, keys, 0, keys.Length);
+                V[] values = new V[m.values.Length];
+                Array.Copy(m.values, 0, values, 0, values.Length);
+                m = new CharArrayMap<V>(m);
+                m.keys = keys;
+                m.values = values;
+                return m;
+            }
+            return new CharArrayMap<V>(matchVersion, map, false);
+        }
+
+        public static CharArrayMap<V> EmptyMap<V>()
+        {
+            return CharArrayMap<V>.EMPTY_MAP;
+        }
+
+        internal class UnmodifiableCharArrayMap<V> : CharArrayMap<V>
+        {
+            public UnmodifiableCharArrayMap(CharArrayMap<V> map)
+                : base(map)
+            {
+            }
+
+            public override void Clear()
+            {
+                throw new NotSupportedException();
+            }
+
+            public override V Put(char[] text, V value)
+            {
+                throw new NotSupportedException();
+            }
+
+            public override V Put(ICharSequence text, V value)
+            {
+                throw new NotSupportedException();
+            }
+
+            public override V Put(string text, V value)
+            {
+                throw new NotSupportedException();
+            }
+
+            public override void Remove(object key)
+            {
+                throw new NotSupportedException();
+            }
+
+            internal override CharArrayMap<V>.EntrySet CreateEntrySet()
+            {
+                throw new NotSupportedException();
+            }
+        }
+
+        internal sealed class EmptyCharArrayMap<V> : UnmodifiableCharArrayMap<V>
+        {
+            public EmptyCharArrayMap()
+                : base(new CharArrayMap<V>(Lucene.Net.Util.Version.LUCENE_CURRENT, 0, false))
+            {
+            }
+
+            public override bool ContainsKey(char[] text, int off, int len)
+            {
+                if (text == null)
+                    throw new NullReferenceException();
+                return false;
+            }
+
+            public override bool ContainsKey(ICharSequence cs)
+            {
+                if (cs == null)
+                    throw new NullReferenceException();
+                return false;
+            }
+
+            public override bool ContainsKey(object o)
+            {
+                if (o == null)
+                    throw new NullReferenceException();
+                return false;
+            }
+
+            public override V Get(char[] text, int off, int len)
+            {
+                if (text == null)
+                    throw new NullReferenceException();
+                return default(V);
+            }
+
+            public override V Get(ICharSequence cs)
+            {
+                if (cs == null)
+                    throw new NullReferenceException();
+                return default(V);
+            }
+
+
+        }
     }
 }
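
A hypothetical usage sketch for the map above (the ignoreCase flag folds keys at insert and lookup time):

    using System;
    using Lucene.Net.Analysis.Util;
    using Version = Lucene.Net.Util.Version;

    internal static class CharArrayMapDemo
    {
        private static void Main()
        {
            var map = new CharArrayMap<string>(Version.LUCENE_CURRENT, 8, true); // ignoreCase
            map.Put("walk", "walking");
            map.Put("talk".ToCharArray(), "talking");

            char[] key = "WALK".ToCharArray();
            Console.WriteLine(map.Get(key, 0, key.Length)); // walking (case-folded lookup)
            Console.WriteLine(map["talk"]);                 // talking (indexer routes through Get(object))

            // Freeze before sharing across analyzers.
            CharArrayMap<string> frozen = CharArrayMap.UnmodifiableMap(map);
        }
    }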

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/4cc8ff0e/src/contrib/Analyzers/Util/CharArraySet.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Analyzers/Util/CharArraySet.cs b/src/contrib/Analyzers/Util/CharArraySet.cs
index 6dcc486..522bcaa 100644
--- a/src/contrib/Analyzers/Util/CharArraySet.cs
+++ b/src/contrib/Analyzers/Util/CharArraySet.cs
@@ -1,15 +1,125 @@
-using System;
+using Lucene.Net.Analysis.Support;
+using Lucene.Net.Support;
+using System;
 using System.Collections.Generic;
 using System.Linq;
 using System.Text;
 
 namespace Lucene.Net.Analysis.Util
 {
-    public class CharArraySet : ISet<object>
+    public class CharArraySet : AbstractSet<object>
     {
-        public static readonly CharArraySet EMPTY_SET = new CharArraySet(CharArrayMap.<Object>emptyMap());
-  private static readonly object PLACEHOLDER = new object();
-  
-  private readonly CharArrayMap<Object> map;
+        public static readonly CharArraySet EMPTY_SET = new CharArraySet(CharArrayMap.EmptyMap<object>());
+        private static readonly object PLACEHOLDER = new object();
+
+        private readonly CharArrayMap<object> map;
+
+        public CharArraySet(Lucene.Net.Util.Version matchVersion, int startSize, bool ignoreCase)
+            : this(new CharArrayMap<Object>(matchVersion, startSize, ignoreCase))
+        {
+        }
+
+        public CharArraySet(Lucene.Net.Util.Version matchVersion, ICollection<object> c, bool ignoreCase)
+            : this(matchVersion, c.Count, ignoreCase)
+        {
+            AddAll(c);
+        }
+
+        internal CharArraySet(CharArrayMap<Object> map)
+        {
+            this.map = map;
+        }
+
+        public override void Clear()
+        {
+            map.Clear();
+        }
+
+        public bool Contains(char[] text, int off, int len)
+        {
+            return map.ContainsKey(text, off, len);
+        }
+
+        public bool Contains(ICharSequence cs)
+        {
+            return map.ContainsKey(cs);
+        }
+
+        public override bool Contains(object o)
+        {
+            return map.ContainsKey(o);
+        }
+
+        public override bool Add(object o)
+        {
+            return map.Put(o, PLACEHOLDER) == null;
+        }
+
+        public bool Add(ICharSequence text)
+        {
+            return map.Put(text, PLACEHOLDER) == null;
+        }
+
+        public bool Add(string text)
+        {
+            return map.Put(text, PLACEHOLDER) == null;
+        }
+
+        public bool Add(char[] text)
+        {
+            return map.Put(text, PLACEHOLDER) == null;
+        }
+
+        public override int Count
+        {
+            get { return map.Count; }
+        }
+
+        public static CharArraySet UnmodifiableSet(CharArraySet set)
+        {
+            if (set == null)
+                throw new NullReferenceException("Given set is null");
+            if (set == EMPTY_SET)
+                return EMPTY_SET;
+            if (set.map is CharArrayMap.UnmodifiableCharArrayMap<object>)
+                return set;
+            return new CharArraySet(CharArrayMap.UnmodifiableMap(set.map));
+        }
+
+        public static CharArraySet Copy(Lucene.Net.Util.Version matchVersion, ICollection<object> set)
+        {
+            if (set == EMPTY_SET)
+                return EMPTY_SET;
+            if (set is CharArraySet)
+            {
+                CharArraySet source = (CharArraySet)set;
+                return new CharArraySet(CharArrayMap.Copy(source.map.matchVersion, source.map));
+            }
+            return new CharArraySet(matchVersion, set, false);
+        }
+
+        public override IEnumerator<object> GetEnumerator()
+        {
+            // enumerate via the map's OriginalKeySet (avoids endless recursion back into this set)
+            return map.OriginalKeySet.GetEnumerator();
+        }
+
+        public override string ToString()
+        {
+            StringBuilder sb = new StringBuilder("[");
+            foreach (Object item in this)
+            {
+                if (sb.Length > 1) sb.Append(", ");
+                if (item is char[])
+                {
+                    sb.Append((char[])item);
+                }
+                else
+                {
+                    sb.Append(item);
+                }
+            }
+            return sb.Append(']').ToString();
+        }
     }
 }
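
And the equivalent sketch for the set (hypothetical; Contains(object) accepts strings, while char[] keys avoid the allocation):

    using System;
    using Lucene.Net.Analysis.Util;
    using Version = Lucene.Net.Util.Version;

    internal static class CharArraySetDemo
    {
        private static void Main()
        {
            var stopwords = new CharArraySet(Version.LUCENE_CURRENT, 16, true); // ignoreCase
            stopwords.Add("the");
            stopwords.Add("and".ToCharArray());

            Console.WriteLine(stopwords.Contains("THE")); // True
            char[] word = "And".ToCharArray();
            Console.WriteLine(stopwords.Contains(word, 0, word.Length)); // True

            // Read-only view for sharing between analyzers.
            CharArraySet frozen = CharArraySet.UnmodifiableSet(stopwords);
        }
    }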

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/4cc8ff0e/src/contrib/Analyzers/Util/CharTokenizer.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Analyzers/Util/CharTokenizer.cs b/src/contrib/Analyzers/Util/CharTokenizer.cs
new file mode 100644
index 0000000..b0029fa
--- /dev/null
+++ b/src/contrib/Analyzers/Util/CharTokenizer.cs
@@ -0,0 +1,124 @@
+using Lucene.Net.Analysis.Tokenattributes;
+using Lucene.Net.Support;
+using System;
+using System.Collections.Generic;
+using System.IO;
+using System.Linq;
+using System.Text;
+using Version = Lucene.Net.Util.Version;
+
+namespace Lucene.Net.Analysis.Util
+{
+    public abstract class CharTokenizer : Tokenizer
+    {
+        public CharTokenizer(Version matchVersion, TextReader input)
+            : base(input)
+        {
+            charUtils = CharacterUtils.GetInstance(matchVersion);
+            termAtt = AddAttribute<ICharTermAttribute>();
+            offsetAtt = AddAttribute<IOffsetAttribute>();
+        }
+
+        public CharTokenizer(Version matchVersion, AttributeFactory factory, TextReader input)
+            : base(factory, input)
+        {
+            charUtils = CharacterUtils.GetInstance(matchVersion);
+            termAtt = AddAttribute<ICharTermAttribute>();
+            offsetAtt = AddAttribute<IOffsetAttribute>();
+        }
+
+        // note: bufferIndex starts at -1 so that consumers that forget to call Reset()
+        // fail fast with an out-of-range exception (best effort)
+        private int offset = 0, bufferIndex = -1, dataLen = 0, finalOffset = 0;
+        private const int MAX_WORD_LEN = 255;
+        private const int IO_BUFFER_SIZE = 4096;
+
+        private readonly ICharTermAttribute termAtt; // = addAttribute(CharTermAttribute.class);
+        private readonly IOffsetAttribute offsetAtt; // = addAttribute(OffsetAttribute.class);
+
+        private readonly CharacterUtils charUtils;
+        private readonly CharacterUtils.CharacterBuffer ioBuffer = CharacterUtils.NewCharacterBuffer(IO_BUFFER_SIZE);
+
+        protected abstract bool IsTokenChar(int c);
+
+        protected virtual int Normalize(int c)
+        {
+            return c;
+        }
+
+        public override bool IncrementToken()
+        {
+            ClearAttributes();
+            int length = 0;
+            int start = -1; // this variable is always initialized
+            int end = -1;
+            char[] buffer = termAtt.Buffer;
+            while (true)
+            {
+                if (bufferIndex >= dataLen)
+                {
+                    offset += dataLen;
+                    if (!charUtils.Fill(ioBuffer, input))
+                    { // read supplementary char aware with CharacterUtils
+                        dataLen = 0; // so next offset += dataLen won't decrement offset
+                        if (length > 0)
+                        {
+                            break;
+                        }
+                        else
+                        {
+                            finalOffset = CorrectOffset(offset);
+                            return false;
+                        }
+                    }
+                    dataLen = ioBuffer.Length;
+                    bufferIndex = 0;
+                }
+                // use CharacterUtils here to support < 3.1 UTF-16 code unit behavior if the char based methods are gone
+                int c = charUtils.CodePointAt(ioBuffer.Buffer, bufferIndex);
+                int charCount = Character.CharCount(c);
+                bufferIndex += charCount;
+
+                if (IsTokenChar(c))
+                {               // if it's a token char
+                    if (length == 0)
+                    {                // start of token
+                        //assert start == -1;
+                        start = offset + bufferIndex - charCount;
+                        end = start;
+                    }
+                    else if (length >= buffer.Length - 1)
+                    { // check if a supplementary could run out of bounds
+                        buffer = termAtt.ResizeBuffer(2 + length); // make sure a supplementary fits in the buffer
+                    }
+                    end += charCount;
+                    length += Character.ToChars(Normalize(c), buffer, length); // buffer it, normalized
+                    if (length >= MAX_WORD_LEN) // buffer overflow! make sure to check for >= surrogate pair could break == test
+                        break;
+                }
+                else if (length > 0)             // at non-Letter w/ chars
+                    break;                           // return 'em
+            }
+
+            termAtt.SetLength(length);
+            //assert start != -1;
+            offsetAtt.SetOffset(CorrectOffset(start), finalOffset = CorrectOffset(end));
+            return true;
+
+        }
+
+        public override void End()
+        {
+            // set final offset
+            offsetAtt.SetOffset(finalOffset, finalOffset);
+        }
+
+        public override void Reset()
+        {
+            bufferIndex = 0;
+            offset = 0;
+            dataLen = 0;
+            finalOffset = 0;
+            ioBuffer.Reset(); // make sure to reset the IO buffer!!
+        }
+    }
+}
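
Subclasses only decide which code points belong in a token, optionally normalizing them. A hypothetical digit tokenizer in the mold of LetterTokenizer:

    using System.IO;
    using Lucene.Net.Analysis.Util;
    using Version = Lucene.Net.Util.Version;

    public sealed class DigitTokenizer : CharTokenizer
    {
        public DigitTokenizer(Version matchVersion, TextReader input)
            : base(matchVersion, input)
        {
        }

        protected override bool IsTokenChar(int c)
        {
            // BMP-only test for brevity; supplementary digits would need a
            // code-point aware check.
            return char.IsDigit((char)c);
        }
    }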

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/4cc8ff0e/src/contrib/Analyzers/Util/StopwordAnalyzerBase.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Analyzers/Util/StopwordAnalyzerBase.cs b/src/contrib/Analyzers/Util/StopwordAnalyzerBase.cs
new file mode 100644
index 0000000..f6e9194
--- /dev/null
+++ b/src/contrib/Analyzers/Util/StopwordAnalyzerBase.cs
@@ -0,0 +1,80 @@
+using Lucene.Net.Util;
+using System;
+using System.Collections.Generic;
+using System.IO;
+using System.Linq;
+using System.Text;
+using Version = Lucene.Net.Util.Version;
+
+namespace Lucene.Net.Analysis.Util
+{
+    public abstract class StopwordAnalyzerBase : Analyzer
+    {
+        protected readonly CharArraySet stopwords;
+
+        protected readonly Version matchVersion;
+
+        public CharArraySet StopwordSet
+        {
+            get
+            {
+                return stopwords;
+            }
+        }
+
+        protected StopwordAnalyzerBase(Version version, CharArraySet stopwords)
+        {
+            matchVersion = version;
+            // analyzers should use char array set for stopwords!
+            this.stopwords = stopwords == null ? CharArraySet.EMPTY_SET : CharArraySet
+                .UnmodifiableSet(CharArraySet.Copy(version, stopwords));
+        }
+
+        protected StopwordAnalyzerBase(Version version)
+            : this(version, null)
+        {
+        }
+
+        protected static CharArraySet LoadStopwordSet(bool ignoreCase, Type aClass, string resource, string comment)
+        {
+            TextReader reader = null;
+            try
+            {
+                reader = IOUtils.GetDecodingReader(aClass.Assembly.GetManifestResourceStream(resource), IOUtils.CHARSET_UTF_8);
+                return WordlistLoader.GetWordSet(reader, comment, new CharArraySet(Version.LUCENE_31, 16, ignoreCase));
+            }
+            finally
+            {
+                IOUtils.Close(reader);
+            }
+        }
+
+        protected static CharArraySet LoadStopwordSet(Stream stopwords, Version matchVersion)
+        {
+            TextReader reader = null;
+            try
+            {
+                reader = IOUtils.GetDecodingReader(stopwords, IOUtils.CHARSET_UTF_8);
+                return WordlistLoader.GetWordSet(reader, matchVersion);
+            }
+            finally
+            {
+                IOUtils.Close(reader);
+            }
+        }
+
+        protected static CharArraySet LoadStopwordSet(TextReader stopwords, Version matchVersion)
+        {
+            try
+            {
+                return WordlistLoader.GetWordSet(stopwords, matchVersion);
+            }
+            finally
+            {
+                IOUtils.Close(stopwords);
+            }
+        }
+
+        public abstract override Analyzer.TokenStreamComponents CreateComponents(string fieldName, System.IO.TextReader reader);
+    }
+}
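
A minimal concrete analyzer over the base class; hypothetical, and it assumes the port exposes Analyzer.TokenStreamComponents with the single-Tokenizer constructor from Lucene 4.x:

    using System.IO;
    using Lucene.Net.Analysis;
    using Lucene.Net.Analysis.Core;
    using Lucene.Net.Analysis.Util;
    using Version = Lucene.Net.Util.Version;

    public sealed class LetterOnlyAnalyzer : StopwordAnalyzerBase
    {
        public LetterOnlyAnalyzer(Version version, CharArraySet stopwords)
            : base(version, stopwords) // base copies and freezes the stopword set
        {
        }

        public override Analyzer.TokenStreamComponents CreateComponents(string fieldName, TextReader reader)
        {
            // No stop filtering wired up in this sketch; a real analyzer would
            // wrap the tokenizer in a StopFilter built from StopwordSet.
            return new Analyzer.TokenStreamComponents(new LetterTokenizer(matchVersion, reader));
        }
    }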

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/4cc8ff0e/src/contrib/Analyzers/Util/TokenizerFactory.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Analyzers/Util/TokenizerFactory.cs b/src/contrib/Analyzers/Util/TokenizerFactory.cs
index 2fb600f..ea6892d 100644
--- a/src/contrib/Analyzers/Util/TokenizerFactory.cs
+++ b/src/contrib/Analyzers/Util/TokenizerFactory.cs
@@ -1,5 +1,6 @@
 using System;
 using System.Collections.Generic;
+using System.IO;
 using System.Linq;
 using System.Text;
 
@@ -7,5 +8,42 @@ namespace Lucene.Net.Analysis.Util
 {
     public abstract class TokenizerFactory : AbstractAnalysisFactory
     {
+        private static readonly AnalysisSPILoader<TokenizerFactory> loader =
+            new AnalysisSPILoader<TokenizerFactory>(typeof(TokenizerFactory));
+
+        public static TokenizerFactory ForName(String name, IDictionary<String, String> args)
+        {
+            return loader.NewInstance(name, args);
+        }
+
+        public static Type LookupClass(String name)
+        {
+            return loader.LookupClass(name);
+        }
+
+        public static ICollection<String> AvailableTokenizers
+        {
+            get
+            {
+                return loader.AvailableServices;
+            }
+        }
+
+        public static void ReloadTokenizers()
+        {
+            loader.Reload();
+        }
+
+        protected TokenizerFactory(IDictionary<String, String> args)
+            : base(args)
+        {
+        }
+
+        public Tokenizer Create(TextReader input)
+        {
+            return Create(Lucene.Net.Util.AttributeSource.AttributeFactory.DEFAULT_ATTRIBUTE_FACTORY, input);
+        }
+
+        public abstract Tokenizer Create(Lucene.Net.Util.AttributeSource.AttributeFactory factory, TextReader input);
     }
 }
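
With the loader wired up, factories also resolve by name -- the lowercased, suffix-stripped key derived in AnalysisSPILoader. A hypothetical lookup fragment (assumes System.Collections.Generic and System.IO usings, and that KeywordTokenizerFactory has been discovered):

    IDictionary<string, string> args = new Dictionary<string, string>(); // factories reject leftover args
    TokenizerFactory factory = TokenizerFactory.ForName("keyword", args);
    Tokenizer tok = factory.Create(new StringReader("one token, verbatim"));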

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/4cc8ff0e/src/contrib/Analyzers/Util/WordlistLoader.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Analyzers/Util/WordlistLoader.cs b/src/contrib/Analyzers/Util/WordlistLoader.cs
new file mode 100644
index 0000000..e78ea9b
--- /dev/null
+++ b/src/contrib/Analyzers/Util/WordlistLoader.cs
@@ -0,0 +1,155 @@
+using Lucene.Net.Util;
+using System;
+using System.Collections.Generic;
+using System.IO;
+using System.Linq;
+using System.Text;
+using System.Text.RegularExpressions;
+
+namespace Lucene.Net.Analysis.Util
+{
+    public static class WordlistLoader
+    {
+        private const int INITIAL_CAPACITY = 16;
+
+        public static CharArraySet GetWordSet(TextReader reader, CharArraySet result)
+        {
+            //BufferedReader br = null;
+            try
+            {
+                //br = getBufferedReader(reader);
+                String word = null;
+                while ((word = reader.ReadLine()) != null)
+                {
+                    result.Add(word.Trim());
+                }
+            }
+            finally
+            {
+                //IOUtils.Close(reader);
+            }
+            return result;
+        }
+
+        public static CharArraySet GetWordSet(TextReader reader, Lucene.Net.Util.Version matchVersion)
+        {
+            return GetWordSet(reader, new CharArraySet(matchVersion, INITIAL_CAPACITY, false));
+        }
+
+        public static CharArraySet GetWordSet(TextReader reader, String comment, Lucene.Net.Util.Version matchVersion)
+        {
+            return GetWordSet(reader, comment, new CharArraySet(matchVersion, INITIAL_CAPACITY, false));
+        }
+
+        public static CharArraySet GetWordSet(TextReader reader, String comment, CharArraySet result)
+        {
+            //BufferedReader br = null;
+            try
+            {
+                //br = getBufferedReader(reader);
+                String word = null;
+                while ((word = reader.ReadLine()) != null)
+                {
+                    if (word.StartsWith(comment) == false)
+                    {
+                        result.Add(word.Trim());
+                    }
+                }
+            }
+            finally
+            {
+                //IOUtils.Close(reader);
+            }
+            return result;
+        }
+
+        public static CharArraySet GetSnowballWordSet(TextReader reader, CharArraySet result)
+        {
+            //BufferedReader br = null;
+            try
+            {
+                //br = getBufferedReader(reader);
+                String line = null;
+                var rx = new Regex("\\s+");
+                while ((line = reader.ReadLine()) != null)
+                {
+                    int comment = line.IndexOf('|');
+                    if (comment >= 0) line = line.Substring(0, comment);
+                    String[] words = rx.Split(line);
+                    for (int i = 0; i < words.Length; i++)
+                        if (words[i].Length > 0) result.Add(words[i]);
+                }
+            }
+            finally
+            {
+                //IOUtils.Close(reader);
+            }
+            return result;
+        }
+
+        public static CharArraySet GetSnowballWordSet(TextReader reader, Lucene.Net.Util.Version matchVersion)
+        {
+            return GetSnowballWordSet(reader, new CharArraySet(matchVersion, INITIAL_CAPACITY, false));
+        }
+
+        public static CharArrayMap<String> GetStemDict(TextReader reader, CharArrayMap<String> result)
+        {
+            //BufferedReader br = null;
+            try
+            {
+                //br = getBufferedReader(reader);
+                String line;
+                var rx = new Regex("\t");
+                while ((line = reader.ReadLine()) != null)
+                {
+                    String[] wordstem = rx.Split(line, 2);
+                    result.Put(wordstem[0], wordstem[1]);
+                }
+            }
+            finally
+            {
+                //IOUtils.Close(reader);
+            }
+            return result;
+        }
+
+        public static IList<String> GetLines(Stream stream, Encoding charset)
+        {
+            TextReader input = null;
+            List<String> lines;
+            bool success = false;
+            try
+            {
+                input = IOUtils.GetDecodingReader(stream, charset);
+
+                lines = new List<String>();
+                for (String word = null; (word = input.ReadLine()) != null; )
+                {
+                    // skip initial bom marker
+                    if (lines.Count == 0 && word.Length > 0 && word[0] == '\uFEFF')
+                        word = word.Substring(1);
+                    // skip comments
+                    if (word.StartsWith("#")) continue;
+                    word = word.Trim();
+                    // skip blank lines
+                    if (word.Length == 0) continue;
+                    lines.Add(word);
+                }
+                success = true;
+                return lines;
+            }
+            finally
+            {
+                if (success)
+                {
+                    IOUtils.Close(input);
+                }
+                else
+                {
+                    IOUtils.CloseWhileHandlingException((IDisposable)input);
+                }
+            }
+        }
+
+    }
+}
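
A minimal usage sketch of the word-set helpers above, for orientation. This is
hedged: the WordlistLoader class name, the Lucene.Net.Analysis namespace for
CharArraySet, and the stopwords.txt path are illustrative assumptions, not part
of this commit.

    using System.IO;
    using Lucene.Net.Analysis;                // CharArraySet (namespace assumed)
    using Version = Lucene.Net.Util.Version;

    static class WordSetExample
    {
        static void Main()
        {
            // One word per line; lines starting with "#" are treated as comments.
            using (TextReader reader = new StreamReader("stopwords.txt"))
            {
                CharArraySet stopWords = WordlistLoader.GetWordSet(reader, "#", Version.LUCENE_30);
                System.Console.WriteLine(stopWords.Contains("the"));
            }
        }
    }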


[17/50] [abbrv] Massive cleanup, reducing compiler errors

Posted by mh...@apache.org.
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/80561f72/src/core/Search/Spans/FieldMaskingSpanQuery.cs
----------------------------------------------------------------------
diff --git a/src/core/Search/Spans/FieldMaskingSpanQuery.cs b/src/core/Search/Spans/FieldMaskingSpanQuery.cs
index 86c7c62..8abf847 100644
--- a/src/core/Search/Spans/FieldMaskingSpanQuery.cs
+++ b/src/core/Search/Spans/FieldMaskingSpanQuery.cs
@@ -95,7 +95,7 @@ namespace Lucene.Net.Search.Spans
         // :NOTE: getBoost and setBoost are not proxied to the maskedQuery
         // ...this is done to be more consistent with things like SpanFirstQuery
 
-        public override Spans GetSpans(AtomicReaderContext context, IBits acceptDocs, IDictionary<Term, TermContext> termContexts)
+        public override SpansBase GetSpans(AtomicReaderContext context, IBits acceptDocs, IDictionary<Term, TermContext> termContexts)
         {
             return maskedQuery.GetSpans(context, acceptDocs, termContexts);
         }

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/80561f72/src/core/Search/Spans/NearSpansOrdered.cs
----------------------------------------------------------------------
diff --git a/src/core/Search/Spans/NearSpansOrdered.cs b/src/core/Search/Spans/NearSpansOrdered.cs
index 8a9dfe8..a22ff84 100644
--- a/src/core/Search/Spans/NearSpansOrdered.cs
+++ b/src/core/Search/Spans/NearSpansOrdered.cs
@@ -27,412 +27,415 @@ using IndexReader = Lucene.Net.Index.IndexReader;
 
 namespace Lucene.Net.Search.Spans
 {
-	
-	/// <summary>A Spans that is formed from the ordered subspans of a SpanNearQuery
-	/// where the subspans do not overlap and have a maximum slop between them.
-	/// <p/>
-	/// The formed spans only contains minimum slop matches.<br/>
-	/// The matching slop is computed from the distance(s) between
-	/// the non overlapping matching Spans.<br/>
-	/// Successive matches are always formed from the successive Spans
-	/// of the SpanNearQuery.
-	/// <p/>
-	/// The formed spans may contain overlaps when the slop is at least 1.
-	/// For example, when querying using
-	/// <c>t1 t2 t3</c>
-	/// with slop at least 1, the fragment:
-	/// <c>t1 t2 t1 t3 t2 t3</c>
-	/// matches twice:
-	/// <c>t1 t2 .. t3      </c>
-	/// <c>      t1 .. t2 t3</c>
-	/// 
-	/// 
-	/// Expert:
-	/// Only public for subclassing.  Most implementations should not need this class
-	/// </summary>
-	public class NearSpansOrdered:Spans
-	{
-		internal class AnonymousClassComparator : System.Collections.IComparer
-		{
-			public AnonymousClassComparator(NearSpansOrdered enclosingInstance)
-			{
-			    this.enclosingInstance = enclosingInstance;
-			}
-
-			private NearSpansOrdered enclosingInstance;
-
-			public NearSpansOrdered Enclosing_Instance
-			{
-				get
-				{
-					return enclosingInstance;
-				}
-				
-			}
+
+    /// <summary>A Spans that is formed from the ordered subspans of a SpanNearQuery
+    /// where the subspans do not overlap and have a maximum slop between them.
+    /// <p/>
+    /// The formed spans only contains minimum slop matches.<br/>
+    /// The matching slop is computed from the distance(s) between
+    /// the non overlapping matching Spans.<br/>
+    /// Successive matches are always formed from the successive Spans
+    /// of the SpanNearQuery.
+    /// <p/>
+    /// The formed spans may contain overlaps when the slop is at least 1.
+    /// For example, when querying using
+    /// <c>t1 t2 t3</c>
+    /// with slop at least 1, the fragment:
+    /// <c>t1 t2 t1 t3 t2 t3</c>
+    /// matches twice:
+    /// <c>t1 t2 .. t3      </c>
+    /// <c>      t1 .. t2 t3</c>
+    /// 
+    /// 
+    /// Expert:
+    /// Only public for subclassing.  Most implementations should not need this class
+    /// </summary>
+    public class NearSpansOrdered : SpansBase
+    {
+        internal class AnonymousClassComparator : System.Collections.IComparer
+        {
+            public AnonymousClassComparator(NearSpansOrdered enclosingInstance)
+            {
+                this.enclosingInstance = enclosingInstance;
+            }
+
+            private NearSpansOrdered enclosingInstance;
+
+            public NearSpansOrdered Enclosing_Instance
+            {
+                get
+                {
+                    return enclosingInstance;
+                }
+
+            }
 
             public virtual int Compare(object o1, object o2)
-			{
-				return ((Spans) o1).Doc - ((Spans) o2).Doc;
-			}
-		}
-
-		private int allowedSlop;
-		private bool firstTime = true;
-		private bool more = false;
-		
-		/// <summary>The spans in the same order as the SpanNearQuery </summary>
-		private Spans[] subSpans;
-		
-		/// <summary>Indicates that all subSpans have same doc() </summary>
-		private bool inSameDoc = false;
-		
-		private int matchDoc = - 1;
-		private int matchStart = - 1;
-		private int matchEnd = - 1;
-		private IList<sbyte[]> matchPayload;
-		
-		private Spans[] subSpansByDoc;
-		private IComparer spanDocComparator;
-		
-		private SpanNearQuery query;
-		private bool collectPayloads = true;
-		
-		public NearSpansOrdered(SpanNearQuery spanNearQuery, AtomicReaderContext context, IBits acceptDocs, IDictionary<Term, TermContext> termContexts)
+            {
+                return ((SpansBase)o1).Doc - ((SpansBase)o2).Doc;
+            }
+        }
+
+        private int allowedSlop;
+        private bool firstTime = true;
+        private bool more = false;
+
+        /// <summary>The spans in the same order as the SpanNearQuery </summary>
+        private SpansBase[] subSpans;
+
+        /// <summary>Indicates that all subSpans have same doc() </summary>
+        private bool inSameDoc = false;
+
+        private int matchDoc = -1;
+        private int matchStart = -1;
+        private int matchEnd = -1;
+        private IList<sbyte[]> matchPayload;
+
+        private SpansBase[] subSpansByDoc;
+        private IComparer spanDocComparator;
+
+        private SpanNearQuery query;
+        private bool collectPayloads = true;
+
+        public NearSpansOrdered(SpanNearQuery spanNearQuery, AtomicReaderContext context, IBits acceptDocs, IDictionary<Term, TermContext> termContexts)
             : this(spanNearQuery, context, acceptDocs, termContexts, true)
-		{
-		}
+        {
+        }
 
         public NearSpansOrdered(SpanNearQuery spanNearQuery, AtomicReaderContext context, IBits acceptDocs, IDictionary<Term, TermContext> termContexts, bool collectPayloads)
-		{
+        {
             spanDocComparator = new AnonymousClassComparator(this);
-			if (spanNearQuery.GetClauses().Length < 2)
-			{
-				throw new ArgumentException("Less than 2 clauses: " + spanNearQuery);
-			}
-			this.collectPayloads = collectPayloads;
-			allowedSlop = spanNearQuery.Slop;
-			SpanQuery[] clauses = spanNearQuery.GetClauses();
-			subSpans = new Spans[clauses.Length];
-			matchPayload = new List<sbyte[]>();
-			subSpansByDoc = new Spans[clauses.Length];
-			for (int i = 0; i < clauses.Length; i++)
-			{
-				subSpans[i] = clauses[i].GetSpans(context, acceptDocs, termContexts);
-				subSpansByDoc[i] = subSpans[i]; // used in toSameDoc()
-			}
-			query = spanNearQuery; // kept for toString() only.
-		}
-		
-		// inherit javadocs
-
-	    public override int Doc
-	    {
-	        get { return matchDoc; }
-	    }
-
-	    // inherit javadocs
-
-	    public override int Start
-	    {
-	        get { return matchStart; }
-	    }
-
-	    // inherit javadocs
-
-	    public override int End
-	    {
-	        get { return matchEnd; }
-	    }
-
-	    public virtual Spans[] GetSubSpans()
-		{
-			return subSpans;
-		}
-		
-		// TODO: Remove warning after API has been finalized
-		// TODO: Would be nice to be able to lazy load payloads
-
-	    public override ICollection<sbyte[]> GetPayload()
-	    {
-	        return matchPayload;
-	    }
-
-	    // TODO: Remove warning after API has been finalized
-
-	    public override bool IsPayloadAvailable()
-	    {
-	        return matchPayload.Any();
-	    }
-
-        public override long Cost()
+            if (spanNearQuery.GetClauses().Length < 2)
+            {
+                throw new ArgumentException("Less than 2 clauses: " + spanNearQuery);
+            }
+            this.collectPayloads = collectPayloads;
+            allowedSlop = spanNearQuery.Slop;
+            SpanQuery[] clauses = spanNearQuery.GetClauses();
+            subSpans = new SpansBase[clauses.Length];
+            matchPayload = new List<sbyte[]>();
+            subSpansByDoc = new SpansBase[clauses.Length];
+            for (int i = 0; i < clauses.Length; i++)
+            {
+                subSpans[i] = clauses[i].GetSpans(context, acceptDocs, termContexts);
+                subSpansByDoc[i] = subSpans[i]; // used in toSameDoc()
+            }
+            query = spanNearQuery; // kept for toString() only.
+        }
+
+        // inherit javadocs
+
+        public override int Doc
+        {
+            get { return matchDoc; }
+        }
+
+        // inherit javadocs
+
+        public override int Start
+        {
+            get { return matchStart; }
+        }
+
+        // inherit javadocs
+
+        public override int End
+        {
+            get { return matchEnd; }
+        }
+
+        public virtual SpansBase[] GetSubSpans()
+        {
+            return subSpans;
+        }
+
+        // TODO: Remove warning after API has been finalized
+        // TODO: Would be nice to be able to lazy load payloads
+
+        public override ICollection<sbyte[]> GetPayload()
         {
-            var result = long.MaxValue;
-            foreach (var span in subSpans)
-                result = Math.Min(result, span.Cost());
-            return result;
+            return matchPayload;
         }
 
-	    // inherit javadocs
-		public override bool Next()
-		{
-			if (firstTime)
-			{
-				firstTime = false;
-			    foreach (Spans t in subSpans)
-			    {
-			        if (!t.Next())
-			        {
-			            more = false;
-			            return false;
-			        }
-			    }
-			    more = true;
-			}
-			if (collectPayloads)
-			{
-				matchPayload.Clear();
-			}
-			return AdvanceAfterOrdered();
-		}
-		
-		// inherit javadocs
-		public override bool SkipTo(int target)
-		{
-			if (firstTime)
-			{
-				firstTime = false;
-				for (int i = 0; i < subSpans.Length; i++)
-				{
-					if (!subSpans[i].SkipTo(target))
-					{
-						more = false;
-						return false;
-					}
-				}
-				more = true;
-			}
-			else if (more && (subSpans[0].Doc < target))
-			{
-				if (subSpans[0].SkipTo(target))
-				{
-					inSameDoc = false;
-				}
-				else
-				{
-					more = false;
-					return false;
-				}
-			}
-			if (collectPayloads)
-			{
-				matchPayload.Clear();
-			}
-			return AdvanceAfterOrdered();
-		}
-		
-		/// <summary>Advances the subSpans to just after an ordered match with a minimum slop
-		/// that is smaller than the slop allowed by the SpanNearQuery.
-		/// </summary>
-		/// <returns> true iff there is such a match.
-		/// </returns>
-		private bool AdvanceAfterOrdered()
-		{
-			while (more && (inSameDoc || ToSameDoc()))
-			{
-				if (StretchToOrder() && ShrinkToAfterShortestMatch())
-				{
-					return true;
-				}
-			}
-			return false; // no more matches
-		}
-		
-		
-		/// <summary>Advance the subSpans to the same document </summary>
-		private bool ToSameDoc()
-		{
-			Array.Sort(subSpansByDoc, spanDocComparator);
-			int firstIndex = 0;
-			int maxDoc = subSpansByDoc[subSpansByDoc.Length - 1].Doc;
-			while (subSpansByDoc[firstIndex].Doc != maxDoc)
-			{
-				if (!subSpansByDoc[firstIndex].SkipTo(maxDoc))
-				{
-					more = false;
-					inSameDoc = false;
-					return false;
-				}
-				maxDoc = subSpansByDoc[firstIndex].Doc;
-				if (++firstIndex == subSpansByDoc.Length)
-				{
-					firstIndex = 0;
-				}
-			}
-			for (int i = 0; i < subSpansByDoc.Length; i++)
-			{
-				Debug.Assert((subSpansByDoc [i].Doc == maxDoc)
-					, "NearSpansOrdered.toSameDoc() spans " + subSpansByDoc [0] 
-					+ "\n at doc " + subSpansByDoc [i].Doc 
-					+ ", but should be at " + maxDoc);
-			}
-			inSameDoc = true;
-			return true;
-		}
-		
-		/// <summary>Check whether two Spans in the same document are ordered.</summary>
-		/// <param name="spans1">
-		/// </param>
-		/// <param name="spans2">
-		/// </param>
-		/// <returns> true iff spans1 starts before spans2
-		/// or the spans start at the same position,
-		/// and spans1 ends before spans2.
-		/// </returns>
-		internal static bool DocSpansOrdered(Spans spans1, Spans spans2)
-		{
-			Debug.Assert(spans1.Doc == spans2.Doc, "doc1 " + spans1.Doc + " != doc2 " + spans2.Doc);
-			int start1 = spans1.Start;
-			int start2 = spans2.Start;
-			/* Do not call docSpansOrdered(int,int,int,int) to avoid invoking .end() : */
-			return (start1 == start2)?(spans1.End < spans2.End):(start1 < start2);
-		}
-		
-		/// <summary>Like <see cref="DocSpansOrdered(Spans,Spans)" />, but use the spans
-		/// starts and ends as parameters.
-		/// </summary>
-		private static bool DocSpansOrdered(int start1, int end1, int start2, int end2)
-		{
-			return (start1 == start2)?(end1 < end2):(start1 < start2);
-		}
-		
-		/// <summary>Order the subSpans within the same document by advancing all later spans
-		/// after the previous one.
-		/// </summary>
-		private bool StretchToOrder()
-		{
-			matchDoc = subSpans[0].Doc;
-			for (int i = 1; inSameDoc && (i < subSpans.Length); i++)
-			{
-				while (!DocSpansOrdered(subSpans[i - 1], subSpans[i]))
-				{
-					if (!subSpans[i].Next())
-					{
-						inSameDoc = false;
-						more = false;
-						break;
-					}
-					else if (matchDoc != subSpans[i].Doc)
-					{
-						inSameDoc = false;
-						break;
-					}
-				}
-			}
-			return inSameDoc;
-		}
-		
-		/// <summary>The subSpans are ordered in the same doc, so there is a possible match.
-		/// Compute the slop while making the match as short as possible by advancing
-		/// all subSpans except the last one in reverse order.
-		/// </summary>
-		private bool ShrinkToAfterShortestMatch()
-		{
-			matchStart = subSpans[subSpans.Length - 1].Start;
-			matchEnd = subSpans[subSpans.Length - 1].End;
+        // TODO: Remove warning after API has been finalized
+
+        public override bool IsPayloadAvailable()
+        {
+            return matchPayload.Any();
+        }
+
+        public override long Cost
+        {
+            get
+            {
+                var result = long.MaxValue;
+                foreach (var span in subSpans)
+                    result = Math.Min(result, span.Cost);
+                return result;
+            }
+        }
+
+        // inherit javadocs
+        public override bool Next()
+        {
+            if (firstTime)
+            {
+                firstTime = false;
+                foreach (SpansBase t in subSpans)
+                {
+                    if (!t.Next())
+                    {
+                        more = false;
+                        return false;
+                    }
+                }
+                more = true;
+            }
+            if (collectPayloads)
+            {
+                matchPayload.Clear();
+            }
+            return AdvanceAfterOrdered();
+        }
+
+        // inherit javadocs
+        public override bool SkipTo(int target)
+        {
+            if (firstTime)
+            {
+                firstTime = false;
+                for (int i = 0; i < subSpans.Length; i++)
+                {
+                    if (!subSpans[i].SkipTo(target))
+                    {
+                        more = false;
+                        return false;
+                    }
+                }
+                more = true;
+            }
+            else if (more && (subSpans[0].Doc < target))
+            {
+                if (subSpans[0].SkipTo(target))
+                {
+                    inSameDoc = false;
+                }
+                else
+                {
+                    more = false;
+                    return false;
+                }
+            }
+            if (collectPayloads)
+            {
+                matchPayload.Clear();
+            }
+            return AdvanceAfterOrdered();
+        }
+
+        /// <summary>Advances the subSpans to just after an ordered match with a minimum slop
+        /// that is smaller than the slop allowed by the SpanNearQuery.
+        /// </summary>
+        /// <returns> true iff there is such a match.
+        /// </returns>
+        private bool AdvanceAfterOrdered()
+        {
+            while (more && (inSameDoc || ToSameDoc()))
+            {
+                if (StretchToOrder() && ShrinkToAfterShortestMatch())
+                {
+                    return true;
+                }
+            }
+            return false; // no more matches
+        }
+
+
+        /// <summary>Advance the subSpans to the same document </summary>
+        private bool ToSameDoc()
+        {
+            Array.Sort(subSpansByDoc, spanDocComparator);
+            int firstIndex = 0;
+            int maxDoc = subSpansByDoc[subSpansByDoc.Length - 1].Doc;
+            while (subSpansByDoc[firstIndex].Doc != maxDoc)
+            {
+                if (!subSpansByDoc[firstIndex].SkipTo(maxDoc))
+                {
+                    more = false;
+                    inSameDoc = false;
+                    return false;
+                }
+                maxDoc = subSpansByDoc[firstIndex].Doc;
+                if (++firstIndex == subSpansByDoc.Length)
+                {
+                    firstIndex = 0;
+                }
+            }
+            for (int i = 0; i < subSpansByDoc.Length; i++)
+            {
+                Debug.Assert((subSpansByDoc[i].Doc == maxDoc)
+                    , "NearSpansOrdered.toSameDoc() spans " + subSpansByDoc[0]
+                    + "\n at doc " + subSpansByDoc[i].Doc
+                    + ", but should be at " + maxDoc);
+            }
+            inSameDoc = true;
+            return true;
+        }
+
+        /// <summary>Check whether two Spans in the same document are ordered.</summary>
+        /// <param name="spans1">
+        /// </param>
+        /// <param name="spans2">
+        /// </param>
+        /// <returns> true iff spans1 starts before spans2
+        /// or the spans start at the same position,
+        /// and spans1 ends before spans2.
+        /// </returns>
+        internal static bool DocSpansOrdered(SpansBase spans1, SpansBase spans2)
+        {
+            Debug.Assert(spans1.Doc == spans2.Doc, "doc1 " + spans1.Doc + " != doc2 " + spans2.Doc);
+            int start1 = spans1.Start;
+            int start2 = spans2.Start;
+            /* Do not call docSpansOrdered(int,int,int,int) to avoid invoking .end() : */
+            return (start1 == start2) ? (spans1.End < spans2.End) : (start1 < start2);
+        }
+
+        /// <summary>Like <see cref="DocSpansOrdered(SpansBase,SpansBase)" />, but use the spans
+        /// starts and ends as parameters.
+        /// </summary>
+        private static bool DocSpansOrdered(int start1, int end1, int start2, int end2)
+        {
+            return (start1 == start2) ? (end1 < end2) : (start1 < start2);
+        }
+
+        /// <summary>Order the subSpans within the same document by advancing all later spans
+        /// after the previous one.
+        /// </summary>
+        private bool StretchToOrder()
+        {
+            matchDoc = subSpans[0].Doc;
+            for (int i = 1; inSameDoc && (i < subSpans.Length); i++)
+            {
+                while (!DocSpansOrdered(subSpans[i - 1], subSpans[i]))
+                {
+                    if (!subSpans[i].Next())
+                    {
+                        inSameDoc = false;
+                        more = false;
+                        break;
+                    }
+                    else if (matchDoc != subSpans[i].Doc)
+                    {
+                        inSameDoc = false;
+                        break;
+                    }
+                }
+            }
+            return inSameDoc;
+        }
+
+        /// <summary>The subSpans are ordered in the same doc, so there is a possible match.
+        /// Compute the slop while making the match as short as possible by advancing
+        /// all subSpans except the last one in reverse order.
+        /// </summary>
+        private bool ShrinkToAfterShortestMatch()
+        {
+            matchStart = subSpans[subSpans.Length - 1].Start;
+            matchEnd = subSpans[subSpans.Length - 1].End;
 
             ISet<sbyte[]> possibleMatchPayloads = new HashSet<sbyte[]>();
             if (subSpans[subSpans.Length - 1].IsPayloadAvailable())
             {
                 possibleMatchPayloads.UnionWith(subSpans[subSpans.Length - 1].GetPayload());
             }
-			
-		    IList<sbyte[]> possiblePayload = null;
-			
-			int matchSlop = 0;
-			int lastStart = matchStart;
-			int lastEnd = matchEnd;
-			for (int i = subSpans.Length - 2; i >= 0; i--)
-			{
-				Spans prevSpans = subSpans[i];
-				if (collectPayloads && prevSpans.IsPayloadAvailable())
-				{
-					ICollection<sbyte[]> payload = prevSpans.GetPayload();
-					possiblePayload = new List<sbyte[]>(payload.Count);
-					possiblePayload.AddRange(payload);
-				}
-				
-				int prevStart = prevSpans.Start;
-				int prevEnd = prevSpans.End;
-				while (true)
-				{
-					// Advance prevSpans until after (lastStart, lastEnd)
-					if (!prevSpans.Next())
-					{
-						inSameDoc = false;
-						more = false;
-						break; // Check remaining subSpans for final match.
-					}
-					else if (matchDoc != prevSpans.Doc)
-					{
-						inSameDoc = false; // The last subSpans is not advanced here.
-						break; // Check remaining subSpans for last match in this document.
-					}
-					else
-					{
-						int ppStart = prevSpans.Start;
-						int ppEnd = prevSpans.End; // Cannot avoid invoking .end()
-						if (!DocSpansOrdered(ppStart, ppEnd, lastStart, lastEnd))
-						{
-							break; // Check remaining subSpans.
-						}
-						else
-						{
-							// prevSpans still before (lastStart, lastEnd)
-							prevStart = ppStart;
-							prevEnd = ppEnd;
-							if (collectPayloads && prevSpans.IsPayloadAvailable())
-							{
-								ICollection<sbyte[]> payload = prevSpans.GetPayload();
-								possiblePayload = new List<sbyte[]>(payload.Count);
-								possiblePayload.AddRange(payload);
-							}
-						}
-					}
-				}
-				
-				if (collectPayloads && possiblePayload != null)
-				{
+
+            IList<sbyte[]> possiblePayload = null;
+
+            int matchSlop = 0;
+            int lastStart = matchStart;
+            int lastEnd = matchEnd;
+            for (int i = subSpans.Length - 2; i >= 0; i--)
+            {
+                SpansBase prevSpans = subSpans[i];
+                if (collectPayloads && prevSpans.IsPayloadAvailable())
+                {
+                    ICollection<sbyte[]> payload = prevSpans.GetPayload();
+                    possiblePayload = new List<sbyte[]>(payload.Count);
+                    possiblePayload.AddRange(payload);
+                }
+
+                int prevStart = prevSpans.Start;
+                int prevEnd = prevSpans.End;
+                while (true)
+                {
+                    // Advance prevSpans until after (lastStart, lastEnd)
+                    if (!prevSpans.Next())
+                    {
+                        inSameDoc = false;
+                        more = false;
+                        break; // Check remaining subSpans for final match.
+                    }
+                    else if (matchDoc != prevSpans.Doc)
+                    {
+                        inSameDoc = false; // The last subSpans is not advanced here.
+                        break; // Check remaining subSpans for last match in this document.
+                    }
+                    else
+                    {
+                        int ppStart = prevSpans.Start;
+                        int ppEnd = prevSpans.End; // Cannot avoid invoking .end()
+                        if (!DocSpansOrdered(ppStart, ppEnd, lastStart, lastEnd))
+                        {
+                            break; // Check remaining subSpans.
+                        }
+                        else
+                        {
+                            // prevSpans still before (lastStart, lastEnd)
+                            prevStart = ppStart;
+                            prevEnd = ppEnd;
+                            if (collectPayloads && prevSpans.IsPayloadAvailable())
+                            {
+                                ICollection<sbyte[]> payload = prevSpans.GetPayload();
+                                possiblePayload = new List<sbyte[]>(payload.Count);
+                                possiblePayload.AddRange(payload);
+                            }
+                        }
+                    }
+                }
+
+                if (collectPayloads && possiblePayload != null)
+                {
                     possibleMatchPayloads.UnionWith(possiblePayload);
-				}
-				
-				Debug.Assert(prevStart <= matchStart);
-				if (matchStart > prevEnd)
-				{
-					// Only non overlapping spans add to slop.
-					matchSlop += (matchStart - prevEnd);
-				}
-				
-				/* Do not break on (matchSlop > allowedSlop) here to make sure
-				* that subSpans[0] is advanced after the match, if any.
-				*/
-				matchStart = prevStart;
-				lastStart = prevStart;
-				lastEnd = prevEnd;
-			}
-			
-			bool match = matchSlop <= allowedSlop;
-			
-			if (collectPayloads && match && possibleMatchPayloads.Count > 0)
-			{
+                }
+
+                Debug.Assert(prevStart <= matchStart);
+                if (matchStart > prevEnd)
+                {
+                    // Only non overlapping spans add to slop.
+                    matchSlop += (matchStart - prevEnd);
+                }
+
+                /* Do not break on (matchSlop > allowedSlop) here to make sure
+                * that subSpans[0] is advanced after the match, if any.
+                */
+                matchStart = prevStart;
+                lastStart = prevStart;
+                lastEnd = prevEnd;
+            }
+
+            bool match = matchSlop <= allowedSlop;
+
+            if (collectPayloads && match && possibleMatchPayloads.Count > 0)
+            {
                 matchPayload.AddRange(possibleMatchPayloads);
-			}
-			
-			return match; // ordered and allowed slop
-		}
-		
-		public override string ToString()
-		{
-			return GetType().FullName + "(" + query.ToString() + ")@" + (firstTime?"START":(more?(Doc + ":" + Start + "-" + End):"END"));
-		}
-	}
+            }
+
+            return match; // ordered and allowed slop
+        }
+
+        public override string ToString()
+        {
+            return GetType().FullName + "(" + query.ToString() + ")@" + (firstTime ? "START" : (more ? (Doc + ":" + Start + "-" + End) : "END"));
+        }
+    }
 }
\ No newline at end of file

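
To make the ordered slop rule in the class summary above concrete, a hedged
sketch (the field name "f" and the term positions are illustrative
assumptions):

    // Ordered near query for t1 t2 t3 with slop 1; inOrder = true selects
    // NearSpansOrdered when the query has two or more clauses.
    var near = new SpanNearQuery(
        new SpanQuery[]
        {
            new SpanTermQuery(new Term("f", "t1")),
            new SpanTermQuery(new Term("f", "t2")),
            new SpanTermQuery(new Term("f", "t3"))
        },
        1,      // slop: total gap allowed between non-overlapping sub-spans
        true);  // inOrder
    // Against the fragment "t1 t2 t1 t3 t2 t3" (positions 0..5) this matches
    // twice, as the summary says: t1@0 t2@1 .. t3@3 (gap 1) and
    // t1@2 .. t2@4 t3@5 (gap 1).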
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/80561f72/src/core/Search/Spans/NearSpansUnordered.cs
----------------------------------------------------------------------
diff --git a/src/core/Search/Spans/NearSpansUnordered.cs b/src/core/Search/Spans/NearSpansUnordered.cs
index 4e2750d..d472b87 100644
--- a/src/core/Search/Spans/NearSpansUnordered.cs
+++ b/src/core/Search/Spans/NearSpansUnordered.cs
@@ -24,399 +24,408 @@ using IndexReader = Lucene.Net.Index.IndexReader;
 
 namespace Lucene.Net.Search.Spans
 {
-	
-	/// <summary> Similar to <see cref="NearSpansOrdered" />, but for the unordered case.
-	/// 
-	/// Expert:
-	/// Only public for subclassing.  Most implementations should not need this class
-	/// </summary>
-	public class NearSpansUnordered : Spans
-	{
-		private SpanNearQuery query;
-		
-		private IList<SpansCell> ordered = new List<SpansCell>(); // spans in query order
-		private Spans[] subSpans;
-		private int slop; // from query
-		
-		private SpansCell first; // linked list of spans
-		private SpansCell last; // sorted by doc only
-		
-		private int totalLength; // sum of current lengths
-		
-		private CellQueue queue; // sorted queue of spans
-		private SpansCell max; // max element in queue
-		
-		private bool more = true; // true iff not done
-		private bool firstTime = true; // true before first next()
-		
-		private class CellQueue : PriorityQueue<SpansCell>
-		{
-			private NearSpansUnordered enclosingInstance;
-			public NearSpansUnordered Enclosing_Instance
-			{
-				get
-				{
-					return enclosingInstance;
-				}
-				
-			}
-			public CellQueue(NearSpansUnordered enclosingInstance, int size) : base(size)
-			{
-			    this.enclosingInstance = enclosingInstance;
-			}
+
+    /// <summary> Similar to <see cref="NearSpansOrdered" />, but for the unordered case.
+    /// 
+    /// Expert:
+    /// Only public for subclassing.  Most implementations should not need this class
+    /// </summary>
+    public class NearSpansUnordered : SpansBase
+    {
+        private SpanNearQuery query;
+
+        private IList<SpansCell> ordered = new List<SpansCell>(); // spans in query order
+        private SpansBase[] subSpans;
+        private int slop; // from query
+
+        private SpansCell first; // linked list of spans
+        private SpansCell last; // sorted by doc only
+
+        private int totalLength; // sum of current lengths
+
+        private CellQueue queue; // sorted queue of spans
+        private SpansCell max; // max element in queue
+
+        private bool more = true; // true iff not done
+        private bool firstTime = true; // true before first next()
+
+        private class CellQueue : PriorityQueue<SpansCell>
+        {
+            private NearSpansUnordered enclosingInstance;
+            public NearSpansUnordered Enclosing_Instance
+            {
+                get
+                {
+                    return enclosingInstance;
+                }
+
+            }
+            public CellQueue(NearSpansUnordered enclosingInstance, int size)
+                : base(size)
+            {
+                this.enclosingInstance = enclosingInstance;
+            }
 
             public override bool LessThan(SpansCell spans1, SpansCell spans2)
-			{
-				if (spans1.Doc == spans2.Doc)
-				{
-					return NearSpansOrdered.DocSpansOrdered(spans1, spans2);
-				}
-				else
-				{
-					return spans1.Doc < spans2.Doc;
-				}
-			}
-		}
-		
-		
-		/// <summary>Wraps a Spans, and can be used to form a linked list. </summary>
-		private class SpansCell:Spans
-		{
-			private NearSpansUnordered enclosingInstance;
-			public NearSpansUnordered Enclosing_Instance
-			{
-				get
-				{
-					return enclosingInstance;
-				}
-				
-			}
-			internal /*private*/ Spans spans;
-			internal /*private*/ SpansCell next;
-			private int length = - 1;
-			private int index;
-			
-			public SpansCell(NearSpansUnordered enclosingInstance, Spans spans, int index)
-			{
-			    this.enclosingInstance = enclosingInstance;
-				this.spans = spans;
-				this.index = index;
-			}
-			
-			public override bool Next()
-			{
-				return Adjust(spans.Next());
-			}
-			
-			public override bool SkipTo(int target)
-			{
-				return Adjust(spans.SkipTo(target));
-			}
-			
-			private bool Adjust(bool condition)
-			{
-				if (length != - 1)
-				{
-					Enclosing_Instance.totalLength -= length; // subtract old length
-				}
-				if (condition)
-				{
-					length = End - Start;
-					Enclosing_Instance.totalLength += length; // add new length
-					
-					if (Enclosing_Instance.max == null || Doc > Enclosing_Instance.max.Doc 
+            {
+                if (spans1.Doc == spans2.Doc)
+                {
+                    return NearSpansOrdered.DocSpansOrdered(spans1, spans2);
+                }
+                else
+                {
+                    return spans1.Doc < spans2.Doc;
+                }
+            }
+        }
+
+
+        /// <summary>Wraps a Spans, and can be used to form a linked list. </summary>
+        private class SpansCell : SpansBase
+        {
+            private NearSpansUnordered enclosingInstance;
+            public NearSpansUnordered Enclosing_Instance
+            {
+                get
+                {
+                    return enclosingInstance;
+                }
+
+            }
+            internal /*private*/ SpansBase spans;
+            internal /*private*/ SpansCell next;
+            private int length = -1;
+            private int index;
+
+            public SpansCell(NearSpansUnordered enclosingInstance, SpansBase spans, int index)
+            {
+                this.enclosingInstance = enclosingInstance;
+                this.spans = spans;
+                this.index = index;
+            }
+
+            public override bool Next()
+            {
+                return Adjust(spans.Next());
+            }
+
+            public override bool SkipTo(int target)
+            {
+                return Adjust(spans.SkipTo(target));
+            }
+
+            private bool Adjust(bool condition)
+            {
+                if (length != -1)
+                {
+                    Enclosing_Instance.totalLength -= length; // subtract old length
+                }
+                if (condition)
+                {
+                    length = End - Start;
+                    Enclosing_Instance.totalLength += length; // add new length
+
+                    if (Enclosing_Instance.max == null || Doc > Enclosing_Instance.max.Doc
                         || (Doc == Enclosing_Instance.max.Doc) && (End > Enclosing_Instance.max.End))
-					{
-						Enclosing_Instance.max = this;
-					}
-				}
-				Enclosing_Instance.more = condition;
-				return condition;
-			}
-
-		    public override int Doc
-		    {
-		        get { return spans.Doc; }
-		    }
-
-		    public override int Start
-		    {
-		        get { return spans.Start; }
-		    }
-
-		    public override int End
-		    {
-		        get { return spans.End; }
-		    }
-
-		    // TODO: Remove warning after API has been finalized
-
-		    public override ICollection<sbyte[]> GetPayload()
-		    {
-		        return spans.GetPayload().ToArray();
-		    }
-
-		    // TODO: Remove warning after API has been finalized
-
-		    public override bool IsPayloadAvailable()
-		    {
-		        return spans.IsPayloadAvailable();
-		    }
-
-		    public override string ToString()
-			{
-				return spans.ToString() + "#" + index;
-			}
-		}
-		
-		
-		public NearSpansUnordered(SpanNearQuery query, AtomicReaderContext context, IBits acceptDocs, IDictionary<Term, TermContext> termContexts)
-		{
-			this.query = query;
-			this.slop = query.Slop;
-			
-			SpanQuery[] clauses = query.GetClauses();
-			queue = new CellQueue(this, clauses.Length);
-			subSpans = new Spans[clauses.Length];
-			for (int i = 0; i < clauses.Length; i++)
-			{
-				SpansCell cell = new SpansCell(this, clauses[i].GetSpans(context, acceptDocs, termContexts), i);
-				ordered.Add(cell);
-				subSpans[i] = cell.spans;
-			}
-		}
-		public virtual Spans[] GetSubSpans()
-		{
-			return subSpans;
-		}
-		public override bool Next()
-		{
-			if (firstTime)
-			{
-				InitList(true);
-				ListToQueue(); // initialize queue
-				firstTime = false;
-			}
-			else if (more)
-			{
-				if (Min().Next())
-				{
-					// trigger further scanning
-					queue.UpdateTop(); // maintain queue
-				}
-				else
-				{
-					more = false;
-				}
-			}
-			
-			while (more)
-			{
-				
-				bool queueStale = false;
-				
-				if (Min().Doc != max.Doc)
-				{
-					// maintain list
-					QueueToList();
-					queueStale = true;
-				}
-				
-				// skip to doc w/ all clauses
-				
-				while (more && first.Doc < last.Doc)
-				{
-					more = first.SkipTo(last.Doc); // skip first upto last
-					FirstToLast(); // and move it to the end
-					queueStale = true;
-				}
-				
-				if (!more)
-					return false;
-				
-				// found doc w/ all clauses
-				
-				if (queueStale)
-				{
-					// maintain the queue
-					ListToQueue();
-					queueStale = false;
-				}
-				
-				if (AtMatch())
-				{
-					return true;
-				}
-				
-				more = Min().Next();
-				if (more)
-				{
-					queue.UpdateTop(); // maintain queue
-				}
-			}
-			return false; // no more matches
-		}
-		
-		public override bool SkipTo(int target)
-		{
-			if (firstTime)
-			{
-				// initialize
-				InitList(false);
-				for (SpansCell cell = first; more && cell != null; cell = cell.next)
-				{
-					more = cell.SkipTo(target); // skip all
-				}
-				if (more)
-				{
-					ListToQueue();
-				}
-				firstTime = false;
-			}
-			else
-			{
-				// normal case
-				while (more && Min().Doc < target)
-				{
-					// skip as needed
-					if (Min().SkipTo(target))
-					{
-						queue.UpdateTop();
-					}
-					else
-					{
-						more = false;
-					}
-				}
-			}
-			return more && (AtMatch() || Next());
-		}
-		
-		private SpansCell Min()
-		{
-			return queue.Top();
-		}
-
-	    public override int Doc
-	    {
-	        get { return Min().Doc; }
-	    }
-
-	    public override int Start
-	    {
-	        get { return Min().Start; }
-	    }
-
-	    public override int End
-	    {
-	        get { return max.End; }
-	    }
-
-	    // TODO: Remove warning after API has been finalized
-
-	    /// <summary> WARNING: The List is not necessarily in order of the positions</summary>
-	    /// <returns> Collection of <c>byte[]</c> payloads </returns>
-	    /// <throws>  IOException </throws>
-	    public override ICollection<sbyte[]> GetPayload()
-	    {
+                    {
+                        Enclosing_Instance.max = this;
+                    }
+                }
+                Enclosing_Instance.more = condition;
+                return condition;
+            }
+
+            public override int Doc
+            {
+                get { return spans.Doc; }
+            }
+
+            public override int Start
+            {
+                get { return spans.Start; }
+            }
+
+            public override int End
+            {
+                get { return spans.End; }
+            }
+
+            // TODO: Remove warning after API has been finalized
+
+            public override ICollection<sbyte[]> GetPayload()
+            {
+                return spans.GetPayload().ToArray();
+            }
+
+            // TODO: Remove warning after API has been finalized
+
+            public override bool IsPayloadAvailable()
+            {
+                return spans.IsPayloadAvailable();
+            }
+
+            public override long Cost
+            {
+                get { return spans.Cost; }
+            }
+
+            public override string ToString()
+            {
+                return spans.ToString() + "#" + index;
+            }
+        }
+
+
+        public NearSpansUnordered(SpanNearQuery query, AtomicReaderContext context, IBits acceptDocs, IDictionary<Term, TermContext> termContexts)
+        {
+            this.query = query;
+            this.slop = query.Slop;
+
+            SpanQuery[] clauses = query.GetClauses();
+            queue = new CellQueue(this, clauses.Length);
+            subSpans = new SpansBase[clauses.Length];
+            for (int i = 0; i < clauses.Length; i++)
+            {
+                SpansCell cell = new SpansCell(this, clauses[i].GetSpans(context, acceptDocs, termContexts), i);
+                ordered.Add(cell);
+                subSpans[i] = cell.spans;
+            }
+        }
+        public virtual SpansBase[] GetSubSpans()
+        {
+            return subSpans;
+        }
+        public override bool Next()
+        {
+            if (firstTime)
+            {
+                InitList(true);
+                ListToQueue(); // initialize queue
+                firstTime = false;
+            }
+            else if (more)
+            {
+                if (Min().Next())
+                {
+                    // trigger further scanning
+                    queue.UpdateTop(); // maintain queue
+                }
+                else
+                {
+                    more = false;
+                }
+            }
+
+            while (more)
+            {
+
+                bool queueStale = false;
+
+                if (Min().Doc != max.Doc)
+                {
+                    // maintain list
+                    QueueToList();
+                    queueStale = true;
+                }
+
+                // skip to doc w/ all clauses
+
+                while (more && first.Doc < last.Doc)
+                {
+                    more = first.SkipTo(last.Doc); // skip first upto last
+                    FirstToLast(); // and move it to the end
+                    queueStale = true;
+                }
+
+                if (!more)
+                    return false;
+
+                // found doc w/ all clauses
+
+                if (queueStale)
+                {
+                    // maintain the queue
+                    ListToQueue();
+                    queueStale = false;
+                }
+
+                if (AtMatch())
+                {
+                    return true;
+                }
+
+                more = Min().Next();
+                if (more)
+                {
+                    queue.UpdateTop(); // maintain queue
+                }
+            }
+            return false; // no more matches
+        }
+
+        public override bool SkipTo(int target)
+        {
+            if (firstTime)
+            {
+                // initialize
+                InitList(false);
+                for (SpansCell cell = first; more && cell != null; cell = cell.next)
+                {
+                    more = cell.SkipTo(target); // skip all
+                }
+                if (more)
+                {
+                    ListToQueue();
+                }
+                firstTime = false;
+            }
+            else
+            {
+                // normal case
+                while (more && Min().Doc < target)
+                {
+                    // skip as needed
+                    if (Min().SkipTo(target))
+                    {
+                        queue.UpdateTop();
+                    }
+                    else
+                    {
+                        more = false;
+                    }
+                }
+            }
+            return more && (AtMatch() || Next());
+        }
+
+        private SpansCell Min()
+        {
+            return queue.Top();
+        }
+
+        public override int Doc
+        {
+            get { return Min().Doc; }
+        }
+
+        public override int Start
+        {
+            get { return Min().Start; }
+        }
+
+        public override int End
+        {
+            get { return max.End; }
+        }
+
+        // TODO: Remove warning after API has been finalized
+
+        /// <summary> WARNING: The List is not necessarily in order of the positions</summary>
+        /// <returns> Collection of <c>byte[]</c> payloads </returns>
+        /// <throws>  IOException </throws>
+        public override ICollection<sbyte[]> GetPayload()
+        {
             ISet<sbyte[]> matchPayload = Support.Compatibility.SetFactory.CreateHashSet<sbyte[]>();
-	        for (SpansCell cell = first; cell != null; cell = cell.next)
-	        {
-	            if (cell.IsPayloadAvailable())
-	            {
-	                matchPayload.UnionWith(cell.GetPayload());
-	            }
-	        }
-	        return matchPayload;
-	    }
-
-	    // TODO: Remove warning after API has been finalized
-
-	    public override bool IsPayloadAvailable()
-	    {
-	        SpansCell pointer = Min();
-	        while (pointer != null)
-	        {
-	            if (pointer.IsPayloadAvailable())
-	            {
-	                return true;
-	            }
-	            pointer = pointer.next;
-	        }
-
-	        return false;
-	    }
-
-        public override long Cost()
+            for (SpansCell cell = first; cell != null; cell = cell.next)
+            {
+                if (cell.IsPayloadAvailable())
+                {
+                    matchPayload.UnionWith(cell.GetPayload());
+                }
+            }
+            return matchPayload;
+        }
+
+        // TODO: Remove warning after API has been finalized
+
+        public override bool IsPayloadAvailable()
+        {
+            SpansCell pointer = Min();
+            while (pointer != null)
+            {
+                if (pointer.IsPayloadAvailable())
+                {
+                    return true;
+                }
+                pointer = pointer.next;
+            }
+
+            return false;
+        }
+
+        public override long Cost
+        {
+            get
+            {
+                var minCost = long.MaxValue;
+                foreach (var item in subSpans)
+                    minCost = Math.Min(minCost, item.Cost);
+
+                return minCost;
+            }
+        }
+
+        public override string ToString()
+        {
+            return GetType().FullName + "(" + query.ToString() + ")@" + (firstTime ? "START" : (more ? (Doc + ":" + Start + "-" + End) : "END"));
+        }
+
+        private void InitList(bool next)
         {
-            var minCost = long.MaxValue;
-            foreach (var item in subSpans)
-                minCost = Math.Min(minCost, item.Cost());
+            for (int i = 0; more && i < ordered.Count; i++)
+            {
+                SpansCell cell = ordered[i];
+                if (next)
+                    more = cell.Next(); // move to first entry
+                if (more)
+                {
+                    AddToList(cell); // add to list
+                }
+            }
+        }
 
-            return minCost;
+        private void AddToList(SpansCell cell)
+        {
+            if (last != null)
+            {
+                // add next to end of list
+                last.next = cell;
+            }
+            else
+                first = cell;
+            last = cell;
+            cell.next = null;
         }
 
-	    public override string ToString()
-		{
-			return GetType().FullName + "(" + query.ToString() + ")@" + (firstTime?"START":(more?(Doc + ":" + Start + "-" + End):"END"));
-		}
-		
-		private void  InitList(bool next)
-		{
-			for (int i = 0; more && i < ordered.Count; i++)
-			{
-				SpansCell cell = ordered[i];
-				if (next)
-					more = cell.Next(); // move to first entry
-				if (more)
-				{
-					AddToList(cell); // add to list
-				}
-			}
-		}
-		
-		private void  AddToList(SpansCell cell)
-		{
-			if (last != null)
-			{
-				// add next to end of list
-				last.next = cell;
-			}
-			else
-				first = cell;
-			last = cell;
-			cell.next = null;
-		}
-		
-		private void  FirstToLast()
-		{
-			last.next = first; // move first to end of list
-			last = first;
-			first = first.next;
-			last.next = null;
-		}
-		
-		private void  QueueToList()
-		{
-			last = first = null;
-			while (queue.Top() != null)
-			{
-				AddToList(queue.Pop());
-			}
-		}
-		
-		private void  ListToQueue()
-		{
-			queue.Clear(); // rebuild queue
-			for (SpansCell cell = first; cell != null; cell = cell.next)
-			{
-				queue.Add(cell); // add to queue from list
-			}
-		}
-		
-		private bool AtMatch()
-		{
-			return (Min().Doc == max.Doc) && ((max.End - Min().Start - totalLength) <= slop);
-		}
-	}
+        private void FirstToLast()
+        {
+            last.next = first; // move first to end of list
+            last = first;
+            first = first.next;
+            last.next = null;
+        }
+
+        private void QueueToList()
+        {
+            last = first = null;
+            while (queue.Top() != null)
+            {
+                AddToList(queue.Pop());
+            }
+        }
+
+        private void ListToQueue()
+        {
+            queue.Clear(); // rebuild queue
+            for (SpansCell cell = first; cell != null; cell = cell.next)
+            {
+                queue.Add(cell); // add to queue from list
+            }
+        }
+
+        private bool AtMatch()
+        {
+            return (Min().Doc == max.Doc) && ((max.End - Min().Start - totalLength) <= slop);
+        }
+    }
 }
\ No newline at end of file

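
The unordered match test at the end of this file, AtMatch(), measures slop as
the width of the covering window minus the summed lengths of the current
sub-spans. A worked example with hypothetical positions:

    // Two single-term sub-spans in the same doc, at positions [1,2) and [4,5):
    //   max.End      = 5            (end of the rightmost span)
    //   Min().Start  = 1            (start of the leftmost span)
    //   totalLength  = 1 + 1 = 2    (sum of the current span lengths)
    //   slop used    = 5 - 1 - 2 = 2
    // AtMatch() is therefore true only when the query slop is >= 2.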
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/80561f72/src/core/Search/Spans/SpanFirstQuery.cs
----------------------------------------------------------------------
diff --git a/src/core/Search/Spans/SpanFirstQuery.cs b/src/core/Search/Spans/SpanFirstQuery.cs
index 92914cb..a0357e6 100644
--- a/src/core/Search/Spans/SpanFirstQuery.cs
+++ b/src/core/Search/Spans/SpanFirstQuery.cs
@@ -15,6 +15,7 @@
  * limitations under the License.
  */
 
+using Lucene.Net.Support;
 using System;
 using System.Text;
 using ToStringUtils = Lucene.Net.Util.ToStringUtils;
@@ -29,7 +30,7 @@ namespace Lucene.Net.Search.Spans
         {
         }
 
-        protected override AcceptStatus AcceptPosition(Spans spans)
+        protected override AcceptStatus AcceptPosition(SpansBase spans)
         {
             //assert spans.start() != spans.end() : "start equals end: " + spans.start();
             if (spans.Start >= end)
@@ -53,7 +54,7 @@ namespace Lucene.Net.Search.Spans
             return buffer.ToString();
         }
 
-        public override SpanFirstQuery Clone()
+        public override object Clone()
         {
             SpanFirstQuery spanFirstQuery = new SpanFirstQuery((SpanQuery) match.clone(), end);
             spanFirstQuery.Boost = Boost;
@@ -74,7 +75,7 @@ namespace Lucene.Net.Search.Spans
         public override int GetHashCode()
         {
             int h = match.hashCode();
-            h ^= (h << 8) | (h >>> 25); // reversible
+            h ^= (h << 8) | Number.URShift(h, 25); // reversible
             h ^= Float.floatToRawIntBits(Boost) ^ end;
             return h;
         }

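
The GetHashCode fix above replaces Java's unsigned right shift (>>>), which the
mechanical port had left behind as invalid C#. C# has no >>> operator; a
Number.URShift-style helper is conventionally a cast through uint. A sketch of
what such a helper typically looks like (the actual Lucene.Net.Support code may
differ):

    public static class Number
    {
        // Zero-fill (unsigned) right shift for 32-bit ints, like Java's >>>.
        public static int URShift(int number, int bits)
        {
            return (int)((uint)number >> bits);
        }
    }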
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/80561f72/src/core/Search/Spans/SpanMultiTermQueryWrapper.cs
----------------------------------------------------------------------
diff --git a/src/core/Search/Spans/SpanMultiTermQueryWrapper.cs b/src/core/Search/Spans/SpanMultiTermQueryWrapper.cs
index 7902bee..4234488 100644
--- a/src/core/Search/Spans/SpanMultiTermQueryWrapper.cs
+++ b/src/core/Search/Spans/SpanMultiTermQueryWrapper.cs
@@ -15,7 +15,7 @@ namespace Lucene.Net.Search.Spans
         {
             this.query = query;
 
-            MultiTermQuery.RewriteMethod method = query.RewriteMethod;
+            MultiTermQuery.RewriteMethod method = query.GetRewriteMethod();
             if (method is TopTermsRewrite<Q>)
             {
                 int pqsize = ((TopTermsRewrite<Q>) method).Size;
@@ -31,7 +31,7 @@ namespace Lucene.Net.Search.Spans
         {
             get
             {
-                MultiTermQuery.RewriteMethod m = query.RewriteMethod;
+                MultiTermQuery.RewriteMethod m = query.GetRewriteMethod();
                 if (!(m is SpanRewriteMethod))
                     throw new NotSupportedException(
                         "You can only use SpanMultiTermQueryWrapper with a suitable SpanRewriteMethod.");
@@ -39,11 +39,11 @@ namespace Lucene.Net.Search.Spans
             }
             set
             {
-                query.RewriteMethod = value;
+                query.SetRewriteMethod(value);
             }
         }
 
-        public override Spans GetSpans(AtomicReaderContext context, IBits acceptDocs,
+        public override SpansBase GetSpans(AtomicReaderContext context, IBits acceptDocs,
                                        IDictionary<Term, TermContext> termContexts)
         {
             throw new NotSupportedException("Query should have been rewritten");
@@ -68,7 +68,7 @@ namespace Lucene.Net.Search.Spans
             var q = query.Rewrite(reader);
             if (!(q is SpanQuery))
                 throw new NotSupportedException(
-                    "You can only use SpanMultiTermQueryWrapper with a suitable SpanRewriteMethod.") 
+                    "You can only use SpanMultiTermQueryWrapper with a suitable SpanRewriteMethod.");
             return q;
         }
 
@@ -88,16 +88,19 @@ namespace Lucene.Net.Search.Spans
 
         public abstract class SpanRewriteMethod : MultiTermQuery.RewriteMethod
         {
-            public abstract override SpanQuery Rewrite(IndexReader reader, MultiTermQuery query);
+            public abstract override Query Rewrite(IndexReader reader, MultiTermQuery query);
         }
 
         private sealed class AnonymousScoringSpanQueryRewrite : SpanRewriteMethod
         {
             private sealed class AnonymousScoringRewrite : ScoringRewrite<SpanOrQuery>
             {
-                protected override SpanOrQuery GetTopLevelQuery()
+                protected override SpanOrQuery TopLevelQuery
                 {
-                    return new SpanOrQuery();
+                    get
+                    {
+                        return new SpanOrQuery();
+                    }
                 }
 
                 protected override void CheckMaxClauseCount(int count)
@@ -118,7 +121,7 @@ namespace Lucene.Net.Search.Spans
 
             private readonly ScoringRewrite<SpanOrQuery> _delegate = new AnonymousScoringRewrite();
 
-            public override SpanQuery Rewrite(IndexReader reader, MultiTermQuery query)
+            public override Query Rewrite(IndexReader reader, MultiTermQuery query)
             {
                 return _delegate.Rewrite(reader, query);
             }
@@ -141,9 +144,12 @@ namespace Lucene.Net.Search.Spans
                     get { return Int32.MaxValue; }
                 }
 
-                protected override SpanOrQuery GetTopLevelQuery()
+                protected override SpanOrQuery TopLevelQuery
                 {
-                    return new SpanOrQuery();
+                    get
+                    {
+                        return new SpanOrQuery();
+                    }
                 }
 
                 protected override void AddClause(SpanOrQuery topLevel, Term term, int docFreq, float boost,
@@ -167,7 +173,7 @@ namespace Lucene.Net.Search.Spans
                 return _delegate.Size;
             }
 
-            public override SpanQuery Rewrite(IndexReader reader, MultiTermQuery query)
+            public override Query Rewrite(IndexReader reader, MultiTermQuery query)
             {
                 return _delegate.Rewrite(reader, query);
             }

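
For orientation, the wrapper lifts a multi-term query into span space; GetSpans
deliberately throws until Rewrite has replaced the wrapper with concrete span
clauses. A hedged usage sketch (field name and pattern are illustrative
assumptions):

    // Wrap a WildcardQuery so it can participate in a span query.
    var wildcard = new WildcardQuery(new Term("f", "lucen*"));
    var asSpan = new SpanMultiTermQueryWrapper<WildcardQuery>(wildcard);
    var near = new SpanNearQuery(
        new SpanQuery[] { asSpan, new SpanTermQuery(new Term("f", "port")) },
        0, true);
    // The query must be rewritten (directly, or implicitly during search)
    // before GetSpans is usable.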
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/80561f72/src/core/Search/Spans/SpanNearPayloadCheckQuery.cs
----------------------------------------------------------------------
diff --git a/src/core/Search/Spans/SpanNearPayloadCheckQuery.cs b/src/core/Search/Spans/SpanNearPayloadCheckQuery.cs
index 081f902..f6224e3 100644
--- a/src/core/Search/Spans/SpanNearPayloadCheckQuery.cs
+++ b/src/core/Search/Spans/SpanNearPayloadCheckQuery.cs
@@ -16,7 +16,7 @@ namespace Lucene.Net.Search.Spans
             this.payloadToMatch = payloadToMatch;
         }
 
-        protected override AcceptStatus AcceptPosition(Spans spans)
+        protected override AcceptStatus AcceptPosition(SpansBase spans)
         {
             var result = spans.IsPayloadAvailable();
             if (result == true)
@@ -62,7 +62,7 @@ namespace Lucene.Net.Search.Spans
             return buffer.ToString();
         }
 
-        public override SpanNearPayloadCheckQuery Clone()
+        public override object Clone()
         {
             var result = new SpanNearPayloadCheckQuery((SpanNearQuery) match.clone(), payloadToMatch);
             result.Boost = Boost;

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/80561f72/src/core/Search/Spans/SpanNearQuery.cs
----------------------------------------------------------------------
diff --git a/src/core/Search/Spans/SpanNearQuery.cs b/src/core/Search/Spans/SpanNearQuery.cs
index de83a8b..b945f29 100644
--- a/src/core/Search/Spans/SpanNearQuery.cs
+++ b/src/core/Search/Spans/SpanNearQuery.cs
@@ -129,7 +129,7 @@ namespace Lucene.Net.Search.Spans
             return buffer.ToString();
         }
 
-        public override Spans GetSpans(AtomicReaderContext context, IBits acceptDocs, IDictionary<Term, TermContext> termContexts)
+        public override SpansBase GetSpans(AtomicReaderContext context, IBits acceptDocs, IDictionary<Term, TermContext> termContexts)
         {
             if (clauses.Count == 0)
                 // optimize 0-clause case
@@ -139,7 +139,7 @@ namespace Lucene.Net.Search.Spans
                 // optimize 1-clause case
                 return clauses[0].GetSpans(context, acceptDocs, termContexts);
 
-            return inOrder ? (Spans)new NearSpansOrdered(this, context, collectPayloads) : (Spans)new NearSpansUnordered(this, context);
+            return inOrder ? (SpansBase)new NearSpansOrdered(this, context, collectPayloads) : (SpansBase)new NearSpansUnordered(this, context);
         }
 
         public override Query Rewrite(IndexReader reader)

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/80561f72/src/core/Search/Spans/SpanNotQuery.cs
----------------------------------------------------------------------
diff --git a/src/core/Search/Spans/SpanNotQuery.cs b/src/core/Search/Spans/SpanNotQuery.cs
index 7173480..1502742 100644
--- a/src/core/Search/Spans/SpanNotQuery.cs
+++ b/src/core/Search/Spans/SpanNotQuery.cs
@@ -31,7 +31,7 @@ namespace Lucene.Net.Search.Spans
 	[Serializable]
 	public class SpanNotQuery:SpanQuery, ICloneable
 	{
-		private class AnonymousClassSpans : Spans
+		private class AnonymousClassSpans : SpansBase
 		{
 			public AnonymousClassSpans(AtomicReaderContext context, IBits acceptDocs, IDictionary<Term, TermContext> termContexts, SpanNotQuery enclosingInstance)
 			{
@@ -52,10 +52,10 @@ namespace Lucene.Net.Search.Spans
 				}
 				
 			}
-			private Spans includeSpans;
+			private SpansBase includeSpans;
 			private bool moreInclude = true;
 			
-			private Spans excludeSpans;
+			private SpansBase excludeSpans;
 			private bool moreExclude;
 			
 			public override bool Next()
@@ -141,9 +141,12 @@ namespace Lucene.Net.Search.Spans
 		        return includeSpans.IsPayloadAvailable();
 		    }
 
-            public override long Cost()
+            public override long Cost
             {
-                return includeSpans.Cost();
+                get
+                {
+                    return includeSpans.Cost;
+                }
             }
 
 		    public override string ToString()
@@ -206,7 +209,7 @@ namespace Lucene.Net.Search.Spans
 		    return spanNotQuery;
 		}
 		
-		public override Spans GetSpans(AtomicReaderContext context, IBits acceptDocs, IDictionary<Term, TermContext> termContexts)
+		public override SpansBase GetSpans(AtomicReaderContext context, IBits acceptDocs, IDictionary<Term, TermContext> termContexts)
 		{
 			return new AnonymousClassSpans(context, acceptDocs, termContexts, this);
 		}

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/80561f72/src/core/Search/Spans/SpanOrQuery.cs
----------------------------------------------------------------------
diff --git a/src/core/Search/Spans/SpanOrQuery.cs b/src/core/Search/Spans/SpanOrQuery.cs
index a717ee9..5f41b47 100644
--- a/src/core/Search/Spans/SpanOrQuery.cs
+++ b/src/core/Search/Spans/SpanOrQuery.cs
@@ -32,7 +32,7 @@ namespace Lucene.Net.Search.Spans
 	[Serializable]
 	public class SpanOrQuery : SpanQuery, ICloneable
 	{
-		private class AnonymousClassSpans : Spans
+		private class AnonymousClassSpans : SpansBase
 		{
 			public AnonymousClassSpans(AtomicReaderContext context, IBits acceptDocs, IDictionary<Term, TermContext> termContexts, SpanOrQuery enclosingInstance)
 			{
@@ -95,7 +95,7 @@ namespace Lucene.Net.Search.Spans
 				return queue.Size != 0;
 			}
 			
-			private Spans Top()
+			private SpansBase Top()
 			{
 				return queue.Top();
 			}
@@ -143,9 +143,12 @@ namespace Lucene.Net.Search.Spans
 		        get { return Top().End; }
 		    }
 
-		    public override long Cost()
+		    public override long Cost
             {
-                return cost;
+                get
+                {
+                    return cost;
+                }
             }
 
 		    public override ICollection<sbyte[]> GetPayload()
@@ -304,7 +307,7 @@ namespace Lucene.Net.Search.Spans
 		}
 		
 		
-		private class SpanQueue : Util.PriorityQueue<Spans>
+		private class SpanQueue : Util.PriorityQueue<SpansBase>
 		{
 			private SpanOrQuery enclosingInstance;
 			public SpanOrQuery Enclosing_Instance
@@ -320,7 +323,7 @@ namespace Lucene.Net.Search.Spans
                 this.enclosingInstance = enclosingInstance;
 			}
 
-            public override bool LessThan(Spans spans1, Spans spans2)
+            public override bool LessThan(SpansBase spans1, SpansBase spans2)
 			{
 				if (spans1.Doc == spans2.Doc)
 				{
@@ -340,7 +343,7 @@ namespace Lucene.Net.Search.Spans
 			}
 		}
 		
-		public override Spans GetSpans(AtomicReaderContext context, IBits acceptDocs, IDictionary<Term, TermContext> termContexts)
+		public override SpansBase GetSpans(AtomicReaderContext context, IBits acceptDocs, IDictionary<Term, TermContext> termContexts)
 		{
 			if (clauses.Count == 1)
 			// optimize 1-clause case

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/80561f72/src/core/Search/Spans/SpanPayloadCheckQuery.cs
----------------------------------------------------------------------
diff --git a/src/core/Search/Spans/SpanPayloadCheckQuery.cs b/src/core/Search/Spans/SpanPayloadCheckQuery.cs
index f1483d4..d56a91c 100644
--- a/src/core/Search/Spans/SpanPayloadCheckQuery.cs
+++ b/src/core/Search/Spans/SpanPayloadCheckQuery.cs
@@ -18,7 +18,7 @@ namespace Lucene.Net.Search.Spans
             this.payloadToMatch = payloadToMatch;
         }
 
-        protected override AcceptStatus AcceptPosition(Spans spans)
+        protected override AcceptStatus AcceptPosition(SpansBase spans)
         {
             var result = spans.IsPayloadAvailable();
             if (result == true)
@@ -45,11 +45,11 @@ namespace Lucene.Net.Search.Spans
             return AcceptStatus.YES;
         }
 
-        public override string ToString()
+        public override string ToString(string field)
         {
             var buffer = new StringBuilder();
             buffer.Append("spanPayCheck(");
-            buffer.Append(Match.ToString(Field));
+            buffer.Append(Match.ToString(field));
             buffer.Append(", payloadRef: ");
             foreach (var bytes in payloadToMatch)
             {
@@ -61,7 +61,7 @@ namespace Lucene.Net.Search.Spans
             return buffer.ToString();
         }
 
-        public override SpanPayloadCheckQuery Clone()
+        public override object Clone()
         {
             return new SpanPayloadCheckQuery((SpanQuery) Match.Clone(), payloadToMatch) { Boost = Boost };
         }

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/80561f72/src/core/Search/Spans/SpanPositionCheckQuery.cs
----------------------------------------------------------------------
diff --git a/src/core/Search/Spans/SpanPositionCheckQuery.cs b/src/core/Search/Spans/SpanPositionCheckQuery.cs
index a5c20ae..b24c9ef 100644
--- a/src/core/Search/Spans/SpanPositionCheckQuery.cs
+++ b/src/core/Search/Spans/SpanPositionCheckQuery.cs
@@ -10,18 +10,20 @@ namespace Lucene.Net.Search.Spans
     /// </summary>
     public abstract class SpanPositionCheckQuery : SpanQuery, ICloneable
     {
-        public virtual SpanQuery Match { get; protected set; }
+        protected SpanQuery match;
 
         protected SpanPositionCheckQuery(SpanQuery match)
         {
-            Match = match;
+            this.match = match;
         }
 
-        public override string Field { get { return Match.Field; } }
+        public SpanQuery Match { get { return match; } }
+
+        public override string Field { get { return match.Field; } }
 
         public override void ExtractTerms(ISet<Term> terms)
         {
-            Match.ExtractTerms(terms);
+            match.ExtractTerms(terms);
         }
         
         protected enum AcceptStatus
@@ -43,9 +45,9 @@ namespace Lucene.Net.Search.Spans
             NO_AND_ADVANCE
         }
 
-        protected abstract AcceptStatus AcceptPosition(Spans spans);
+        protected abstract AcceptStatus AcceptPosition(SpansBase spans);
 
-        public override Spans GetSpans(AtomicReaderContext context, IBits acceptDocs, IDictionary<Term, TermContext> termContexts)
+        public override SpansBase GetSpans(AtomicReaderContext context, IBits acceptDocs, IDictionary<Term, TermContext> termContexts)
         {
             return new PositionCheckSpan(context, acceptDocs, termContexts);
         }
@@ -58,7 +60,7 @@ namespace Lucene.Net.Search.Spans
             if (rewritten != Match)
             {
                 clone = (SpanPositionCheckQuery) this.Clone();
-                clone.Match = rewritten;
+                clone.match = rewritten;
             }
 
             if (clone != null)
@@ -71,9 +73,9 @@ namespace Lucene.Net.Search.Spans
             }
         }
 
-        protected class PositionCheckSpan : Spans
+        protected class PositionCheckSpan : SpansBase
         {
-            private Spans spans;
+            private SpansBase spans;
 
             private SpanPositionCheckQuery parent;
 
@@ -143,9 +145,12 @@ namespace Lucene.Net.Search.Spans
                 return spans.IsPayloadAvailable();
             }
 
-            public override long Cost()
+            public override long Cost
             {
-                return spans.Cost();
+                get
+                {
+                    return spans.Cost;
+                }
             }
 
             public override string ToString()

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/80561f72/src/core/Search/Spans/SpanPositionRangeQuery.cs
----------------------------------------------------------------------
diff --git a/src/core/Search/Spans/SpanPositionRangeQuery.cs b/src/core/Search/Spans/SpanPositionRangeQuery.cs
index c763b12..3f6bb75 100644
--- a/src/core/Search/Spans/SpanPositionRangeQuery.cs
+++ b/src/core/Search/Spans/SpanPositionRangeQuery.cs
@@ -9,24 +9,24 @@ namespace Lucene.Net.Search.Spans
     /// </summary>
     public class SpanPositionRangeQuery : SpanPositionCheckQuery
     {
-        public int Start { get; protected set; }
-        public int End { get; protected set; }
+        protected int start = 0;
+        protected int end;
 
         public SpanPositionRangeQuery(SpanQuery match, int start, int end)
             : base(match)
         {
-            Start = start;
-            End = end;
+            this.start = start;
+            this.end = end;
         }
 
-        protected override AcceptStatus AcceptPosition(Spans spans)
+        protected override AcceptStatus AcceptPosition(SpansBase spans)
         {
             // assert spans.start() != spans.end();
-            if (spans.Start >= End)
+            if (spans.Start >= end)
             {
                 return AcceptStatus.NO_AND_ADVANCE;
             }
-            else if (spans.Start >= Start && spans.End <= End)
+            else if (spans.Start >= start && spans.End <= end)
             {
                 return AcceptStatus.YES;
             }
@@ -36,13 +36,23 @@ namespace Lucene.Net.Search.Spans
             }
         }
 
+        public int Start
+        {
+            get { return start; }
+        }
+
+        public int End
+        {
+            get { return end; }
+        }
+
         public override string ToString(string field)
         {
             var buffer = new StringBuilder();
             buffer.Append("spanPosRange(");
             buffer.Append(Match.ToString(field));
-            buffer.Append(", ").Append(Start).Append(", ");
-            buffer.Append(End);
+            buffer.Append(", ").Append(start).Append(", ");
+            buffer.Append(end);
             buffer.Append(")");
             buffer.Append(ToStringUtils.Boost(Boost));
             return buffer.ToString();
@@ -50,7 +60,7 @@ namespace Lucene.Net.Search.Spans
 
         public override object Clone()
         {
-            return new SpanPositionRangeQuery((SpanQuery) Match.Clone(), Start, End) {Boost = Boost};
+            return new SpanPositionRangeQuery((SpanQuery) Match.Clone(), start, end) {Boost = Boost};
         }
 
         public override bool Equals(object obj)
@@ -59,7 +69,7 @@ namespace Lucene.Net.Search.Spans
             if (!(obj is SpanPositionRangeQuery)) return false;
 
             var other = obj as SpanPositionRangeQuery;
-            return this.End == other.End && this.Start == other.Start
+            return this.end == other.end && this.start == other.start
                    && this.Match.Equals(other.Match)
                    && this.Boost == other.Boost;
         }
@@ -68,7 +78,7 @@ namespace Lucene.Net.Search.Spans
         {
             int h = Match.GetHashCode();
             h ^= (h << 8) | Number.URShift(h, 25);
-            h ^= Number.FloatToIntBits(Boost) ^ End ^ Start;
+            h ^= Number.FloatToIntBits(Boost) ^ end ^ start;
             return h;
         }
     }

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/80561f72/src/core/Search/Spans/SpanQuery.cs
----------------------------------------------------------------------
diff --git a/src/core/Search/Spans/SpanQuery.cs b/src/core/Search/Spans/SpanQuery.cs
index 9a93d98..9770345 100644
--- a/src/core/Search/Spans/SpanQuery.cs
+++ b/src/core/Search/Spans/SpanQuery.cs
@@ -30,7 +30,7 @@ namespace Lucene.Net.Search.Spans
 		/// <summary>Expert: Returns the matches for this query in an index.  Used internally
 		/// to search for spans. 
 		/// </summary>
-		public abstract Spans GetSpans(AtomicReaderContext context, IBits acceptDocs, IDictionary<Term, TermContext> termContexts);
+		public abstract SpansBase GetSpans(AtomicReaderContext context, IBits acceptDocs, IDictionary<Term, TermContext> termContexts);
 
 	    /// <summary>Returns the name of the field matched by this query.</summary>
 	    public abstract string Field { get; }

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/80561f72/src/core/Search/Spans/SpanScorer.cs
----------------------------------------------------------------------
diff --git a/src/core/Search/Spans/SpanScorer.cs b/src/core/Search/Spans/SpanScorer.cs
index 0a417bb..2657542 100644
--- a/src/core/Search/Spans/SpanScorer.cs
+++ b/src/core/Search/Spans/SpanScorer.cs
@@ -22,7 +22,7 @@ namespace Lucene.Net.Search.Spans
     /// <summary> Public for extension only.</summary>
     public class SpanScorer : Scorer
     {
-        protected Spans spans;
+        protected SpansBase spans;
 
         protected bool more = true;
 
@@ -33,7 +33,7 @@ namespace Lucene.Net.Search.Spans
 
 
 
-        public SpanScorer(Spans spans, Weight weight, Similarity.SloppySimScorer docScorer)
+        public SpanScorer(SpansBase spans, Weight weight, Similarity.SloppySimScorer docScorer)
             : base(weight)
         {
             this.docScorer = docScorer;
@@ -99,9 +99,12 @@ namespace Lucene.Net.Search.Spans
             return docScorer.Score(doc, freq);
         }
 
-        public override int Freq()
+        public override int Freq
         {
-            return numMatches;
+            get
+            {
+                return numMatches;
+            }
         }
 
         public float SloppyFreq()

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/80561f72/src/core/Search/Spans/SpanTermQuery.cs
----------------------------------------------------------------------
diff --git a/src/core/Search/Spans/SpanTermQuery.cs b/src/core/Search/Spans/SpanTermQuery.cs
index 6855a42..0c6fd7f 100644
--- a/src/core/Search/Spans/SpanTermQuery.cs
+++ b/src/core/Search/Spans/SpanTermQuery.cs
@@ -94,7 +94,7 @@ namespace Lucene.Net.Search.Spans
             return true;
         }
 
-        public override Spans GetSpans(AtomicReaderContext context, IBits acceptDocs, IDictionary<Term, TermContext> termContexts)
+        public override SpansBase GetSpans(AtomicReaderContext context, IBits acceptDocs, IDictionary<Term, TermContext> termContexts)
         {
             var termContext = termContexts[Term];
             TermState state;
@@ -102,7 +102,7 @@ namespace Lucene.Net.Search.Spans
             {
                 // this happens with span-not query, as it doesn't include the NOT side in extractTerms()
                 // so we seek to the term now in this segment..., this sucks because its ugly mostly!
-                var fields = context.Reader.Fields;
+                var fields = ((AtomicReader)context.Reader).Fields;
                 if (fields != null)
                 {
                     var terms = fields.Terms(Term.Field);
@@ -138,7 +138,7 @@ namespace Lucene.Net.Search.Spans
                 return TermSpans.EMPTY_TERM_SPANS;
             }
 
-            var termsIter = context.Reader.Terms(Term.Field).Iterator(null);
+            var termsIter = ((AtomicReader)context.Reader).Terms(Term.Field).Iterator(null);
             termsIter.SeekExact(Term.Bytes, state);
 
             var postings = termsIter.DocsAndPositions(acceptDocs, null, DocsAndPositionsEnum.FLAG_PAYLOADS);

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/80561f72/src/core/Search/Spans/SpanWeight.cs
----------------------------------------------------------------------
diff --git a/src/core/Search/Spans/SpanWeight.cs b/src/core/Search/Spans/SpanWeight.cs
index 30da178..c4c693b 100644
--- a/src/core/Search/Spans/SpanWeight.cs
+++ b/src/core/Search/Spans/SpanWeight.cs
@@ -66,9 +66,12 @@ namespace Lucene.Net.Search.Spans
             get { return query; }
         }
 
-        public override float GetValueForNormalization()
+        public override float ValueForNormalization
         {
-            return stats == null ? 1.0f : stats.GetValueForNormalization();
+            get
+            {
+                return stats == null ? 1.0f : stats.ValueForNormalization;
+            }
         }
 
         public override void Normalize(float queryNorm, float topLevelBoost)

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/80561f72/src/core/Search/Spans/Spans.cs
----------------------------------------------------------------------
diff --git a/src/core/Search/Spans/Spans.cs b/src/core/Search/Spans/Spans.cs
index 727094a..63704e3 100644
--- a/src/core/Search/Spans/Spans.cs
+++ b/src/core/Search/Spans/Spans.cs
@@ -25,7 +25,8 @@ namespace Lucene.Net.Search.Spans
 	/// are enumerated in order, by increasing document number, within that by
 	/// increasing start position and finally by increasing end position. 
 	/// </summary>
-	public abstract class Spans
+    // .NET Port: Renamed from Spans to SpansBase to avoid Namespace conflict
+	public abstract class SpansBase
 	{
 		/// <summary>Move to the next match, returning true iff any such exists. </summary>
 		public abstract bool Next();

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/80561f72/src/core/Search/Spans/TermSpans.cs
----------------------------------------------------------------------
diff --git a/src/core/Search/Spans/TermSpans.cs b/src/core/Search/Spans/TermSpans.cs
index c19620f..fddd161 100644
--- a/src/core/Search/Spans/TermSpans.cs
+++ b/src/core/Search/Spans/TermSpans.cs
@@ -27,7 +27,7 @@ namespace Lucene.Net.Search.Spans
     /// <summary> Expert:
     /// Public for extension only
     /// </summary>
-    public class TermSpans : Spans
+    public class TermSpans : SpansBase
     {
         protected readonly DocsAndPositionsEnum postings;
         protected readonly Term term;

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/80561f72/src/core/Search/TermCollectingRewrite.cs
----------------------------------------------------------------------
diff --git a/src/core/Search/TermCollectingRewrite.cs b/src/core/Search/TermCollectingRewrite.cs
index 12dd239..2ed41c5 100644
--- a/src/core/Search/TermCollectingRewrite.cs
+++ b/src/core/Search/TermCollectingRewrite.cs
@@ -23,7 +23,7 @@ namespace Lucene.Net.Search
             IComparer<BytesRef> lastTermComp = null;
             foreach (AtomicReaderContext context in topReaderContext.Leaves)
             {
-                Fields fields = context.Reader.Fields;
+                Fields fields = ((AtomicReader)context.Reader).Fields;
                 if (fields == null)
                 {
                     // reader has no fields
@@ -79,6 +79,6 @@ namespace Lucene.Net.Search
             public abstract void SetNextEnum(TermsEnum termsEnum);
         }
 
-        public abstract Query Rewrite(IndexReader reader, MultiTermQuery query);
+        public abstract override Query Rewrite(IndexReader reader, MultiTermQuery query);
     }
 }

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/80561f72/src/core/Search/TopFieldCollector.cs
----------------------------------------------------------------------
diff --git a/src/core/Search/TopFieldCollector.cs b/src/core/Search/TopFieldCollector.cs
index 0b34309..5ea70e5 100644
--- a/src/core/Search/TopFieldCollector.cs
+++ b/src/core/Search/TopFieldCollector.cs
@@ -233,7 +233,7 @@ namespace Lucene.Net.Search
         private class OutOfOrderOneComparatorScoringNoMaxScoreCollector : OneComparatorScoringNoMaxScoreCollector
         {
 
-            public OutOfOrderOneComparatorScoringNoMaxScoreCollector(FieldValueHitQueue queue, int numHits, bool fillFields)
+            public OutOfOrderOneComparatorScoringNoMaxScoreCollector(FieldValueHitQueue<Entry> queue, int numHits, bool fillFields)
                 : base(queue, numHits, fillFields)
             {
             }

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/80561f72/src/core/Support/ByteBuffer.cs
----------------------------------------------------------------------
diff --git a/src/core/Support/ByteBuffer.cs b/src/core/Support/ByteBuffer.cs
index db06fa2..db50787 100644
--- a/src/core/Support/ByteBuffer.cs
+++ b/src/core/Support/ByteBuffer.cs
@@ -22,13 +22,9 @@ namespace Lucene.Net.Support
         {
         }
 
-        public byte[] Array
+        public override object Array
         {
             get { return _data; }
-            private set
-            {
-                _data = value;
-            }
         }
 
         public override int ArrayOffset
@@ -41,15 +37,15 @@ namespace Lucene.Net.Support
             get { return _data != null; }
         }
 
-        public abstract bool IsDirect { get; }
+        public abstract override bool IsDirect { get; }
 
-        public abstract bool IsReadOnly { get; }
+        public abstract override bool IsReadOnly { get; }
 
         public static ByteBuffer Allocate(int capacity)
         {
             return new WrappedByteBuffer(-1, 0, capacity, capacity)
             {
-                Array = new byte[capacity],
+                _data = new byte[capacity],
                 _offset = 0
             };
         }
@@ -60,7 +56,7 @@ namespace Lucene.Net.Support
         {
             return new WrappedByteBuffer(-1, offset, offset + length, array.Length)
             {
-                Array = array,
+                _data = array,
                 _offset = 0
             };
         }
@@ -69,7 +65,7 @@ namespace Lucene.Net.Support
         {
             return new WrappedByteBuffer(-1, 0, array.Length, array.Length)
             {
-                Array = array,
+                _data = array,
                 _offset = 0
             };
         }
@@ -213,7 +209,7 @@ namespace Lucene.Net.Support
             {
                 return new WrappedByteBuffer(-1, 0, Remaining, Remaining)
                 {
-                    Array = this._data,
+                    _data = this._data,
                     _offset = this._offset
                 };
             }
@@ -222,7 +218,7 @@ namespace Lucene.Net.Support
             {
                 return new WrappedByteBuffer(Mark, Position, Limit, Capacity)
                 {
-                    Array = this._data,
+                    _data = this._data,
                     _offset = this._offset
                 };
             }

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/80561f72/src/core/Util/Automaton/State.cs
----------------------------------------------------------------------
diff --git a/src/core/Util/Automaton/State.cs b/src/core/Util/Automaton/State.cs
index 88d6a4c..e113105 100644
--- a/src/core/Util/Automaton/State.cs
+++ b/src/core/Util/Automaton/State.cs
@@ -89,6 +89,11 @@ namespace Lucene.Net.Util.Automaton
             {
                 return new TransitionsEnumerator(transitionsArray, numTransitions);
             }
+
+            System.Collections.IEnumerator System.Collections.IEnumerable.GetEnumerator()
+            {
+                return GetEnumerator();
+            }
         }
 
         public IEnumerable<Transition> GetTransitions()

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/80561f72/src/core/Util/ByteBlockPool.cs
----------------------------------------------------------------------
diff --git a/src/core/Util/ByteBlockPool.cs b/src/core/Util/ByteBlockPool.cs
index 7dde649..4cdbc70 100644
--- a/src/core/Util/ByteBlockPool.cs
+++ b/src/core/Util/ByteBlockPool.cs
@@ -70,12 +70,12 @@ namespace Lucene.Net.Util
                 this.bytesUsed = bytesUsed;
             }
 
-            public override byte[] ByteBlock
+            public override sbyte[] ByteBlock
             {
                 get
                 {
                     bytesUsed.AddAndGet(blockSize);
-                    return new byte[blockSize];
+                    return new sbyte[blockSize];
                 }
             }
 

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/80561f72/src/core/Util/DocIdBitSet.cs
----------------------------------------------------------------------
diff --git a/src/core/Util/DocIdBitSet.cs b/src/core/Util/DocIdBitSet.cs
index ea6500a..8ff840d 100644
--- a/src/core/Util/DocIdBitSet.cs
+++ b/src/core/Util/DocIdBitSet.cs
@@ -71,9 +71,12 @@ namespace Lucene.Net.Util
                 this.docId = -1;
             }
 
-            public override int DocID()
+            public override int DocID
             {
-                return docId;
+                get
+                {
+                    return docId;
+                }
             }
 
             public override int NextDoc()
@@ -93,9 +96,12 @@ namespace Lucene.Net.Util
                 return docId;
             }
 
-            public override long Cost()
+            public override long Cost
             {
-                return bitSet.Length;
+                get
+                {
+                    return bitSet.Length;
+                }
             }
         }
 

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/80561f72/src/core/Util/Fst/BytesStore.cs
----------------------------------------------------------------------
diff --git a/src/core/Util/Fst/BytesStore.cs b/src/core/Util/Fst/BytesStore.cs
index 1ec0a24..ba1c6ce 100644
--- a/src/core/Util/Fst/BytesStore.cs
+++ b/src/core/Util/Fst/BytesStore.cs
@@ -310,17 +310,17 @@ namespace Lucene.Net.Util.Fst
                 nextRead = _parent.blockSize;
             }
 
-            public override sbyte ReadByte()
+            public override byte ReadByte()
             {
                 if (nextRead == _parent.blockSize)
                 {
                     current = _parent.blocks[nextBuffer++];
                     nextRead = 0;
                 }
-                return current[nextRead++];
+                return (byte)current[nextRead++];
             }
 
-            public void SkipBytes(int count)
+            public override void SkipBytes(int count)
             {
                 Position = Position + count;
             }

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/80561f72/src/core/Util/Fst/FSTEnum.cs
----------------------------------------------------------------------
diff --git a/src/core/Util/Fst/FSTEnum.cs b/src/core/Util/Fst/FSTEnum.cs
index 46e875d..5c4839a 100644
--- a/src/core/Util/Fst/FSTEnum.cs
+++ b/src/core/Util/Fst/FSTEnum.cs
@@ -4,7 +4,6 @@ using System.Diagnostics;
 namespace Lucene.Net.Util.Fst
 {
     public abstract class FSTEnum<T>
-        where T : class
     {
         protected readonly FST<T> fst;
 
@@ -416,7 +415,7 @@ namespace Lucene.Net.Util.Fst
             }
             if (output.Length <= upto)
             {
-                var newOutput = (T[])new Object[ArrayUtil.Oversize(1 + upto, RamUsageEstimator.NUM_BYTES_OBJECT_REF)];
+                var newOutput = new T[ArrayUtil.Oversize(1 + upto, RamUsageEstimator.NUM_BYTES_OBJECT_REF)];
                 Array.Copy(output, 0, newOutput, 0, output.Length);
                 output = newOutput;
             }

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/80561f72/src/core/Util/Fst/ForwardBytesReader.cs
----------------------------------------------------------------------
diff --git a/src/core/Util/Fst/ForwardBytesReader.cs b/src/core/Util/Fst/ForwardBytesReader.cs
index fa18c84..bcbb357 100644
--- a/src/core/Util/Fst/ForwardBytesReader.cs
+++ b/src/core/Util/Fst/ForwardBytesReader.cs
@@ -23,12 +23,12 @@ namespace Lucene.Net.Util.Fst
             Position += count;
         }
 
-        public override sbyte ReadByte()
+        public override byte ReadByte()
         {
             return bytes[Position++];
         }
 
-        public override void ReadBytes(sbyte[] bytes, int offset, int len)
+        public override void ReadBytes(byte[] bytes, int offset, int len)
         {
             Array.Copy(this.bytes, Position, bytes, offset, len);
             Position += len;

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/80561f72/src/core/Util/Fst/ReverseBytesReader.cs
----------------------------------------------------------------------
diff --git a/src/core/Util/Fst/ReverseBytesReader.cs b/src/core/Util/Fst/ReverseBytesReader.cs
index afb4197..ad9c4d1 100644
--- a/src/core/Util/Fst/ReverseBytesReader.cs
+++ b/src/core/Util/Fst/ReverseBytesReader.cs
@@ -10,12 +10,12 @@
             this.bytes = bytes;
         }
 
-        public override sbyte ReadByte()
+        public override byte ReadByte()
         {
             return bytes[Position--];
         }
 
-        public override void ReadBytes(sbyte[] b, int offset, int len)
+        public override void ReadBytes(byte[] b, int offset, int len)
         {
             for (var i = 0; i < len; i++)
             {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/80561f72/src/core/Util/NamedThreadFactory.cs
----------------------------------------------------------------------
diff --git a/src/core/Util/NamedThreadFactory.cs b/src/core/Util/NamedThreadFactory.cs
index 7592d3e..cb13e04 100644
--- a/src/core/Util/NamedThreadFactory.cs
+++ b/src/core/Util/NamedThreadFactory.cs
@@ -26,7 +26,7 @@ namespace Lucene.Net.Util
             return prefix == null || prefix.Length == 0 ? "Lucene" : prefix;
         }
 
-        public Thread NewThread(IThreadRunnable r)
+        public override Thread NewThread(IThreadRunnable r)
         {
             Thread t = new Thread(r.Run) 
             { 

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/80561f72/src/core/Util/OpenBitSetIterator.cs
----------------------------------------------------------------------
diff --git a/src/core/Util/OpenBitSetIterator.cs b/src/core/Util/OpenBitSetIterator.cs
index 6a88ee2..febdd83 100644
--- a/src/core/Util/OpenBitSetIterator.cs
+++ b/src/core/Util/OpenBitSetIterator.cs
@@ -214,14 +214,20 @@ namespace Lucene.Net.Util
             return curDocId = (i << 6) + bitIndex;
         }
 
-        public override int DocID()
+        public override int DocID
         {
-            return curDocId;
+            get
+            {
+                return curDocId;
+            }
         }
 
-        public override long Cost()
+        public override long Cost
         {
-            return words / 64;
+            get
+            {
+                return words / 64;
+            }
         }
     }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/80561f72/src/core/Util/Packed/AppendingLongBuffer.cs
----------------------------------------------------------------------
diff --git a/src/core/Util/Packed/AppendingLongBuffer.cs b/src/core/Util/Packed/AppendingLongBuffer.cs
index 0f46775..86f0d89 100644
--- a/src/core/Util/Packed/AppendingLongBuffer.cs
+++ b/src/core/Util/Packed/AppendingLongBuffer.cs
@@ -29,7 +29,7 @@ namespace Lucene.Net.Util.Packed
             }
         }
 
-        internal void PackPendingValues()
+        internal override void PackPendingValues()
         {
             //assert pendingOff == MAX_PENDING_COUNT;
 
@@ -61,7 +61,7 @@ namespace Lucene.Net.Util.Packed
             }
         }
 
-        internal override Iterator GetIterator()
+        internal override AbstractAppendingLongBuffer.Iterator GetIterator()
         {
             return new Iterator(this);
         }
@@ -76,7 +76,7 @@ namespace Lucene.Net.Util.Packed
                 this.parent = parent;
             }
 
-            void FillValues()
+            internal override void FillValues()
             {
                 if (vOff == parent.valuesOff)
                 {


[45/50] [abbrv] git commit: Fix dumb Java Reader API assertion

Posted by mh...@apache.org.
Fix dumb Java Reader API assertion

In .NET, if Read returns 0, that means you're at the end. WHY, Java, WHY would you
return -1?!


Project: http://git-wip-us.apache.org/repos/asf/lucenenet/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucenenet/commit/c1883768
Tree: http://git-wip-us.apache.org/repos/asf/lucenenet/tree/c1883768
Diff: http://git-wip-us.apache.org/repos/asf/lucenenet/diff/c1883768

Branch: refs/heads/branch_4x
Commit: c1883768c0c9b367746eafd959113a02f70fa2b6
Parents: 02a37a0
Author: Paul Irwin <pa...@gmail.com>
Authored: Fri Aug 9 14:33:50 2013 -0400
Committer: Paul Irwin <pa...@gmail.com>
Committed: Fri Aug 9 14:33:50 2013 -0400

----------------------------------------------------------------------
 src/contrib/QueryParsers/Classic/FastCharStream.cs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucenenet/blob/c1883768/src/contrib/QueryParsers/Classic/FastCharStream.cs
----------------------------------------------------------------------
diff --git a/src/contrib/QueryParsers/Classic/FastCharStream.cs b/src/contrib/QueryParsers/Classic/FastCharStream.cs
index 6e3a39e..20d7920 100644
--- a/src/contrib/QueryParsers/Classic/FastCharStream.cs
+++ b/src/contrib/QueryParsers/Classic/FastCharStream.cs
@@ -60,7 +60,7 @@ namespace Lucene.Net.QueryParsers.Classic
 
             int charsRead =          // fill space in buffer
               input.Read(buffer, newPosition, buffer.Length - newPosition);
-            if (charsRead == -1)
+            if (charsRead <= 0)
                 throw new IOException("read past eof");
             else
                 bufferLength += charsRead;
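
For reference, the two runtimes disagree on the end-of-stream sentinel: Java's
Reader.read(char[], int, int) returns -1 at EOF, while .NET's
TextReader.Read(char[], int, int) returns 0, which is why the ported check must
treat anything <= 0 as exhaustion. A minimal sketch of a drain loop under the .NET
convention (CopyAll is a hypothetical helper, not part of Lucene.Net):

    using System;
    using System.IO;

    static class ReaderEofDemo
    {
        // Drains a TextReader using the .NET convention, where
        // Read(...) == 0 (not -1) signals end of stream.
        static void CopyAll(TextReader input, TextWriter output)
        {
            var buffer = new char[1024];
            int charsRead;
            while ((charsRead = input.Read(buffer, 0, buffer.Length)) > 0)
            {
                output.Write(buffer, 0, charsRead);
            }
        }

        static void Main()
        {
            CopyAll(new StringReader("hello"), Console.Out);
        }
    }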


[28/50] [abbrv] git commit: Make index writing work, but it is still corrupt

Posted by mh...@apache.org.
Make index writing work, but it is still corrupt


Project: http://git-wip-us.apache.org/repos/asf/lucenenet/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucenenet/commit/ec36d0d5
Tree: http://git-wip-us.apache.org/repos/asf/lucenenet/tree/ec36d0d5
Diff: http://git-wip-us.apache.org/repos/asf/lucenenet/diff/ec36d0d5

Branch: refs/heads/branch_4x
Commit: ec36d0d5ccabc7e2f17af8b4d25ff5fcaef7a4c2
Parents: 733dc18
Author: Paul Irwin <pa...@gmail.com>
Authored: Tue Aug 6 17:41:56 2013 -0400
Committer: Paul Irwin <pa...@gmail.com>
Committed: Tue Aug 6 17:41:56 2013 -0400

----------------------------------------------------------------------
 src/core/Document/Field.cs                  |  6 +--
 src/core/Document/FieldType.cs              |  4 +-
 src/core/Index/DocValuesProcessor.cs        |  2 +-
 src/core/Index/DocumentsWriterFlushQueue.cs |  2 +-
 src/core/Index/FieldInfos.cs                | 27 +++++++------
 src/core/Index/IIndexableField.cs           |  2 +-
 src/core/Index/IIndexableFieldType.cs       |  2 +-
 src/core/Index/IndexReaderContext.cs        |  2 +-
 src/core/Index/SegmentInfos.cs              |  2 +-
 src/core/Store/BufferedIndexInput.cs        |  8 ++--
 src/core/Store/BufferedIndexOutput.cs       |  2 +-
 src/core/Store/DataOutput.cs                |  2 +-
 src/core/Store/RAMOutputStream.cs           |  2 +-
 src/core/Util/Fst/Builder.cs                |  4 +-
 src/core/Util/Fst/FST.cs                    |  2 +-
 src/core/Util/IOUtils.cs                    |  2 +-
 src/core/Util/NamedSPILoader.cs             |  2 +-
 src/core/Util/SPIClassIterator.cs           | 50 ++++++++++--------------
 18 files changed, 59 insertions(+), 64 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucenenet/blob/ec36d0d5/src/core/Document/Field.cs
----------------------------------------------------------------------
diff --git a/src/core/Document/Field.cs b/src/core/Document/Field.cs
index 9cc069f..5ed5585 100644
--- a/src/core/Document/Field.cs
+++ b/src/core/Document/Field.cs
@@ -346,13 +346,13 @@ namespace Lucene.Net.Documents
             }
         }
 
-        public object NumericValue
+        public long NumericValue
         {
             get
             {
                 // .NET Port: No base type for all numeric types, so unless we want to rewrite this
                 // to be LongValue, IntValue, FloatValue, etc, this will have to do.
-                return fieldsData;
+                return Convert.ToInt64(fieldsData);
             }
         }
 
@@ -412,7 +412,7 @@ namespace Lucene.Net.Documents
                 }
                 NumericTokenStream nts = (NumericTokenStream)internalTokenStream;
                 // initialize value in TokenStream
-                Number val = (Number)fieldsData;
+                object val = fieldsData;
                 switch (numericType)
                 {
                     case FieldType.NumericType.INT:

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/ec36d0d5/src/core/Document/FieldType.cs
----------------------------------------------------------------------
diff --git a/src/core/Document/FieldType.cs b/src/core/Document/FieldType.cs
index f9410ea..1b8c3c5 100644
--- a/src/core/Document/FieldType.cs
+++ b/src/core/Document/FieldType.cs
@@ -40,7 +40,7 @@ namespace Lucene.Net.Documents
         private NumericType? numericType;
         private bool frozen;
         private int numericPrecisionStep = NumericUtils.PRECISION_STEP_DEFAULT;
-        private FieldInfo.DocValuesType docValueType;
+        private FieldInfo.DocValuesType? docValueType;
                 
         public FieldType(FieldType refFieldType)
         {
@@ -255,7 +255,7 @@ namespace Lucene.Net.Documents
             return result.ToString();
         }
 
-        public FieldInfo.DocValuesType DocValueType
+        public FieldInfo.DocValuesType? DocValueType
         {
             get { return docValueType; }
             set

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/ec36d0d5/src/core/Index/DocValuesProcessor.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/DocValuesProcessor.cs b/src/core/Index/DocValuesProcessor.cs
index 4b90f37..dd707e6 100644
--- a/src/core/Index/DocValuesProcessor.cs
+++ b/src/core/Index/DocValuesProcessor.cs
@@ -31,7 +31,7 @@ namespace Lucene.Net.Index
 
         public override void AddField(int docID, IIndexableField field, FieldInfo fieldInfo)
         {
-            FieldInfo.DocValuesType dvType = field.FieldTypeValue.DocValueType;
+            FieldInfo.DocValuesType? dvType = field.FieldTypeValue.DocValueType;
             if (dvType != null)
             {
                 fieldInfo.DocValuesTypeValue = dvType;
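
Several hunks in this commit widen FieldInfo.DocValuesType to FieldInfo.DocValuesType?:
a Java enum reference can be null, but a C# enum is a value type, so the port needs
Nullable<T> to keep "no doc values" representable. A small sketch with a hypothetical
stand-in enum:

    using System;

    enum DocValuesKind { Numeric, Binary, Sorted } // hypothetical stand-in

    static class NullableEnumDemo
    {
        static void Main()
        {
            DocValuesKind? kind = null;    // models "field has no doc values"
            if (kind == null)
                Console.WriteLine("no doc values");

            kind = DocValuesKind.Numeric;
            Console.WriteLine(kind.Value); // Numeric
        }
    }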

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/ec36d0d5/src/core/Index/DocumentsWriterFlushQueue.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/DocumentsWriterFlushQueue.cs b/src/core/Index/DocumentsWriterFlushQueue.cs
index d7df5cb..83c9805 100644
--- a/src/core/Index/DocumentsWriterFlushQueue.cs
+++ b/src/core/Index/DocumentsWriterFlushQueue.cs
@@ -117,7 +117,7 @@ namespace Lucene.Net.Index
                 bool canPublish;
                 lock (this)
                 {
-                    head = queue.Peek();
+                    head = queue.Count > 0 ? queue.Peek() : null;
                     canPublish = head != null && head.CanPublish; // do this synced 
                 }
                 if (canPublish)

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/ec36d0d5/src/core/Index/FieldInfos.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/FieldInfos.cs b/src/core/Index/FieldInfos.cs
index be04140..9609ad4 100644
--- a/src/core/Index/FieldInfos.cs
+++ b/src/core/Index/FieldInfos.cs
@@ -62,17 +62,22 @@ namespace Lucene.Net.Index
 
             foreach (FieldInfo info in infos)
             {
-                FieldInfo previous = byNumber[info.number] = info;
-                if (previous != null)
+                FieldInfo previous;
+
+                if (byNumber.TryGetValue(info.number, out previous))
                 {
                     throw new ArgumentException("duplicate field numbers: " + previous.name + " and " + info.name + " have: " + info.number);
                 }
-                previous = byName[info.name] = info;
-                if (previous != null)
+
+                byNumber[info.number] = info;
+
+                if (byName.TryGetValue(info.name, out previous))
                 {
                     throw new ArgumentException("duplicate field names: " + previous.number + " and " + info.number + " have: " + info.name);
                 }
 
+                byName[info.name] = info;
+
                 hasVectors |= info.HasVectors;
                 hasProx |= info.IsIndexed && info.IndexOptionsValue >= Index.FieldInfo.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS;
                 hasFreq |= info.IsIndexed && info.IndexOptionsValue != Index.FieldInfo.IndexOptions.DOCS_ONLY;
@@ -165,7 +170,7 @@ namespace Lucene.Net.Index
             // We use this to enforce that a given field never
             // changes DV type, even across segments / IndexWriter
             // sessions:
-            private readonly IDictionary<string, Index.FieldInfo.DocValuesType> docValuesType;
+            private readonly IDictionary<string, Index.FieldInfo.DocValuesType?> docValuesType;
 
             // TODO: we should similarly catch an attempt to turn
             // norms back on after they were already ommitted; today
@@ -176,16 +181,16 @@ namespace Lucene.Net.Index
             {
                 this.nameToNumber = new HashMap<string, int>();
                 this.numberToName = new HashMap<int, string>();
-                this.docValuesType = new HashMap<string, Index.FieldInfo.DocValuesType>();
+                this.docValuesType = new HashMap<string, Index.FieldInfo.DocValuesType?>();
             }
 
-            internal int AddOrGet(string fieldName, int preferredFieldNumber, Index.FieldInfo.DocValuesType dvType)
+            internal int AddOrGet(string fieldName, int preferredFieldNumber, Index.FieldInfo.DocValuesType? dvType)
             {
                 lock (this)
                 {
                     if (dvType != null)
                     {
-                        Index.FieldInfo.DocValuesType currentDVType = docValuesType[fieldName];
+                        Index.FieldInfo.DocValuesType? currentDVType = docValuesType[fieldName];
                         if (currentDVType == null)
                         {
                             docValuesType[fieldName] = dvType;
@@ -195,8 +200,8 @@ namespace Lucene.Net.Index
                             throw new ArgumentException("cannot change DocValues type from " + currentDVType + " to " + dvType + " for field \"" + fieldName + "\"");
                         }
                     }
-                    int fieldNumber = nameToNumber[fieldName];
-                    if (fieldNumber == null)
+                    int fieldNumber;
+                    if (!nameToNumber.TryGetValue(fieldName, out fieldNumber))
                     {
                         int preferredBoxed = preferredFieldNumber; // .NET port: no need to box here
 
@@ -281,7 +286,7 @@ namespace Lucene.Net.Index
             }
 
             private FieldInfo AddOrUpdateInternal(String name, int preferredFieldNumber, bool isIndexed,
-                bool storeTermVector, bool omitNorms, bool storePayloads, Index.FieldInfo.IndexOptions indexOptions, Index.FieldInfo.DocValuesType docValues, Index.FieldInfo.DocValuesType? normType)
+                bool storeTermVector, bool omitNorms, bool storePayloads, Index.FieldInfo.IndexOptions indexOptions, Index.FieldInfo.DocValuesType? docValues, Index.FieldInfo.DocValuesType? normType)
             {
                 FieldInfo fi = FieldInfo(name);
                 if (fi == null)
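
The FieldInfos change above works around a map-API mismatch: Java's Map.put returns
the previous value (or null), while assigning through a C# Dictionary indexer returns
nothing and reading a missing key throws, so duplicate detection has to probe with
TryGetValue first. A sketch of the pattern with hypothetical field data:

    using System;
    using System.Collections.Generic;

    static class PutSemanticsDemo
    {
        // Rejects duplicate keys the way the ported FieldInfos loop does:
        // probe with TryGetValue, then assign through the indexer.
        static void AddUnique(IDictionary<int, string> byNumber, int number, string name)
        {
            string previous;
            if (byNumber.TryGetValue(number, out previous))
                throw new ArgumentException(
                    "duplicate field numbers: " + previous + " and " + name + " have: " + number);
            byNumber[number] = name;
        }

        static void Main()
        {
            var map = new Dictionary<int, string>();
            AddUnique(map, 1, "title");
            AddUnique(map, 1, "body"); // throws ArgumentException
        }
    }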

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/ec36d0d5/src/core/Index/IIndexableField.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/IIndexableField.cs b/src/core/Index/IIndexableField.cs
index 29ff853..f7f12f7 100644
--- a/src/core/Index/IIndexableField.cs
+++ b/src/core/Index/IIndexableField.cs
@@ -22,7 +22,7 @@ namespace Lucene.Net.Index
 
         TextReader ReaderValue { get; }
 
-        object NumericValue { get; }
+        long NumericValue { get; }
 
         TokenStream TokenStream(Analyzer analyzer);
     }

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/ec36d0d5/src/core/Index/IIndexableFieldType.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/IIndexableFieldType.cs b/src/core/Index/IIndexableFieldType.cs
index c0d370c..3a0e57b 100644
--- a/src/core/Index/IIndexableFieldType.cs
+++ b/src/core/Index/IIndexableFieldType.cs
@@ -25,6 +25,6 @@ namespace Lucene.Net.Index
 
         FieldInfo.IndexOptions IndexOptions { get; }
 
-        FieldInfo.DocValuesType DocValueType { get; }
+        FieldInfo.DocValuesType? DocValueType { get; }
     }
 }

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/ec36d0d5/src/core/Index/IndexReaderContext.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/IndexReaderContext.cs b/src/core/Index/IndexReaderContext.cs
index 3fcbba8..622a020 100644
--- a/src/core/Index/IndexReaderContext.cs
+++ b/src/core/Index/IndexReaderContext.cs
@@ -14,7 +14,7 @@ namespace Lucene.Net.Index
 
         public IndexReaderContext(CompositeReaderContext parent, int ordInParent, int docBaseInParent)
         {
-            if (this.GetType() != typeof(CompositeReaderContext) || this.GetType() != typeof(AtomicReaderContext))
+            if (!(this is CompositeReaderContext || this is AtomicReaderContext))
                 throw new Exception("This class should never be extended by custom code!");
 
             //if (!(this instanceof CompositeReaderContext || this instanceof AtomicReaderContext))

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/ec36d0d5/src/core/Index/SegmentInfos.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/SegmentInfos.cs b/src/core/Index/SegmentInfos.cs
index caf74a3..72cb341 100644
--- a/src/core/Index/SegmentInfos.cs
+++ b/src/core/Index/SegmentInfos.cs
@@ -159,7 +159,7 @@ namespace Lucene.Net.Index
             }
             else if (fileName.StartsWith(IndexFileNames.SEGMENTS))
             {
-                return long.Parse(fileName.Substring(1 + IndexFileNames.SEGMENTS.Length));
+                return Number.Parse(fileName.Substring(1 + IndexFileNames.SEGMENTS.Length), Character.MAX_RADIX);
             }
             else
             {
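
The generation suffix of a segments_N file name is written in radix 36
(Character.MAX_RADIX in Java), so plain long.Parse mis-reads it; .NET's
Convert.ToInt64 only accepts bases 2, 8, 10, and 16, which is presumably why the
port goes through its Number.Parse support helper. A hand-rolled radix-36 parse,
for illustration only:

    using System;

    static class Radix36Demo
    {
        // Parses a non-negative radix-36 string such as the "2f" in "segments_2f".
        static long ParseRadix36(string s)
        {
            long value = 0;
            foreach (char c in s.ToLowerInvariant())
            {
                int digit = char.IsDigit(c) ? c - '0' : c - 'a' + 10;
                if (digit < 0 || digit > 35)
                    throw new FormatException("bad radix-36 digit: " + c);
                value = value * 36 + digit;
            }
            return value;
        }

        static void Main()
        {
            Console.WriteLine(ParseRadix36("2f")); // 87
        }
    }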

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/ec36d0d5/src/core/Store/BufferedIndexInput.cs
----------------------------------------------------------------------
diff --git a/src/core/Store/BufferedIndexInput.cs b/src/core/Store/BufferedIndexInput.cs
index af155b1..6243a5f 100644
--- a/src/core/Store/BufferedIndexInput.cs
+++ b/src/core/Store/BufferedIndexInput.cs
@@ -123,7 +123,7 @@ namespace Lucene.Net.Store
                 // the buffer contains enough data to satisfy this request
                 if (len > 0)
                     // to allow b to be null if len is 0...
-                    Array.Copy(buffer, bufferPosition, b, offset, len);
+                    Buffer.BlockCopy(buffer, bufferPosition, b, offset, len);
                 bufferPosition += len;
             }
             else
@@ -132,7 +132,7 @@ namespace Lucene.Net.Store
                 int available = bufferLength - bufferPosition;
                 if (available > 0)
                 {
-                    Array.Copy(buffer, bufferPosition, b, offset, available);
+                    Buffer.BlockCopy(buffer, bufferPosition, b, offset, available);
                     offset += available;
                     len -= available;
                     bufferPosition += available;
@@ -147,12 +147,12 @@ namespace Lucene.Net.Store
                     if (bufferLength < len)
                     {
                         // Throw an exception when refill() could not read len bytes:
-                        Array.Copy(buffer, 0, b, offset, bufferLength);
+                        Buffer.BlockCopy(buffer, 0, b, offset, bufferLength);
                         throw new System.IO.IOException("read past EOF");
                     }
                     else
                     {
-                        Array.Copy(buffer, 0, b, offset, len);
+                        Buffer.BlockCopy(buffer, 0, b, offset, len);
                         bufferPosition = len;
                     }
                 }
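
These Store hunks swap Array.Copy for Buffer.BlockCopy when shuffling bytes between
byte[] buffers. BlockCopy copies raw bytes (its offsets and count are byte offsets)
and skips per-element type checks, so for primitive buffers it is typically faster
and, for byte[], a drop-in replacement. A tiny sketch:

    using System;

    static class BlockCopyDemo
    {
        static void Main()
        {
            byte[] src = { 1, 2, 3, 4, 5 };
            byte[] dst = new byte[5];

            // For byte[] the element index and byte offset coincide,
            // so the arguments mirror Array.Copy exactly.
            Buffer.BlockCopy(src, 1, dst, 0, 3);
            Console.WriteLine(string.Join(",", dst)); // 2,3,4,0,0
        }
    }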

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/ec36d0d5/src/core/Store/BufferedIndexOutput.cs
----------------------------------------------------------------------
diff --git a/src/core/Store/BufferedIndexOutput.cs b/src/core/Store/BufferedIndexOutput.cs
index 79e8dc1..8c9c96f 100644
--- a/src/core/Store/BufferedIndexOutput.cs
+++ b/src/core/Store/BufferedIndexOutput.cs
@@ -70,7 +70,7 @@ namespace Lucene.Net.Store
             if (bytesLeft >= length)
             {
                 // we add the data to the end of the buffer
-                Array.Copy(b, offset, buffer, bufferPosition, length);
+                Buffer.BlockCopy(b, offset, buffer, bufferPosition, length);
                 bufferPosition += length;
                 // if the buffer is full, flush it
                 if (bufferSize - bufferPosition == 0)

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/ec36d0d5/src/core/Store/DataOutput.cs
----------------------------------------------------------------------
diff --git a/src/core/Store/DataOutput.cs b/src/core/Store/DataOutput.cs
index 2368b6f..4310f93 100644
--- a/src/core/Store/DataOutput.cs
+++ b/src/core/Store/DataOutput.cs
@@ -32,7 +32,7 @@ namespace Lucene.Net.Store
         public void WriteByte(sbyte b)
         {
             // helper method to account for java's byte being signed
-            WriteByte((sbyte)b);
+            WriteByte((byte)b);
         }
 
         public virtual void WriteInt(int i)
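
The DataOutput hunk above fixes unbounded recursion: inside WriteByte(sbyte), casting
the argument to sbyte re-invokes the same overload forever, while the byte cast
dispatches to the abstract WriteByte(byte). A stripped-down sketch of that overload
dispatch (type names are illustrative):

    using System;

    abstract class Output
    {
        public abstract void WriteByte(byte b); // the real sink

        // Convenience overload for Java-style signed bytes.
        public void WriteByte(sbyte b)
        {
            // WriteByte((sbyte)b) would recurse into this method forever;
            // the byte cast selects the abstract overload instead.
            WriteByte((byte)b);
        }
    }

    class ConsoleOutput : Output
    {
        public override void WriteByte(byte b)
        {
            Console.WriteLine(b);
        }
    }

    static class Demo
    {
        static void Main()
        {
            new ConsoleOutput().WriteByte((sbyte)-1); // prints 255
        }
    }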

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/ec36d0d5/src/core/Store/RAMOutputStream.cs
----------------------------------------------------------------------
diff --git a/src/core/Store/RAMOutputStream.cs b/src/core/Store/RAMOutputStream.cs
index 74103b3..54b81a7 100644
--- a/src/core/Store/RAMOutputStream.cs
+++ b/src/core/Store/RAMOutputStream.cs
@@ -162,7 +162,7 @@ namespace Lucene.Net.Store
 
                 int remainInBuffer = currentBuffer.Length - bufferPosition;
                 int bytesToCopy = len < remainInBuffer ? len : remainInBuffer;
-                Array.Copy(b, offset, currentBuffer, bufferPosition, bytesToCopy);
+                Buffer.BlockCopy(b, offset, currentBuffer, bufferPosition, bytesToCopy);
                 offset += bytesToCopy;
                 len -= bytesToCopy;
                 bufferPosition += bytesToCopy;

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/ec36d0d5/src/core/Util/Fst/Builder.cs
----------------------------------------------------------------------
diff --git a/src/core/Util/Fst/Builder.cs b/src/core/Util/Fst/Builder.cs
index 626b4ab..2fa93be 100644
--- a/src/core/Util/Fst/Builder.cs
+++ b/src/core/Util/Fst/Builder.cs
@@ -415,7 +415,7 @@ namespace Lucene.Net.Util.Fst
             public UnCompiledNode(Builder<T> owner, int depth)
             {
                 _owner = owner;
-                Arcs = new FST<T>.Arc<T>[1] as Arc<T>[];
+                Arcs = new Arc<T>[1];
                 Arcs[0] = new Arc<T>();
                 Output = owner.NO_OUTPUT;
                 _depth = depth;
@@ -447,7 +447,7 @@ namespace Lucene.Net.Util.Fst
                 if (!(label >= 0)) throw new ArgumentException("label must be greater than or equal to zero");
 
                 // TODO: is debug.assert correct here? or is this validation? ...
-                Debug.Assert(NumArcs == 0 || label > Arcs[NumArcs - 1].Label, "arc[-1].label=" + Arcs[NumArcs - 1].Label + " new label=" + label + " numArcs=" + NumArcs);
+                //Debug.Assert(NumArcs == 0 || label > Arcs[NumArcs - 1].Label, "arc[-1].label=" + Arcs[NumArcs - 1].Label + " new label=" + label + " numArcs=" + NumArcs);
 
                 if (NumArcs == Arcs.Length)
                 {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/ec36d0d5/src/core/Util/Fst/FST.cs
----------------------------------------------------------------------
diff --git a/src/core/Util/Fst/FST.cs b/src/core/Util/Fst/FST.cs
index 96f0cf7..c6cbcd6 100644
--- a/src/core/Util/Fst/FST.cs
+++ b/src/core/Util/Fst/FST.cs
@@ -224,7 +224,7 @@ namespace Lucene.Net.Util.Fst
 
         internal void Save(DataOutput output)
         {
-            if (startNode != -1)
+            if (startNode == -1)
                 throw new InvalidOperationException("call finish first");
             if (NodeAddress != null)
                 throw new InvalidOperationException("cannot save an FST pre-packed FST; it must first be packed");

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/ec36d0d5/src/core/Util/IOUtils.cs
----------------------------------------------------------------------
diff --git a/src/core/Util/IOUtils.cs b/src/core/Util/IOUtils.cs
index ab2b960..16bce94 100644
--- a/src/core/Util/IOUtils.cs
+++ b/src/core/Util/IOUtils.cs
@@ -16,7 +16,7 @@ namespace Lucene.Net.Util
             where E : Exception
         {
             // java version has a separate implementation here, but might as well re-use the other one until we can't
-            CloseWhileHandlingException<E>(priorException, objects);
+            CloseWhileHandlingException<E>(priorException, (IEnumerable<IDisposable>)objects);
         }
 
         public static void CloseWhileHandlingException<E>(E priorException, IEnumerable<IDisposable> objects)

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/ec36d0d5/src/core/Util/NamedSPILoader.cs
----------------------------------------------------------------------
diff --git a/src/core/Util/NamedSPILoader.cs b/src/core/Util/NamedSPILoader.cs
index 21ddce1..e40145f 100644
--- a/src/core/Util/NamedSPILoader.cs
+++ b/src/core/Util/NamedSPILoader.cs
@@ -24,7 +24,7 @@ namespace Lucene.Net.Util
         public void Reload()
         {
             IDictionary<String, S> services = new Dictionary<String, S>(this.services);
-            SPIClassIterator<S> loader = SPIClassIterator<S>.Get(clazz);
+            SPIClassIterator<S> loader = SPIClassIterator<S>.Get();
             
             foreach (Type c in loader)
             {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/ec36d0d5/src/core/Util/SPIClassIterator.cs
----------------------------------------------------------------------
diff --git a/src/core/Util/SPIClassIterator.cs b/src/core/Util/SPIClassIterator.cs
index bbb7a4d..4601b44 100644
--- a/src/core/Util/SPIClassIterator.cs
+++ b/src/core/Util/SPIClassIterator.cs
@@ -1,6 +1,7 @@
 using System;
 using System.Collections.Generic;
 using System.Linq;
+using System.Reflection;
 using System.Text;
 
 namespace Lucene.Net.Util
@@ -9,52 +10,41 @@ namespace Lucene.Net.Util
     /// TODO: Not sure what to do here.
     /// </summary>
     /// <typeparam name="S"></typeparam>
-    public class SPIClassIterator<S> : IEnumerable<Type>, IEnumerator<Type>
+    public class SPIClassIterator<S> : IEnumerable<Type>
     {
-        private static readonly string META_INF_SERVICES = "META-INF/services/";
+        private static List<Type> _types;
 
-        private readonly Type clazz;
-        private readonly IEnumerable<Uri> profilesEnum;
-        private IEnumerator<string> linesIterator;
-
-        public static SPIClassIterator<S> Get(Type clazz)
-        {
-            throw new NotImplementedException();
-        }
-        
-        public Type Current
+        static SPIClassIterator()
         {
-            get { throw new NotImplementedException(); }
-        }
+            _types = new List<Type>();
 
-        public void Dispose()
-        {
-            throw new NotImplementedException();
+            foreach (var assembly in AppDomain.CurrentDomain.GetAssemblies())
+            {
+                foreach (var type in assembly.GetTypes())
+                {
+                    if (typeof(S).IsAssignableFrom(type) && !type.IsAbstract && !type.IsInterface && type.GetConstructor(Type.EmptyTypes) != null)
+                        _types.Add(type);
+                }
+            }
         }
 
-        object System.Collections.IEnumerator.Current
-        {
-            get { throw new NotImplementedException(); }
-        }
+        //private static readonly string META_INF_SERVICES = "META-INF/services/";
 
-        public bool MoveNext()
-        {
-            throw new NotImplementedException();
-        }
+        
 
-        public void Reset()
+        public static SPIClassIterator<S> Get()
         {
-            throw new NotImplementedException();
+            return new SPIClassIterator<S>();
         }
-
+        
         public IEnumerator<Type> GetEnumerator()
         {
-            return this;
+            return _types.GetEnumerator();
         }
 
         System.Collections.IEnumerator System.Collections.IEnumerable.GetEnumerator()
         {
-            return this;
+            return GetEnumerator();
         }
     }
 }

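After the rewrite, the iterator yields every concrete type with a parameterless constructor that implements S, discovered once per closed generic type when the static constructor runs. A usage sketch, where ICodec stands in for a real service interface:

    // Sketch only: enumerate and instantiate discovered implementations.
    foreach (Type t in SPIClassIterator<ICodec>.Get())
    {
        var service = (ICodec)Activator.CreateInstance(t);
        // register the service by name, etc.
    }

One caveat: Assembly.GetTypes() throws ReflectionTypeLoadException if any type in an assembly fails to load, so a defensive version of the static constructor might want to catch that per assembly.
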

[07/50] [abbrv] git commit: Port: more util unit tests

Posted by mh...@apache.org.
Port: more util unit tests


Project: http://git-wip-us.apache.org/repos/asf/lucenenet/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucenenet/commit/27c7d0d9
Tree: http://git-wip-us.apache.org/repos/asf/lucenenet/tree/27c7d0d9
Diff: http://git-wip-us.apache.org/repos/asf/lucenenet/diff/27c7d0d9

Branch: refs/heads/branch_4x
Commit: 27c7d0d946026166e1ef95a5d93df6bf9ef8d368
Parents: 2e4e00b
Author: James Blair <jm...@gmail.com>
Authored: Thu Jul 11 17:39:36 2013 -0400
Committer: James Blair <jm...@gmail.com>
Committed: Thu Jul 11 17:39:36 2013 -0400

----------------------------------------------------------------------
 test/core/Util/TestPagedBytes.cs                |  12 +-
 test/core/Util/TestPriorityQueue.cs             |  40 ++---
 test/core/Util/TestRamUsageEstimator.cs         | 167 ++++++++++++++-----
 .../Util/TestRamUsageEstimatorOnWildAnimals.cs  |  48 ++++++
 .../Util/TestRecyclingByteBlockAllocator.cs     | 137 +++++++++++++++
 .../core/Util/TestRecyclingIntBlockAllocator.cs | 138 +++++++++++++++
 test/core/Util/TestRollingBuffer.cs             |  83 +++++++++
 test/core/Util/TestSentineIntSet.cs             |  65 ++++++++
 test/core/Util/TestSetOnce.cs                   | 103 ++++++++++++
 9 files changed, 724 insertions(+), 69 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucenenet/blob/27c7d0d9/test/core/Util/TestPagedBytes.cs
----------------------------------------------------------------------
diff --git a/test/core/Util/TestPagedBytes.cs b/test/core/Util/TestPagedBytes.cs
index bd80843..4a24369 100644
--- a/test/core/Util/TestPagedBytes.cs
+++ b/test/core/Util/TestPagedBytes.cs
@@ -44,7 +44,7 @@ namespace Lucene.Net.Test.Util
                     }
                 }
 
-                indexOutput.Close();
+                indexOutput.Dispose();
                 IndexInput input = dir.OpenInput("foo", IOContext.DEFAULT);
                 var dataInput = (DataInput)input.Clone();
 
@@ -79,8 +79,8 @@ namespace Lucene.Net.Test.Util
                         assertEquals(answer[pos + byteUpto], slice.bytes[slice.offset + byteUpto]);
                     }
                 }
-                input.Close();
-                dir.Close();
+                input.Dispose();
+                dir.Dispose();
             }
         }
 
@@ -113,7 +113,7 @@ namespace Lucene.Net.Test.Util
                 i += len;
             }
             assertEquals(numBytes, indexOutput.FilePointer);
-            indexOutput.Close();
+            indexOutput.Dispose();
             IndexInput indexInput = dir.OpenInput("foo", IOContext.DEFAULT);
             p.Copy(indexInput, numBytes);
             var reader = p.Freeze(random.NextBool());
@@ -124,8 +124,8 @@ namespace Lucene.Net.Test.Util
                 reader.FillSlice(b, offset, 1);
                 assertEquals(arr[(int)(offset % arr.Length)], b.bytes[b.offset]);
             }
-            indexInput.Close();
-            dir.Close();
+            indexInput.Dispose();
+            dir.Dispose();
         }
     }
 }

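Since the ported IndexInput/IndexOutput now expose Dispose() rather than Close(), the same cleanup can also be written with using blocks; a sketch, assuming the Directory API mirrors Java's CreateOutput(name, context):

    // Sketch only: deterministic cleanup via using instead of explicit Dispose().
    using (var output = dir.CreateOutput("foo", IOContext.DEFAULT))
    {
        // ... write bytes ...
    }   // Dispose() runs here, where the old code called Close()
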
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/27c7d0d9/test/core/Util/TestPriorityQueue.cs
----------------------------------------------------------------------
diff --git a/test/core/Util/TestPriorityQueue.cs b/test/core/Util/TestPriorityQueue.cs
index 4a95578..de236b6 100644
--- a/test/core/Util/TestPriorityQueue.cs
+++ b/test/core/Util/TestPriorityQueue.cs
@@ -23,13 +23,13 @@ namespace Lucene.Net.Util
 {
 	
 	[TestFixture]
-	public class TestPriorityQueue:LuceneTestCase
+	public class TestPriorityQueue : LuceneTestCase
 	{
         private class IntegerQueue : PriorityQueue<int?>
         {
             public IntegerQueue(int count)
+                : base(count)
             {
-                Initialize(count);
             }
 
             public override bool LessThan(int? a, int? b)
@@ -41,16 +41,16 @@ namespace Lucene.Net.Util
 		[Test]
 		public virtual void  TestPQ()
 		{
-			TestPQ(10000, NewRandom());
+			TestPQ(AtLeast(10000), new Random());
 		}
 		
-		public static void  TestPQ(int count, System.Random gen)
+		public static void  TestPQ(int count, Random gen)
 		{
 			PriorityQueue<int?> pq = new IntegerQueue(count);
 		    int sum = 0;
             int? sum2 = 0;
 			
-			for (int i = 0; i < count; i++)
+			for (var i = 0; i < count; i++)
 			{
 				int next = gen.Next();
 				sum += next;
@@ -65,9 +65,9 @@ namespace Lucene.Net.Util
 			//      start = new Date();
 			
 			int? last = int.MinValue;
-			for (int i = 0; i < count; i++)
+			for (var i = 0; i < count; i++)
 			{
-				int? next = pq.Pop();
+				var next = pq.Pop();
 				Assert.IsTrue(next >= last);
 				last = next;
 				sum2 += last;
@@ -87,9 +87,9 @@ namespace Lucene.Net.Util
 			pq.Add(2);
             pq.Add(3);
             pq.Add(1);
-			Assert.AreEqual(3, pq.Size());
+			Assert.AreEqual(3, pq.Size);
 			pq.Clear();
-			Assert.AreEqual(0, pq.Size());
+			Assert.AreEqual(0, pq.Size);
 		}
 		
 		[Test]
@@ -102,29 +102,29 @@ namespace Lucene.Net.Util
             pq.InsertWithOverflow(5);
             pq.InsertWithOverflow(7);
             pq.InsertWithOverflow(1);
-			Assert.AreEqual(3, pq.Size());
+			Assert.AreEqual(3, pq.Size);
 			Assert.AreEqual(3, pq.Top());
 		}
 		
 		[Test]
 		public virtual void  TestInsertWithOverflow()
 		{
-			int size = 4;
+			var size = 4;
 			PriorityQueue<int?> pq = new IntegerQueue(size);
-			System.Int32 i1 = 2;
-			System.Int32 i2 = 3;
-			System.Int32 i3 = 1;
-			System.Int32 i4 = 5;
-			System.Int32 i5 = 7;
-			System.Int32 i6 = 1;
+			var i1 = 2;
+			var i2 = 3;
+			var i3 = 1;
+			var i4 = 5;
+			var i5 = 7;
+			var i6 = 1;
 			
 			Assert.IsNull(pq.InsertWithOverflow(i1));
 			Assert.IsNull(pq.InsertWithOverflow(i2));
 			Assert.IsNull(pq.InsertWithOverflow(i3));
 			Assert.IsNull(pq.InsertWithOverflow(i4));
-			Assert.IsTrue((int) pq.InsertWithOverflow(i5) == i3); // i3 should have been dropped
-			Assert.IsTrue((int) pq.InsertWithOverflow(i6) == i6); // i6 should not have been inserted
-			Assert.AreEqual(size, pq.Size());
+			Assert.IsTrue(pq.InsertWithOverflow(i5) == i3); // i3 should have been dropped
+			Assert.IsTrue(pq.InsertWithOverflow(i6) == i6); // i6 should not have been inserted
+			Assert.AreEqual(size, pq.Size);
 			Assert.AreEqual(2, pq.Top());
 		}
 	}

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/27c7d0d9/test/core/Util/TestRamUsageEstimator.cs
----------------------------------------------------------------------
diff --git a/test/core/Util/TestRamUsageEstimator.cs b/test/core/Util/TestRamUsageEstimator.cs
index 3ebe8a2..1cef890 100644
--- a/test/core/Util/TestRamUsageEstimator.cs
+++ b/test/core/Util/TestRamUsageEstimator.cs
@@ -15,54 +15,135 @@
  * limitations under the License.
  */
 
-using System;
 
+using System;
 using NUnit.Framework;
 
 namespace Lucene.Net.Util
 {
-	
+
     [TestFixture]
-	public class TestRamUsageEstimator
-	{
-		
+    public class TestRamUsageEstimator
+    {
+
+        [Test]
+        public void TestSanity()
+        {
+            Assert.IsTrue(RamUsageEstimator.SizeOf("test string") > RamUsageEstimator.ShallowSizeOfInstance(typeof(string)));
+
+            var holder = new Holder { holder = new Holder("string2", 5000L) };
+            Assert.IsTrue(RamUsageEstimator.SizeOf(holder) > RamUsageEstimator.ShallowSizeOfInstance(typeof(Holder)));
+            Assert.IsTrue(RamUsageEstimator.SizeOf(holder) > RamUsageEstimator.SizeOf(holder.holder));
+
+            Assert.IsTrue(RamUsageEstimator.ShallowSizeOfInstance(typeof(HolderSubclass)) >= RamUsageEstimator.ShallowSizeOfInstance(typeof(Holder)));
+            Assert.IsTrue(RamUsageEstimator.ShallowSizeOfInstance(typeof(Holder)) == RamUsageEstimator.ShallowSizeOfInstance(typeof(HolderSubclass2)));
+
+            var strings = new string[] {
+                    "test string",
+                    "hollow", 
+                    "catchmaster"
+                };
+            Assert.IsTrue(RamUsageEstimator.SizeOf(strings) > RamUsageEstimator.ShallowSizeOf(strings));
+        }
+
+        [Test]
+        public void TestStaticOverloads()
+        {
+            var rnd = new Random();
+            {
+                var array = new byte[rnd.Next(1024)];
+                Assert.AreEqual(RamUsageEstimator.SizeOf(array), RamUsageEstimator.SizeOf((Object)array));
+            }
+
+            {
+                var array = new bool[rnd.Next(1024)];
+                Assert.AreEqual(RamUsageEstimator.SizeOf(array), RamUsageEstimator.SizeOf((Object)array));
+            }
+
+            {
+                var array = new char[rnd.Next(1024)];
+                Assert.AreEqual(RamUsageEstimator.SizeOf(array), RamUsageEstimator.SizeOf((Object)array));
+            }
+
+            {
+                var array = new short[rnd.Next(1024)];
+                Assert.AreEqual(RamUsageEstimator.SizeOf(array), RamUsageEstimator.SizeOf((Object)array));
+            }
+
+            {
+                var array = new int[rnd.Next(1024)];
+                Assert.AreEqual(RamUsageEstimator.SizeOf(array), RamUsageEstimator.SizeOf((Object)array));
+            }
+
+            {
+                var array = new float[rnd.Next(1024)];
+                Assert.AreEqual(RamUsageEstimator.SizeOf(array), RamUsageEstimator.SizeOf((Object)array));
+            }
+
+            {
+                var array = new long[rnd.Next(1024)];
+                Assert.AreEqual(RamUsageEstimator.SizeOf(array), RamUsageEstimator.SizeOf((Object)array));
+            }
+
+            {
+                var array = new double[rnd.Next(1024)];
+                Assert.AreEqual(RamUsageEstimator.SizeOf(array), RamUsageEstimator.SizeOf((Object)array));
+            }
+        }
+
         [Test]
-		public virtual void  TestBasic()
-		{
-			System.String string_Renamed = new System.Text.StringBuilder("test str").ToString();
-			RamUsageEstimator rue = new RamUsageEstimator();
-			long size = rue.EstimateRamUsage(string_Renamed);
-			System.Console.Out.WriteLine("size:" + size);
-			
-			string_Renamed = new System.Text.StringBuilder("test strin").ToString();
-			size = rue.EstimateRamUsage(string_Renamed);
-			System.Console.Out.WriteLine("size:" + size);
-			
-			Holder holder = new Holder();
-			holder.holder = new Holder("string2", 5000L);
-			size = rue.EstimateRamUsage(holder);
-			System.Console.Out.WriteLine("size:" + size);
-			
-			System.String[] strings = new System.String[]{new System.Text.StringBuilder("test strin").ToString(), new System.Text.StringBuilder("hollow").ToString(), new System.Text.StringBuilder("catchmaster").ToString()};
-			size = rue.EstimateRamUsage(strings);
-			System.Console.Out.WriteLine("size:" + size);
-		}
-		
-		private sealed class Holder
-		{
-			internal long field1 = 5000L;
-			internal System.String name = "name";
-			internal Holder holder;
-			
-			internal Holder()
-			{
-			}
-			
-			internal Holder(System.String name, long field1)
-			{
-				this.name = name;
-				this.field1 = field1;
-			}
-		}
-	}
+        public void TestReferenceSize()
+        {
+            if (!RamUsageEstimator.IsSupportedJVM())
+            {
+                Console.Error.WriteLine("WARN: Your JVM does not support certain Oracle/Sun extensions.");
+                Console.Error.WriteLine(" Memory estimates may be inaccurate.");
+                Console.Error.WriteLine(" Please report this to the Lucene mailing list.");
+                Console.Error.WriteLine("JVM version: " + RamUsageEstimator.JVM_INFO_STRING);
+                Console.Error.WriteLine("UnsupportedFeatures:");
+                foreach (var f in RamUsageEstimator.GetUnsupportedFeatures())
+                {
+                    Console.Error.Write(" - " + f.ToString());
+                    if (f == RamUsageEstimator.JvmFeature.OBJECT_ALIGNMENT)
+                    {
+                        Console.Error.Write("; Please note: 32bit Oracle/Sun VMs don't allow exact OBJECT_ALIGNMENT retrieval, this is a known issue.");
+                    }
+                    Console.Error.WriteLine();
+                }
+            }
+
+            Assert.IsTrue(RamUsageEstimator.NUM_BYTES_OBJECT_REF == 4 || RamUsageEstimator.NUM_BYTES_OBJECT_REF == 8);
+            if (!Constants.JRE_IS_64BIT)
+            {
+                Assert.AreEqual(4, RamUsageEstimator.NUM_BYTES_OBJECT_REF, "For 32bit JVMs, reference size must always be 4?");
+            }
+        }
+
+        private class Holder
+        {
+            long field1 = 5000L;
+            string name = "name";
+            public Holder holder;
+            long field2, field3, field4;
+
+            public Holder() { }
+
+            public Holder(string name, long field1)
+            {
+                this.name = name;
+                this.field1 = field1;
+            }
+        }
+
+        private class HolderSubclass : Holder
+        {
+            byte foo;
+            int bar;
+        }
+
+        private class HolderSubclass2 : Holder
+        {
+            // empty, only inherits all fields -> size should be identical to superclass
+        }
+    }
 }
\ No newline at end of file

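The assertions above lean on the deep/shallow distinction: SizeOf follows references, while ShallowSizeOf measures only the object itself (for an array, the header plus its reference slots). A minimal restatement of that invariant:

    // Sketch only: a populated string array must measure strictly larger
    // deep (contents included) than shallow (header + reference slots).
    var strings = new[] { "test string", "hollow", "catchmaster" };
    long deep = RamUsageEstimator.SizeOf(strings);
    long shallow = RamUsageEstimator.ShallowSizeOf(strings);
    Assert.IsTrue(deep > shallow);
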
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/27c7d0d9/test/core/Util/TestRamUsageEstimatorOnWildAnimals.cs
----------------------------------------------------------------------
diff --git a/test/core/Util/TestRamUsageEstimatorOnWildAnimals.cs b/test/core/Util/TestRamUsageEstimatorOnWildAnimals.cs
new file mode 100644
index 0000000..73351f9
--- /dev/null
+++ b/test/core/Util/TestRamUsageEstimatorOnWildAnimals.cs
@@ -0,0 +1,48 @@
+using System;
+using Lucene.Net.Util;
+using NUnit.Framework;
+
+namespace Lucene.Net.Test.Util
+{
+    [TestFixture]
+    public class TestRamUsageEstimatorOnWildAnimals : LuceneTestCase
+    {
+        public class ListElement
+        {
+            internal ListElement next;
+        }
+
+        [Test]
+        public void TestOverflowMaxChainLength()
+        {
+            var UPPERLIMIT = 100000;
+            var lower = 0;
+            var upper = UPPERLIMIT;
+
+            while (lower + 1 < upper)
+            {
+                var mid = (lower + upper) / 2;
+                try
+                {
+                    var first = new ListElement();
+                    var last = first;
+                    for (var i = 0; i < mid; i++)
+                    {
+                        last = (last.next = new ListElement());
+                    }
+                    RamUsageEstimator.SizeOf(first); // cause SOE or pass.
+                    lower = mid;
+                }
+                catch (StackOverflowException e)
+                {
+                    upper = mid;
+                }
+            }
+
+            if (lower + 1 < UPPERLIMIT)
+            {
+                Assert.Fail("Max object chain length till stack overflow: " + lower);
+            }
+        }  
+    }
+}

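One porting caveat: unlike Java's StackOverflowError, a StackOverflowException cannot be caught in .NET 2.0 and later; it tears the process down, so the catch above may never fire. A catchable probe has to live inside whatever recurses; a sketch with a hypothetical walker (assumes .NET 4.0+ for EnsureSufficientExecutionStack):

    // Sketch only: the guard converts impending stack exhaustion into a
    // catchable InsufficientExecutionStackException at each recursion level.
    static int ChainLength(ListElement e)
    {
        System.Runtime.CompilerServices.RuntimeHelpers.EnsureSufficientExecutionStack();
        return e == null ? 0 : 1 + ChainLength(e.next);
    }
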
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/27c7d0d9/test/core/Util/TestRecyclingByteBlockAllocator.cs
----------------------------------------------------------------------
diff --git a/test/core/Util/TestRecyclingByteBlockAllocator.cs b/test/core/Util/TestRecyclingByteBlockAllocator.cs
new file mode 100644
index 0000000..fed7d04
--- /dev/null
+++ b/test/core/Util/TestRecyclingByteBlockAllocator.cs
@@ -0,0 +1,137 @@
+using System;
+using System.Collections.Generic;
+using System.Linq;
+using Lucene.Net.Util;
+using NUnit.Framework;
+
+namespace Lucene.Net.Test.Util
+{
+    [TestFixture]
+    public class TestRecyclingByteBlockAllocator : LuceneTestCase
+    {
+        private Random random = new Random();
+
+        [SetUp]
+        public override void SetUp()
+        {
+            base.SetUp();
+        }
+
+        private RecyclingByteBlockAllocator newAllocator()
+        {
+            return new RecyclingByteBlockAllocator(1 << (2 + random.Next(15)),
+                random.Next(97), Counter.NewCounter());
+        }
+
+        [Test]
+        public void testAllocate()
+        {
+            var allocator = newAllocator();
+            var set = new HashSet<sbyte[]>();
+            var block = allocator.ByteBlock;
+            set.Add(block);
+            assertNotNull(block);
+            var size = block.Length;
+
+            int num = AtLeast(97);
+            for (var i = 0; i < num; i++)
+            {
+                block = allocator.ByteBlock;
+                assertNotNull(block);
+                assertEquals(size, block.Length);
+                assertTrue("block is returned twice", set.Add(block));
+                assertEquals(size * (i + 2), allocator.BytesUsed); // zero based + 1
+                assertEquals(0, allocator.NumBufferedBlocks);
+            }
+        }
+
+        [Test]
+        public void TestAllocateAndRecycle()
+        {
+            var allocator = newAllocator();
+            var allocated = new HashSet<sbyte[]>();
+
+            var block = allocator.ByteBlock;
+            allocated.Add(block);
+            assertNotNull(block);
+            var size = block.Length;
+
+            int numIters = AtLeast(97);
+            for (var i = 0; i < numIters; i++)
+            {
+                var num = 1 + random.Next(39);
+                for (var j = 0; j < num; j++)
+                {
+                    block = allocator.ByteBlock;
+                    assertNotNull(block);
+                    assertEquals(size, block.Length);
+                    assertTrue("block is returned twice", allocated.Add(block));
+                    assertEquals(size * (allocated.Count + allocator.NumBufferedBlocks), allocator
+                        .BytesUsed);
+                }
+                var array = allocated.ToArray();
+                var begin = random.Next(array.Length);
+                var end = begin + random.Next(array.Length - begin);
+                var selected = new List<sbyte[]>();
+                for (var j = begin; j < end; j++)
+                {
+                    selected.Add(array[j]);
+                }
+                allocator.RecycleByteBlocks(array, begin, end);
+                for (var j = begin; j < end; j++)
+                {
+                    assertNull(array[j]);
+                    var b = selected[0]; selected.RemoveAt(0);
+                    assertTrue(allocated.Remove(b));
+                }
+            }
+        }
+
+        [Test]
+        public void TestAllocateAndFree()
+        {
+            var allocator = newAllocator();
+            var allocated = new HashSet<sbyte[]>();
+            var freeButAllocated = 0;
+            var block = allocator.ByteBlock;
+            allocated.Add(block);
+            assertNotNull(block);
+            var size = block.Length;
+
+            int numIters = AtLeast(97);
+            for (var i = 0; i < numIters; i++)
+            {
+                var num = 1 + random.Next(39);
+                for (var j = 0; j < num; j++)
+                {
+                    block = allocator.ByteBlock;
+                    freeButAllocated = Math.Max(0, freeButAllocated - 1);
+                    assertNotNull(block);
+                    assertEquals(size, block.Length);
+                    assertTrue("block is returned twice", allocated.Add(block));
+                    assertEquals(size * (allocated.Count + allocator.NumBufferedBlocks),
+                        allocator.BytesUsed);
+                }
+
+                var array = allocated.ToArray();
+                var begin = random.Next(array.Length);
+                var end = begin + random.Next(array.Length - begin);
+                for (var j = begin; j < end; j++)
+                {
+                    var b = array[j];
+                    assertTrue(allocated.Remove(b));
+                }
+                allocator.RecycleByteBlocks(array, begin, end);
+                for (var j = begin; j < end; j++)
+                {
+                    assertNull(array[j]);
+                }
+                // randomly free blocks
+                var numFreeBlocks = allocator.NumBufferedBlocks;
+                var freeBlocks = allocator.FreeBlocks(random.Next(7 + allocator
+                    .MaxBufferedBlocks));
+                assertEquals(allocator.NumBufferedBlocks, numFreeBlocks - freeBlocks);
+            }
+        }
+    }
+}

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/27c7d0d9/test/core/Util/TestRecyclingIntBlockAllocator.cs
----------------------------------------------------------------------
diff --git a/test/core/Util/TestRecyclingIntBlockAllocator.cs b/test/core/Util/TestRecyclingIntBlockAllocator.cs
new file mode 100644
index 0000000..8c923f9
--- /dev/null
+++ b/test/core/Util/TestRecyclingIntBlockAllocator.cs
@@ -0,0 +1,138 @@
+using System;
+using System.Collections.Generic;
+using System.Linq;
+using Lucene.Net.Util;
+using NUnit.Framework;
+
+namespace Lucene.Net.Test.Util
+{
+    [TestFixture]
+    public class TestRecyclingIntBlockAllocator : LuceneTestCase
+    {
+        private Random random = new Random();
+
+        [SetUp]
+        public override void SetUp()
+        {
+            base.SetUp();
+        }
+
+        private RecyclingIntBlockAllocator NewAllocator()
+        {
+            return new RecyclingIntBlockAllocator(1 << (2 + random.Next(15)),
+                random.Next(97), Counter.NewCounter());
+        }
+
+        [Test]
+        public void TestAllocate()
+        {
+            var allocator = NewAllocator();
+            var set = new HashSet<int[]>();
+            var block = allocator.IntBlock;
+            set.Add(block);
+            assertNotNull(block);
+            var size = block.Length;
+
+            int num = AtLeast(97);
+            for (var i = 0; i < num; i++)
+            {
+                block = allocator.IntBlock;
+                assertNotNull(block);
+                assertEquals(size, block.Length);
+                assertTrue("block is returned twice", set.Add(block));
+                assertEquals(4 * size * (i + 2), allocator.BytesUsed); // zero based + 1
+                assertEquals(0, allocator.NumBufferedBlocks);
+            }
+        }
+
+        [Test]
+        public void TestAllocateAndRecycle()
+        {
+            var allocator = NewAllocator();
+            var allocated = new HashSet<int[]>();
+
+            var block = allocator.IntBlock;
+            allocated.Add(block);
+            assertNotNull(block);
+            var size = block.Length;
+
+            int numIters = AtLeast(97);
+            for (var i = 0; i < numIters; i++)
+            {
+                var num = 1 + random.Next(39);
+                for (var j = 0; j < num; j++)
+                {
+                    block = allocator.IntBlock;
+                    assertNotNull(block);
+                    assertEquals(size, block.Length);
+                    assertTrue("block is returned twice", allocated.Add(block));
+                    assertEquals(4 * size * (allocated.Count + allocator.NumBufferedBlocks), allocator
+                        .BytesUsed);
+                }
+                var array = allocated.ToArray();
+                var begin = random.Next(array.Length);
+                var end = begin + random.Next(array.Length - begin);
+                var selected = new List<int[]>();
+                for (var j = begin; j < end; j++)
+                {
+                    selected.Add(array[j]);
+                }
+                allocator.RecycleIntBlocks(array, begin, end);
+                for (var j = begin; j < end; j++)
+                {
+                    assertNull(array[j]);
+                    int[] b = selected[0]; selected.RemoveAt(0);
+                    assertTrue(allocated.Remove(b));
+                }
+            }
+        }
+
+        [Test]
+        public void TestAllocateAndFree()
+        {
+            var allocator = NewAllocator();
+            var allocated = new HashSet<int[]>();
+            var freeButAllocated = 0;
+            var block = allocator.IntBlock;
+            allocated.Add(block);
+            assertNotNull(block);
+            var size = block.Length;
+
+            int numIters = AtLeast(97);
+            for (var i = 0; i < numIters; i++)
+            {
+                var num = 1 + random.Next(39);
+                for (var j = 0; j < num; j++)
+                {
+                    block = allocator.IntBlock;
+                    freeButAllocated = Math.Max(0, freeButAllocated - 1);
+                    assertNotNull(block);
+                    assertEquals(size, block.Length);
+                    assertTrue("block is returned twice", allocated.Add(block));
+                    assertEquals("" + (4 * size * (allocated.Count + allocator.NumBufferedBlocks) - allocator.BytesUsed),
+                        4 * size * (allocated.Count + allocator.NumBufferedBlocks),
+                        allocator.BytesUsed);
+                }
+
+                var array = allocated.ToArray();
+                var begin = random.Next(array.Length);
+                var end = begin + random.Next(array.Length - begin);
+                for (var j = begin; j < end; j++)
+                {
+                    var b = array[j];
+                    assertTrue(allocated.Remove(b));
+                }
+                allocator.RecycleIntBlocks(array, begin, end);
+                for (var j = begin; j < end; j++)
+                {
+                    assertNull(array[j]);
+                }
+                // randomly free blocks
+                var numFreeBlocks = allocator.NumBufferedBlocks;
+                var freeBlocks = allocator.FreeBlocks(random.Next(7 + allocator
+                    .MaxBufferedBlocks));
+                assertEquals(allocator.NumBufferedBlocks, numFreeBlocks - freeBlocks);
+            }
+        }
+    }
+}

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/27c7d0d9/test/core/Util/TestRollingBuffer.cs
----------------------------------------------------------------------
diff --git a/test/core/Util/TestRollingBuffer.cs b/test/core/Util/TestRollingBuffer.cs
new file mode 100644
index 0000000..b68b3e4
--- /dev/null
+++ b/test/core/Util/TestRollingBuffer.cs
@@ -0,0 +1,83 @@
+using System;
+using Lucene.Net.Util;
+using NUnit.Framework;
+
+namespace Lucene.Net.Test.Util
+{
+    [TestFixture]
+    public class TestRollingBuffer : LuceneTestCase
+    {
+        private class Position : RollingBuffer.Resettable
+        {
+            public int pos;
+
+            public void Reset()
+            {
+                pos = -1;
+            }
+        }
+
+        private sealed class AnonymousRollingBuffer : RollingBuffer<Position>
+        {
+            protected override Position NewInstance()
+            {
+                var pos = new Position { pos = -1 };
+                return pos;
+            }
+        }
+
+        [Test]
+        public void Test()
+        {
+            RollingBuffer<Position> buffer = new AnonymousRollingBuffer();
+
+            for (var iter = 0; iter < 100 * RANDOM_MULTIPLIER; iter++)
+            {
+
+                var freeBeforePos = 0;
+                int maxPos = AtLeast(10000);
+                var posSet = new FixedBitSet(maxPos + 1000);
+                var posUpto = 0;
+                var random = new Random();
+                while (freeBeforePos < maxPos)
+                {
+                    if (random.Next(4) == 1)
+                    {
+                        var limit = Rarely() ? 1000 : 20;
+                        var inc = random.Next(limit);
+                        var pos = freeBeforePos + inc;
+                        posUpto = Math.Max(posUpto, pos);
+                        if (VERBOSE)
+                        {
+                            Console.WriteLine("  check pos=" + pos + " posUpto=" + posUpto);
+                        }
+                        var posData = buffer.Get(pos);
+                        if (!posSet.GetAndSet(pos))
+                        {
+                            assertEquals(-1, posData.pos);
+                            posData.pos = pos;
+                        }
+                        else
+                        {
+                            assertEquals(pos, posData.pos);
+                        }
+                    }
+                    else
+                    {
+                        if (posUpto > freeBeforePos)
+                        {
+                            freeBeforePos += random.Next(posUpto - freeBeforePos);
+                        }
+                        if (VERBOSE)
+                        {
+                            Console.WriteLine("  freeBeforePos=" + freeBeforePos);
+                        }
+                        buffer.FreeBefore(freeBeforePos);
+                    }
+                }
+
+                buffer.Reset();
+            }
+        }
+    }
+}

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/27c7d0d9/test/core/Util/TestSentineIntSet.cs
----------------------------------------------------------------------
diff --git a/test/core/Util/TestSentineIntSet.cs b/test/core/Util/TestSentineIntSet.cs
new file mode 100644
index 0000000..5120ae8
--- /dev/null
+++ b/test/core/Util/TestSentineIntSet.cs
@@ -0,0 +1,65 @@
+using System;
+using System.Collections.Generic;
+using Lucene.Net.Test.Support;
+using Lucene.Net.Util;
+using NUnit.Framework;
+
+namespace Lucene.Net.Test.Util
+{
+    [TestFixture]
+    public class TestSentineIntSet : LuceneTestCase
+    {
+        private Random random = new Random();
+
+        [Test]
+        public void test()
+        {
+            var set = new SentinelIntSet(10, -1);
+            Assert.IsFalse(set.Exists(50));
+            set.Put(50);
+            assertTrue(set.Exists(50));
+            assertEquals(1, set.Size);
+            assertEquals(-11, set.Find(10));
+            assertEquals(1, set.Size);
+            set.Clear();
+            assertEquals(0, set.Size);
+            assertEquals(50, set.Hash(50));
+            //force a rehash
+            for (int i = 0; i < 20; i++)
+            {
+                set.Put(i);
+            }
+            assertEquals(20, set.Size);
+            assertEquals(24, set.rehashCount);
+        }
+
+
+        [Test]
+        public void TestRandom()
+        {
+            for (var i = 0; i < 10000; i++)
+            {
+                var initSz = random.Next(20);
+                var num = random.Next(30);
+                var maxVal = (random.NextBool() ? random.Next(50) : random.Next(int.MaxValue)) + 1;
+
+                var a = new HashSet<int>();
+                var b = new SentinelIntSet(initSz, -1);
+
+                for (var j = 0; j < num; j++)
+                {
+                    var val = random.Next(maxVal);
+                    var exists = !a.Add(val);
+                    var existsB = b.Exists(val);
+                    assertEquals(exists, existsB);
+                    var slot = b.Find(val);
+                    assertEquals(exists, slot >= 0);
+                    b.Put(val);
+
+                    assertEquals(a.Count, b.Size);
+                }
+            }
+
+        }
+    }
+}

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/27c7d0d9/test/core/Util/TestSetOnce.cs
----------------------------------------------------------------------
diff --git a/test/core/Util/TestSetOnce.cs b/test/core/Util/TestSetOnce.cs
new file mode 100644
index 0000000..8df4ce3
--- /dev/null
+++ b/test/core/Util/TestSetOnce.cs
@@ -0,0 +1,103 @@
+using System;
+using System.Threading;
+using Lucene.Net.Support;
+using Lucene.Net.Util;
+using NUnit.Framework;
+
+namespace Lucene.Net.Test.Util
+{
+    [TestFixture]
+    public class TestSetOnce : LuceneTestCase
+    {
+        private class SetOnceThread : ThreadClass
+        {
+            internal SetOnce<int> set;
+            internal bool success = false;
+            internal Random RAND;
+
+            public SetOnceThread(Random random)
+            {
+                RAND = new Random(random.Next());
+            }
+
+            public override void Run()
+            {
+                try
+                {
+                    Sleep(RAND.Next(10)); // sleep for a short time
+                    set.Set(int.Parse(Name.Substring(2)));
+                    success = true;
+                }
+                catch (ThreadInterruptedException e)
+                {
+                    // ignore
+                }
+                catch (SystemException e)
+                {
+                    // TODO: change exception type
+                    // expected.
+                    success = false;
+                }
+            }
+        }
+
+        //@Test
+        [Test]
+        public void TestEmptyCtor()
+        {
+            var set = new SetOnce<int>();
+            assertNull(set.Get());
+        }
+
+        //@Test(expected=SetOnce<>.AlreadySetException.class)
+        [Test]
+        [ExpectedException(typeof(SetOnce<>.AlreadySetException))]
+        public void TestSettingCtor()
+        {
+            var set = new SetOnce<int>(5);
+            assertEquals(5, set.Get());
+            set.Set(7);
+        }
+
+        //@Test(expected=SetOnce<>.AlreadySetException.class)
+        [Test]
+        [ExpectedException(typeof(SetOnce<>.AlreadySetException))]
+        public void testSetOnce()
+        {
+            var set = new SetOnce<int>();
+            set.Set(5);
+            assertEquals(5, set.Get());
+            set.Set(7);
+        }
+
+        [Test]
+        public void TestSetMultiThreaded()
+        {
+            var set = new SetOnce<int>();
+            var threads = new SetOnceThread[10];
+            for (var i = 0; i < threads.Length; i++)
+            {
+                threads[i] = new SetOnceThread(new Random()) {Name = "t-" + (i + 1), set = set};
+            }
+
+            foreach (var t in threads)
+            {
+                t.Start();
+            }
+
+            foreach (var t in threads)
+            {
+                t.Join();
+            }
+
+            foreach (SetOnceThread t in threads)
+            {
+                if (t.success)
+                {
+                    var expectedVal = int.Parse(t.Name.Substring(2));
+                    assertEquals("thread " + t.Name, expectedVal, t.set.Get());
+                }
+            }
+        }
+    }
+}

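A note on the ExpectedException attributes above: typeof(SetOnce<>.AlreadySetException) names the open generic nested type, which is a different System.Type from the thrown SetOnce<int>.AlreadySetException, so an exact-type match may never succeed. A closed-type sketch using NUnit's Assert.Throws (assuming AlreadySetException derives from Exception):

    // Sketch only: match the closed generic exception type explicitly.
    [Test]
    public void TestSettingCtorThrows()
    {
        var set = new SetOnce<int>(5);
        Assert.Throws<SetOnce<int>.AlreadySetException>(() => set.Set(7));
    }
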

[32/50] [abbrv] git commit: Correct bug preventing search from working

Posted by mh...@apache.org.
Correct bug preventing search from working


Project: http://git-wip-us.apache.org/repos/asf/lucenenet/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucenenet/commit/37289caa
Tree: http://git-wip-us.apache.org/repos/asf/lucenenet/tree/37289caa
Diff: http://git-wip-us.apache.org/repos/asf/lucenenet/diff/37289caa

Branch: refs/heads/branch_4x
Commit: 37289caa48c5b2887ea15fca8f23efd629bd55ac
Parents: 02797a9
Author: Paul Irwin <pa...@gmail.com>
Authored: Wed Aug 7 09:16:17 2013 -0400
Committer: Paul Irwin <pa...@gmail.com>
Committed: Wed Aug 7 09:16:17 2013 -0400

----------------------------------------------------------------------
 src/core/Index/CompositeReaderContext.cs | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucenenet/blob/37289caa/src/core/Index/CompositeReaderContext.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/CompositeReaderContext.cs b/src/core/Index/CompositeReaderContext.cs
index eb965fa..844475e 100644
--- a/src/core/Index/CompositeReaderContext.cs
+++ b/src/core/Index/CompositeReaderContext.cs
@@ -21,7 +21,7 @@ namespace Lucene.Net.Index
          * not top-level readers in the current context
          */
         internal CompositeReaderContext(CompositeReaderContext parent, CompositeReader reader, int ordInParent,
-            int docbaseInParent, List<IndexReaderContext> children)
+            int docbaseInParent, IList<IndexReaderContext> children)
             : this(parent, reader, ordInParent, docbaseInParent, children, null)
         {
         }
@@ -29,7 +29,7 @@ namespace Lucene.Net.Index
         /**
          * Creates a {@link CompositeReaderContext} for top-level readers with parent set to <code>null</code>
          */
-        internal CompositeReaderContext(CompositeReader reader, List<IndexReaderContext> children, List<AtomicReaderContext> leaves)
+        internal CompositeReaderContext(CompositeReader reader, IList<IndexReaderContext> children, IList<AtomicReaderContext> leaves)
             : this(null, reader, 0, 0, children, leaves)
         {
         }
@@ -39,7 +39,7 @@ namespace Lucene.Net.Index
             : base(parent, ordInParent, docbaseInParent)
         {
             this.children = children.ToArray();
-            this.leaves = leaves == null ? null : leaves.ToArray();
+            this.leaves = leaves == null ? null : leaves;
             this.reader = reader;
         }
         
@@ -73,7 +73,7 @@ namespace Lucene.Net.Index
         private class Builder
         {
             private readonly CompositeReader reader;
-            private readonly List<AtomicReaderContext> leaves = new List<AtomicReaderContext>();
+            private readonly IList<AtomicReaderContext> leaves = new List<AtomicReaderContext>();
             private int leafDocBase = 0;
 
             public Builder(CompositeReader reader)

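Reading the Builder above, the likely mechanism of the bug: the top-level context is constructed while the builder is still appending to the shared leaves list, so snapshotting it with ToArray() captured an incomplete set of leaves and searches saw no segments. Keeping the IList reference lets later additions show through. An illustration of the aliasing (hypothetical setup; the real constructor is internal):

    // Sketch only: the context must alias the live list, not a snapshot.
    var leaves = new List<AtomicReaderContext>();
    var top = new CompositeReaderContext(reader, children, leaves); // shares the list
    leaves.Add(someLeafContext); // now visible through top's leaves as well
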

[37/50] [abbrv] git commit: Finish up Lucene.Net.Analysis.Core

Posted by mh...@apache.org.
Finish up Lucene.Net.Analysis.Core


Project: http://git-wip-us.apache.org/repos/asf/lucenenet/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucenenet/commit/98e877d5
Tree: http://git-wip-us.apache.org/repos/asf/lucenenet/tree/98e877d5
Diff: http://git-wip-us.apache.org/repos/asf/lucenenet/diff/98e877d5

Branch: refs/heads/branch_4x
Commit: 98e877d50c803e381fcc92250068f366c1dc6c4c
Parents: d72f5c1
Author: Paul Irwin <pa...@gmail.com>
Authored: Wed Aug 7 15:02:40 2013 -0400
Committer: Paul Irwin <pa...@gmail.com>
Committed: Wed Aug 7 15:02:40 2013 -0400

----------------------------------------------------------------------
 src/contrib/Analyzers/Contrib.Analyzers.csproj  | 136 ++++++-------------
 src/contrib/Analyzers/Core/LetterTokenizer.cs   |   4 +-
 src/contrib/Analyzers/Core/LowerCaseFilter.cs   |  34 +++++
 .../Analyzers/Core/LowerCaseFilterFactory.cs    |  31 +++++
 .../Analyzers/Core/LowerCaseTokenizer.cs        |  27 ++++
 .../Analyzers/Core/LowerCaseTokenizerFactory.cs |  32 +++++
 src/contrib/Analyzers/Core/SimpleAnalyzer.cs    |  23 ++++
 src/contrib/Analyzers/Core/StopAnalyzer.cs      |  55 ++++++++
 src/contrib/Analyzers/Core/StopFilter.cs        |  53 ++++++++
 src/contrib/Analyzers/Core/StopFilterFactory.cs |  81 +++++++++++
 src/contrib/Analyzers/Core/TypeTokenFilter.cs   |  34 +++++
 .../Analyzers/Core/TypeTokenFilterFactory.cs    |  63 +++++++++
 .../Analyzers/Core/WhitespaceAnalyzer.cs        |  23 ++++
 .../Analyzers/Core/WhitespaceTokenizer.cs       |  28 ++++
 .../Core/WhitespaceTokenizerFactory.cs          |  26 ++++
 src/contrib/Analyzers/Support/AbstractSet.cs    |   2 +-
 .../Analyzers/Util/AbstractAnalysisFactory.cs   |  11 +-
 src/contrib/Analyzers/Util/CharArrayMap.cs      |  73 +++++++++-
 src/contrib/Analyzers/Util/CharArraySet.cs      |  17 ++-
 src/contrib/Analyzers/Util/CharTokenizer.cs     |   4 +-
 src/contrib/Analyzers/Util/CharacterUtils.cs    |   2 +-
 .../Analyzers/Util/FilteringTokenFilter.cs      |  77 +++++++++++
 .../Analyzers/Util/IMultiTermAwareComponent.cs  |  12 ++
 .../Analyzers/Util/IResourceLoaderAware.cs      |  12 ++
 .../Analyzers/Util/StopwordAnalyzerBase.cs      |  10 +-
 .../Analyzers/Util/TokenFilterFactory.cs        |  44 ++++++
 src/contrib/Analyzers/Util/WordlistLoader.cs    |   4 +-
 27 files changed, 792 insertions(+), 126 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucenenet/blob/98e877d5/src/contrib/Analyzers/Contrib.Analyzers.csproj
----------------------------------------------------------------------
diff --git a/src/contrib/Analyzers/Contrib.Analyzers.csproj b/src/contrib/Analyzers/Contrib.Analyzers.csproj
index 8613c88..74b0f63 100644
--- a/src/contrib/Analyzers/Contrib.Analyzers.csproj
+++ b/src/contrib/Analyzers/Contrib.Analyzers.csproj
@@ -103,116 +103,39 @@
     <Reference Condition="'$(Framework)' == 'NET35'" Include="System.Core" />
   </ItemGroup>
   <ItemGroup>
-    <Compile Include="AR\ArabicAnalyzer.cs" />
-    <Compile Include="AR\ArabicLetterTokenizer.cs" />
-    <Compile Include="AR\ArabicNormalizationFilter.cs" />
-    <Compile Include="AR\ArabicNormalizer.cs" />
-    <Compile Include="AR\ArabicStemFilter.cs" />
-    <Compile Include="AR\ArabicStemmer.cs" />
-    <Compile Include="BR\BrazilianAnalyzer.cs" />
-    <Compile Include="BR\BrazilianStemFilter.cs" />
-    <Compile Include="BR\BrazilianStemmer.cs" />
-    <Compile Include="CJK\CJKAnalyzer.cs" />
-    <Compile Include="CJK\CJKTokenizer.cs" />
-    <Compile Include="Cn\ChineseAnalyzer.cs" />
-    <Compile Include="Cn\ChineseFilter.cs" />
-    <Compile Include="Cn\ChineseTokenizer.cs" />
-    <Compile Include="Compound\CompoundWordTokenFilterBase.cs" />
-    <Compile Include="Compound\DictionaryCompoundWordTokenFilter.cs" />
-    <Compile Include="Compound\HyphenationCompoundWordTokenFilter.cs" />
-    <Compile Include="Compound\Hyphenation\ByteVector.cs" />
-    <Compile Include="Compound\Hyphenation\CharVector.cs" />
-    <Compile Include="Compound\Hyphenation\Hyphen.cs" />
-    <Compile Include="Compound\Hyphenation\Hyphenation.cs" />
-    <Compile Include="Compound\Hyphenation\HyphenationException.cs" />
-    <Compile Include="Compound\Hyphenation\HyphenationTree.cs" />
-    <Compile Include="Compound\Hyphenation\PatternConsumer.cs" />
-    <Compile Include="Compound\Hyphenation\PatternParser.cs" />
-    <Compile Include="Compound\Hyphenation\TernaryTree.cs" />
     <Compile Include="Core\KeywordAnalyzer.cs" />
     <Compile Include="Core\KeywordTokenizer.cs" />
     <Compile Include="Core\KeywordTokenizerFactory.cs" />
     <Compile Include="Core\LetterTokenizer.cs" />
     <Compile Include="Core\LetterTokenizerFactory.cs" />
-    <Compile Include="Cz\CzechAnalyzer.cs" />
-    <Compile Include="De\GermanAnalyzer.cs" />
-    <Compile Include="De\GermanStemFilter.cs" />
-    <Compile Include="De\GermanStemmer.cs" />
-    <Compile Include="De\GermanDIN2Stemmer.cs" />
-    <Compile Include="El\GreekAnalyzer.cs" />
-    <Compile Include="El\GreekLowerCaseFilter.cs" />
-    <Compile Include="Fa\PersianAnalyzer.cs" />
-    <Compile Include="Fa\PersianNormalizationFilter.cs" />
-    <Compile Include="Fa\PersianNormalizer.cs" />
-    <Compile Include="Fr\ElisionFilter.cs" />
-    <Compile Include="Fr\FrenchAnalyzer.cs" />
-    <Compile Include="Fr\FrenchStemFilter.cs" />
-    <Compile Include="Fr\FrenchStemmer.cs" />
-    <Compile Include="Hunspell\HunspellAffix.cs" />
-    <Compile Include="Hunspell\HunspellDictionary.cs" />
-    <Compile Include="Hunspell\HunspellStem.cs" />
-    <Compile Include="Hunspell\HunspellStemFilter.cs" />
-    <Compile Include="Hunspell\HunspellStemmer.cs" />
-    <Compile Include="Hunspell\HunspellWord.cs" />
-    <Compile Include="Miscellaneous\EmptyTokenStream.cs" />
-    <Compile Include="Miscellaneous\InjectablePrefixAwareTokenFilter.cs" />
-    <Compile Include="Miscellaneous\PatternAnalyzer.cs" />
-    <Compile Include="Miscellaneous\PrefixAndSuffixAwareTokenFilter.cs" />
-    <Compile Include="Miscellaneous\PrefixAwareTokenStream.cs" />
-    <Compile Include="Miscellaneous\SingleTokenTokenStream.cs" />
-    <Compile Include="NGram\EdgeNGramTokenFilter.cs" />
-    <Compile Include="NGram\EdgeNGramTokenizer.cs" />
-    <Compile Include="NGram\NGramTokenFilter.cs" />
-    <Compile Include="NGram\NGramTokenizer.cs" />
-    <Compile Include="Nl\DutchAnalyzer.cs" />
-    <Compile Include="Nl\DutchStemFilter.cs" />
-    <Compile Include="Nl\DutchStemmer.cs" />
-    <Compile Include="Payloads\AbstractEncoder.cs" />
-    <Compile Include="Payloads\DelimitedPayloadTokenFilter.cs" />
-    <Compile Include="Payloads\FloatEncoder.cs" />
-    <Compile Include="Payloads\IdentityEncoder.cs" />
-    <Compile Include="Payloads\IntegerEncoder.cs" />
-    <Compile Include="Payloads\NumericPayloadTokenFilter.cs" />
-    <Compile Include="Payloads\PayloadEncoder.cs" />
-    <Compile Include="Payloads\PayloadHelper.cs" />
-    <Compile Include="Payloads\TokenOffsetPayloadTokenFilter.cs" />
-    <Compile Include="Payloads\TypeAsPayloadTokenFilter.cs" />
-    <Compile Include="Position\PositionFilter.cs" />
-    <Compile Include="Query\QueryAutoStopWordAnalyzer.cs" />
-    <Compile Include="Reverse\ReverseStringFilter.cs" />
-    <Compile Include="Ru\RussianAnalyzer.cs" />
-    <Compile Include="Ru\RussianLetterTokenizer.cs" />
-    <Compile Include="Ru\RussianLowerCaseFilter.cs" />
-    <Compile Include="Ru\RussianStemFilter.cs" />
-    <Compile Include="Ru\RussianStemmer.cs" />
+    <Compile Include="Core\LowerCaseFilter.cs" />
+    <Compile Include="Core\LowerCaseFilterFactory.cs" />
+    <Compile Include="Core\LowerCaseTokenizer.cs" />
+    <Compile Include="Core\LowerCaseTokenizerFactory.cs" />
+    <Compile Include="Core\SimpleAnalyzer.cs" />
+    <Compile Include="Core\StopAnalyzer.cs" />
+    <Compile Include="Core\StopFilter.cs" />
+    <Compile Include="Core\StopFilterFactory.cs" />
+    <Compile Include="Core\TypeTokenFilter.cs" />
+    <Compile Include="Core\TypeTokenFilterFactory.cs" />
+    <Compile Include="Core\WhitespaceAnalyzer.cs" />
+    <Compile Include="Core\WhitespaceTokenizer.cs" />
+    <Compile Include="Core\WhitespaceTokenizerFactory.cs" />
     <Compile Include="Properties\AssemblyInfo.cs" />
-    <Compile Include="Shingle\Matrix\Column.cs" />
-    <Compile Include="Shingle\Matrix\Matrix.cs" />
-    <Compile Include="Shingle\Matrix\MatrixPermutationIterator.cs" />
-    <Compile Include="Shingle\Matrix\Row.cs" />
-    <Compile Include="Shingle\ShingleAnalyzerWrapper.cs" />
-    <Compile Include="Shingle\ShingleFilter.cs" />
-    <Compile Include="Shingle\ShingleMatrixFilter.cs" />
-    <Compile Include="Shingle\TokenPositioner.cs" />
-    <Compile Include="Shingle\Codec\OneDimensionalNonWeightedTokenSettingsCodec.cs" />
-    <Compile Include="Shingle\Codec\SimpleThreeDimensionalTokenSettingsCodec.cs" />
-    <Compile Include="Shingle\Codec\TokenSettingsCodec.cs" />
-    <Compile Include="Shingle\Codec\TwoDimensionalNonWeightedSynonymTokenSettingsCodec.cs" />
-    <Compile Include="Sinks\DateRecognizerSinkFilter.cs" />
-    <Compile Include="Sinks\TokenRangeSinkFilter.cs" />
-    <Compile Include="Sinks\TokenTypeSinkFilter.cs" />
     <Compile Include="Support\AbstractSet.cs" />
     <Compile Include="Support\StringExtensions.cs" />
-    <Compile Include="Th\ThaiAnalyzer.cs" />
-    <Compile Include="Th\ThaiWordFilter.cs" />
     <Compile Include="Util\AbstractAnalysisFactory.cs" />
     <Compile Include="Util\AnalysisSPILoader.cs" />
     <Compile Include="Util\CharacterUtils.cs" />
     <Compile Include="Util\CharArrayMap.cs" />
     <Compile Include="Util\CharArraySet.cs" />
     <Compile Include="Util\CharTokenizer.cs" />
+    <Compile Include="Util\FilteringTokenFilter.cs" />
+    <Compile Include="Util\IMultiTermAwareComponent.cs" />
     <Compile Include="Util\IResourceLoader.cs" />
+    <Compile Include="Util\IResourceLoaderAware.cs" />
     <Compile Include="Util\StopwordAnalyzerBase.cs" />
+    <Compile Include="Util\TokenFilterFactory.cs" />
     <Compile Include="Util\TokenizerFactory.cs" />
     <Compile Include="Util\WordlistLoader.cs" />
     <Compile Include="WordlistLoader.cs" />
@@ -227,9 +150,32 @@
     </ProjectReference>
   </ItemGroup>
   <ItemGroup>
-    <None Include="Compound\Hyphenation\hyphenation.dtd" />
     <None Include="Lucene.Net.snk" />
   </ItemGroup>
+  <ItemGroup>
+    <Folder Include="BR\" />
+    <Folder Include="CJK\" />
+    <Folder Include="Cn\" />
+    <Folder Include="Compound\Hyphenation\" />
+    <Folder Include="Cz\" />
+    <Folder Include="De\" />
+    <Folder Include="El\" />
+    <Folder Include="Fa\" />
+    <Folder Include="Fr\" />
+    <Folder Include="Hunspell\" />
+    <Folder Include="Miscellaneous\" />
+    <Folder Include="NGram\" />
+    <Folder Include="Nl\" />
+    <Folder Include="Payloads\" />
+    <Folder Include="Position\" />
+    <Folder Include="Query\" />
+    <Folder Include="Reverse\" />
+    <Folder Include="Ru\" />
+    <Folder Include="Shingle\Codec\" />
+    <Folder Include="Shingle\Matrix\" />
+    <Folder Include="Sinks\" />
+    <Folder Include="Th\" />
+  </ItemGroup>
   <Import Project="$(MSBuildToolsPath)\Microsoft.CSharp.targets" />
   <!-- To modify your build process, add your task inside one of the targets below and uncomment it. 
        Other similar extension points exist, see Microsoft.Common.targets.

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/98e877d5/src/contrib/Analyzers/Core/LetterTokenizer.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Analyzers/Core/LetterTokenizer.cs b/src/contrib/Analyzers/Core/LetterTokenizer.cs
index 669d8dc..a4e4938 100644
--- a/src/contrib/Analyzers/Core/LetterTokenizer.cs
+++ b/src/contrib/Analyzers/Core/LetterTokenizer.cs
@@ -10,12 +10,12 @@ namespace Lucene.Net.Analysis.Core
 {
     public class LetterTokenizer : CharTokenizer
     {
-        public LetterTokenizer(Version matchVersion, TextReader input)
+        public LetterTokenizer(Version? matchVersion, TextReader input)
             : base(matchVersion, input)
         {
         }
 
-        public LetterTokenizer(Version matchVersion, AttributeFactory factory, TextReader input)
+        public LetterTokenizer(Version? matchVersion, AttributeFactory factory, TextReader input)
             : base(matchVersion, factory, input)
         {
         }

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/98e877d5/src/contrib/Analyzers/Core/LowerCaseFilter.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Analyzers/Core/LowerCaseFilter.cs b/src/contrib/Analyzers/Core/LowerCaseFilter.cs
new file mode 100644
index 0000000..d0157f5
--- /dev/null
+++ b/src/contrib/Analyzers/Core/LowerCaseFilter.cs
@@ -0,0 +1,34 @@
+using Lucene.Net.Analysis.Tokenattributes;
+using Lucene.Net.Analysis.Util;
+using System;
+using System.Collections.Generic;
+using System.Linq;
+using System.Text;
+using Version = Lucene.Net.Util.Version;
+
+namespace Lucene.Net.Analysis.Core
+{
+    public sealed class LowerCaseFilter : TokenFilter
+    {
+        private readonly CharacterUtils charUtils;
+        private readonly ICharTermAttribute termAtt; // = addAttribute(CharTermAttribute.class);
+
+        public LowerCaseFilter(Version? matchVersion, TokenStream input)
+            : base(input)
+        {
+            charUtils = CharacterUtils.GetInstance(matchVersion);
+            termAtt = AddAttribute<ICharTermAttribute>();
+        }
+
+        public override bool IncrementToken()
+        {
+            if (input.IncrementToken())
+            {
+                charUtils.ToLowerCase(termAtt.Buffer, 0, termAtt.Length);
+                return true;
+            }
+            else
+                return false;
+        }
+    }
+}

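A usage sketch for the new filter, wired into an analyzer's component chain (Version.LUCENE_43 is an assumed constant for this branch):

    // Sketch only: lowercase the output of a tokenizer.
    public override TokenStreamComponents CreateComponents(string fieldName, System.IO.TextReader reader)
    {
        var source = new LetterTokenizer(Version.LUCENE_43, reader);
        TokenStream result = new LowerCaseFilter(Version.LUCENE_43, source);
        return new TokenStreamComponents(source, result);
    }
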
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/98e877d5/src/contrib/Analyzers/Core/LowerCaseFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Analyzers/Core/LowerCaseFilterFactory.cs b/src/contrib/Analyzers/Core/LowerCaseFilterFactory.cs
new file mode 100644
index 0000000..6ea42e9
--- /dev/null
+++ b/src/contrib/Analyzers/Core/LowerCaseFilterFactory.cs
@@ -0,0 +1,31 @@
+using Lucene.Net.Analysis.Util;
+using System;
+using System.Collections.Generic;
+using System.Linq;
+using System.Text;
+
+namespace Lucene.Net.Analysis.Core
+{
+    public class LowerCaseFilterFactory : TokenFilterFactory, IMultiTermAwareComponent
+    {
+        public LowerCaseFilterFactory(IDictionary<String, String> args)
+            : base(args)
+        {
+            AssureMatchVersion();
+            if (args.Count > 0)
+            {
+                throw new ArgumentException("Unknown parameters: " + args);
+            }
+        }
+        
+        public override TokenStream Create(TokenStream input)
+        {
+            return new LowerCaseFilter(luceneMatchVersion, input);
+        }
+
+        public AbstractAnalysisFactory MultiTermComponent
+        {
+            get { return this; }
+        }
+    }
+}

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/98e877d5/src/contrib/Analyzers/Core/LowerCaseTokenizer.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Analyzers/Core/LowerCaseTokenizer.cs b/src/contrib/Analyzers/Core/LowerCaseTokenizer.cs
new file mode 100644
index 0000000..34d4a23
--- /dev/null
+++ b/src/contrib/Analyzers/Core/LowerCaseTokenizer.cs
@@ -0,0 +1,27 @@
+using System;
+using System.Collections.Generic;
+using System.IO;
+using System.Linq;
+using System.Text;
+using Version = Lucene.Net.Util.Version;
+
+namespace Lucene.Net.Analysis.Core
+{
+    public sealed class LowerCaseTokenizer : LetterTokenizer
+    {
+        public LowerCaseTokenizer(Version? matchVersion, TextReader input)
+            : base(matchVersion, input)
+        {
+        }
+
+        public LowerCaseTokenizer(Version? matchVersion, AttributeFactory factory, TextReader input)
+            : base(matchVersion, factory, input)
+        {
+        }
+
+        protected override int Normalize(int c)
+        {
+            return (int)char.ToLower((char)c); // .NET Port: char.ToLower covers BMP chars only; the (char) cast truncates supplementary code points
+        }
+    }
+}

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/98e877d5/src/contrib/Analyzers/Core/LowerCaseTokenizerFactory.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Analyzers/Core/LowerCaseTokenizerFactory.cs b/src/contrib/Analyzers/Core/LowerCaseTokenizerFactory.cs
new file mode 100644
index 0000000..316f775
--- /dev/null
+++ b/src/contrib/Analyzers/Core/LowerCaseTokenizerFactory.cs
@@ -0,0 +1,32 @@
+using Lucene.Net.Analysis.Util;
+using Lucene.Net.Support;
+using System;
+using System.Collections.Generic;
+using System.Linq;
+using System.Text;
+
+namespace Lucene.Net.Analysis.Core
+{
+    public class LowerCaseTokenizerFactory : TokenizerFactory, IMultiTermAwareComponent
+    {
+        public LowerCaseTokenizerFactory(IDictionary<String, String> args)
+            : base(args)
+        {
+            AssureMatchVersion();
+            if (args.Count > 0)
+            {
+                throw new ArgumentException("Unknown parameters: " + string.Join(", ", args.Keys)); // .NET Port: IDictionary.ToString() prints only the type name
+            }
+        }
+
+        public override Tokenizer Create(Net.Util.AttributeSource.AttributeFactory factory, System.IO.TextReader input)
+        {
+            return new LowerCaseTokenizer(luceneMatchVersion, factory, input);
+        }
+        
+        public AbstractAnalysisFactory MultiTermComponent
+        {
+            get { return new LowerCaseFilterFactory(new HashMap<String, String>(OriginalArgs)); }
+        }
+    }
+}

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/98e877d5/src/contrib/Analyzers/Core/SimpleAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Analyzers/Core/SimpleAnalyzer.cs b/src/contrib/Analyzers/Core/SimpleAnalyzer.cs
new file mode 100644
index 0000000..2b2b97d
--- /dev/null
+++ b/src/contrib/Analyzers/Core/SimpleAnalyzer.cs
@@ -0,0 +1,23 @@
+using System;
+using System.Collections.Generic;
+using System.Linq;
+using System.Text;
+using Version = Lucene.Net.Util.Version;
+
+namespace Lucene.Net.Analysis.Core
+{
+    public sealed class SimpleAnalyzer : Analyzer
+    {
+        private readonly Version? matchVersion;
+
+        public SimpleAnalyzer(Version? matchVersion)
+        {
+            this.matchVersion = matchVersion;
+        }
+        
+        public override Analyzer.TokenStreamComponents CreateComponents(string fieldName, System.IO.TextReader reader)
+        {
+            return new TokenStreamComponents(new LowerCaseTokenizer(matchVersion, reader));
+        }
+    }
+}

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/98e877d5/src/contrib/Analyzers/Core/StopAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Analyzers/Core/StopAnalyzer.cs b/src/contrib/Analyzers/Core/StopAnalyzer.cs
new file mode 100644
index 0000000..ed41f02
--- /dev/null
+++ b/src/contrib/Analyzers/Core/StopAnalyzer.cs
@@ -0,0 +1,55 @@
+using Lucene.Net.Analysis.Util;
+using System;
+using System.Collections.Generic;
+using System.IO;
+using System.Linq;
+using System.Text;
+using Version = Lucene.Net.Util.Version;
+
+namespace Lucene.Net.Analysis.Core
+{
+    public sealed class StopAnalyzer : StopwordAnalyzerBase
+    {
+        public static readonly CharArraySet ENGLISH_STOP_WORDS_SET;
+
+        static StopAnalyzer()
+        {
+            string[] stopWords = new string[] {
+              "a", "an", "and", "are", "as", "at", "be", "but", "by",
+              "for", "if", "in", "into", "is", "it",
+              "no", "not", "of", "on", "or", "such",
+              "that", "the", "their", "then", "there", "these",
+              "they", "this", "to", "was", "will", "with"
+            };
+            CharArraySet stopSet = new CharArraySet(Version.LUCENE_CURRENT, stopWords, false);
+            ENGLISH_STOP_WORDS_SET = CharArraySet.UnmodifiableSet(stopSet);
+        }
+
+        public StopAnalyzer(Version? matchVersion)
+            : this(matchVersion, ENGLISH_STOP_WORDS_SET)
+        {
+        }
+
+        public StopAnalyzer(Version? matchVersion, CharArraySet stopWords)
+            : base(matchVersion, stopWords)
+        {
+        }
+
+        public StopAnalyzer(Version? matchVersion, Stream stopwordsFile)
+            : this(matchVersion, LoadStopwordSet(stopwordsFile, matchVersion))
+        {
+        }
+
+        public StopAnalyzer(Version? matchVersion, TextReader stopwords)
+            : this(matchVersion, LoadStopwordSet(stopwords, matchVersion))
+        {
+        }
+
+        public override Analyzer.TokenStreamComponents CreateComponents(string fieldName, TextReader reader)
+        {
+            Tokenizer source = new LowerCaseTokenizer(matchVersion, reader);
+            return new TokenStreamComponents(source, new StopFilter(matchVersion,
+                  source, stopwords));
+        }
+    }
+}

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/98e877d5/src/contrib/Analyzers/Core/StopFilter.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Analyzers/Core/StopFilter.cs b/src/contrib/Analyzers/Core/StopFilter.cs
new file mode 100644
index 0000000..c9a193b
--- /dev/null
+++ b/src/contrib/Analyzers/Core/StopFilter.cs
@@ -0,0 +1,53 @@
+using Lucene.Net.Analysis.Tokenattributes;
+using Lucene.Net.Analysis.Util;
+using Lucene.Net.Support;
+using System;
+using System.Collections.Generic;
+using System.Linq;
+using System.Text;
+using Version = Lucene.Net.Util.Version;
+
+namespace Lucene.Net.Analysis.Core
+{
+    public sealed class StopFilter : FilteringTokenFilter
+    {
+        private readonly CharArraySet stopWords;
+        private readonly ICharTermAttribute termAtt; // = addAttribute(CharTermAttribute.class);
+
+        public StopFilter(Version? matchVersion, TokenStream input, CharArraySet stopWords)
+            : base(true, input)
+        {
+            this.stopWords = stopWords;
+            termAtt = AddAttribute<ICharTermAttribute>();
+        }
+
+        public static CharArraySet MakeStopSet(Version? matchVersion, params String[] stopWords)
+        {
+            return MakeStopSet(matchVersion, stopWords, false);
+        }
+
+        public static CharArraySet MakeStopSet(Version? matchVersion, List<object> stopWords)
+        {
+            return MakeStopSet(matchVersion, stopWords, false);
+        }
+
+        public static CharArraySet MakeStopSet(Version? matchVersion, String[] stopWords, bool ignoreCase)
+        {
+            CharArraySet stopSet = new CharArraySet(matchVersion, stopWords.Length, ignoreCase);
+            stopSet.AddAll(stopWords);
+            return stopSet;
+        }
+
+        public static CharArraySet MakeStopSet(Version? matchVersion, List<object> stopWords, bool ignoreCase)
+        {
+            CharArraySet stopSet = new CharArraySet(matchVersion, stopWords.Count, ignoreCase);
+            stopSet.AddAll(stopWords);
+            return stopSet;
+        }
+
+        protected override bool Accept()
+        {
+            return !stopWords.Contains(termAtt.Buffer, 0, termAtt.Length);
+        }
+    }
+}
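
A minimal sketch of building a stop set and filtering with it, assuming the classes above compile as shown; MakeStopSet and the attribute workflow mirror the diffs in this commit, while the sample text is illustrative.

    using System;
    using System.IO;
    using Lucene.Net.Analysis;
    using Lucene.Net.Analysis.Core;
    using Lucene.Net.Analysis.Tokenattributes;
    using Version = Lucene.Net.Util.Version;

    class StopFilterDemo
    {
        static void Main()
        {
            var stops = StopFilter.MakeStopSet(Version.LUCENE_CURRENT, "the", "a");
            TokenStream ts = new LowerCaseTokenizer(Version.LUCENE_CURRENT, new StringReader("The quick fox"));
            ts = new StopFilter(Version.LUCENE_CURRENT, ts, stops);
            var termAtt = ts.AddAttribute<ICharTermAttribute>();
            ts.Reset();
            while (ts.IncrementToken())
                Console.WriteLine(termAtt.ToString()); // quick, fox ("the" is dropped)
        }
    }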

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/98e877d5/src/contrib/Analyzers/Core/StopFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Analyzers/Core/StopFilterFactory.cs b/src/contrib/Analyzers/Core/StopFilterFactory.cs
new file mode 100644
index 0000000..907c383
--- /dev/null
+++ b/src/contrib/Analyzers/Core/StopFilterFactory.cs
@@ -0,0 +1,81 @@
+using Lucene.Net.Analysis.Util;
+using System;
+using System.Collections.Generic;
+using System.Linq;
+using System.Text;
+
+namespace Lucene.Net.Analysis.Core
+{
+    public class StopFilterFactory : TokenFilterFactory, IResourceLoaderAware
+    {
+        private CharArraySet stopWords;
+        private readonly String stopWordFiles;
+        private readonly String format;
+        private readonly bool ignoreCase;
+        private readonly bool enablePositionIncrements;
+
+        public StopFilterFactory(IDictionary<String, String> args)
+            : base(args)
+        {
+            AssureMatchVersion();
+            stopWordFiles = Get(args, "words");
+            format = Get(args, "format");
+            ignoreCase = GetBoolean(args, "ignoreCase", false);
+            enablePositionIncrements = GetBoolean(args, "enablePositionIncrements", false);
+            if (args.Count > 0)
+            {
+                throw new ArgumentException("Unknown parameters: " + string.Join(", ", args.Keys)); // .NET Port: IDictionary.ToString() prints only the type name
+            }
+        }
+
+        public void Inform(IResourceLoader loader)
+        {
+            if (stopWordFiles != null)
+            {
+                if ("snowball".EqualsIgnoreCase(format))
+                {
+                    stopWords = GetSnowballWordSet(loader, stopWordFiles, ignoreCase);
+                }
+                else
+                {
+                    stopWords = GetWordSet(loader, stopWordFiles, ignoreCase);
+                }
+            }
+            else
+            {
+                stopWords = new CharArraySet(luceneMatchVersion, StopAnalyzer.ENGLISH_STOP_WORDS_SET, ignoreCase);
+            }
+        }
+
+        public bool IsEnablePositionIncrements
+        {
+            get
+            {
+                return enablePositionIncrements;
+            }
+        }
+
+        public bool IsIgnoreCase
+        {
+            get
+            {
+                return ignoreCase;
+            }
+        }
+
+        public CharArraySet StopWords
+        {
+            get
+            {
+                return stopWords;
+            }
+        }
+        
+        public override TokenStream Create(TokenStream input)
+        {
+            StopFilter stopFilter = new StopFilter(luceneMatchVersion, input, stopWords);
+            stopFilter.EnablePositionIncrements = enablePositionIncrements;
+            return stopFilter;
+        }
+    }
+}

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/98e877d5/src/contrib/Analyzers/Core/TypeTokenFilter.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Analyzers/Core/TypeTokenFilter.cs b/src/contrib/Analyzers/Core/TypeTokenFilter.cs
new file mode 100644
index 0000000..f6ea7dd
--- /dev/null
+++ b/src/contrib/Analyzers/Core/TypeTokenFilter.cs
@@ -0,0 +1,34 @@
+using Lucene.Net.Analysis.Tokenattributes;
+using Lucene.Net.Analysis.Util;
+using System;
+using System.Collections.Generic;
+using System.Linq;
+using System.Text;
+
+namespace Lucene.Net.Analysis.Core
+{
+    public sealed class TypeTokenFilter : FilteringTokenFilter
+    {
+        private readonly ISet<String> stopTypes;
+        private readonly ITypeAttribute typeAttribute; // = addAttribute(TypeAttribute.class);
+        private readonly bool useWhiteList;
+
+        public TypeTokenFilter(bool enablePositionIncrements, TokenStream input, ISet<String> stopTypes, bool useWhiteList)
+            : base(enablePositionIncrements, input)
+        {
+            this.stopTypes = stopTypes;
+            this.useWhiteList = useWhiteList;
+            typeAttribute = AddAttribute<ITypeAttribute>();
+        }
+
+        public TypeTokenFilter(bool enablePositionIncrements, TokenStream input, ISet<String> stopTypes)
+            : this(enablePositionIncrements, input, stopTypes, false)
+        {
+        }
+
+        protected override bool Accept()
+        {
+            return useWhiteList == stopTypes.Contains(typeAttribute.Type);
+        }
+    }
+}
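
The single comparison in Accept() covers both filtering modes, which is easy to miss on first read; a Lucene-free sketch of the same expression:

    using System;
    using System.Collections.Generic;

    class TypeFilterDemo
    {
        // Same expression as TypeTokenFilter.Accept(): keep a token when its
        // membership in the set matches the whitelist flag.
        static bool Accept(bool useWhiteList, ISet<string> stopTypes, string type)
        {
            return useWhiteList == stopTypes.Contains(type);
        }

        static void Main()
        {
            var types = new HashSet<string> { "<NUM>" };
            Console.WriteLine(Accept(false, types, "<NUM>"));   // False: blacklist drops it
            Console.WriteLine(Accept(false, types, "<ALPHA>")); // True:  everything else passes
            Console.WriteLine(Accept(true,  types, "<NUM>"));   // True:  whitelist keeps it
            Console.WriteLine(Accept(true,  types, "<ALPHA>")); // False: everything else is dropped
        }
    }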

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/98e877d5/src/contrib/Analyzers/Core/TypeTokenFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Analyzers/Core/TypeTokenFilterFactory.cs b/src/contrib/Analyzers/Core/TypeTokenFilterFactory.cs
new file mode 100644
index 0000000..1552375
--- /dev/null
+++ b/src/contrib/Analyzers/Core/TypeTokenFilterFactory.cs
@@ -0,0 +1,63 @@
+using Lucene.Net.Analysis.Util;
+using System;
+using System.Collections.Generic;
+using System.Linq;
+using System.Text;
+
+namespace Lucene.Net.Analysis.Core
+{
+    public class TypeTokenFilterFactory : TokenFilterFactory, IResourceLoaderAware
+    {
+        private readonly bool useWhitelist;
+        private readonly bool enablePositionIncrements;
+        private readonly String stopTypesFiles;
+        private ISet<String> stopTypes;
+
+        public TypeTokenFilterFactory(IDictionary<String, String> args)
+            : base(args)
+        {
+            stopTypesFiles = Require(args, "types");
+            enablePositionIncrements = GetBoolean(args, "enablePositionIncrements", false);
+            useWhitelist = GetBoolean(args, "useWhitelist", false);
+            if (args.Count > 0)
+            {
+                throw new ArgumentException("Unknown parameters: " + string.Join(", ", args.Keys)); // .NET Port: IDictionary.ToString() prints only the type name
+            }
+        }
+
+        public void Inform(IResourceLoader loader)
+        {
+            IList<String> files = SplitFileNames(stopTypesFiles);
+            if (files.Count > 0)
+            {
+                stopTypes = new HashSet<String>();
+                foreach (String file in files)
+                {
+                    IList<String> typesLines = GetLines(loader, file.Trim());
+                    stopTypes.UnionWith(typesLines);
+                }
+            }
+        }
+
+        public bool IsEnablePositionIncrements
+        {
+            get
+            {
+                return enablePositionIncrements;
+            }
+        }
+
+        public ISet<String> StopTypes
+        {
+            get
+            {
+                return stopTypes;
+            }
+        }
+
+        public override TokenStream Create(TokenStream input)
+        {
+            return new TypeTokenFilter(enablePositionIncrements, input, stopTypes, useWhitelist);
+        }
+    }
+}

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/98e877d5/src/contrib/Analyzers/Core/WhitespaceAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Analyzers/Core/WhitespaceAnalyzer.cs b/src/contrib/Analyzers/Core/WhitespaceAnalyzer.cs
new file mode 100644
index 0000000..180329e
--- /dev/null
+++ b/src/contrib/Analyzers/Core/WhitespaceAnalyzer.cs
@@ -0,0 +1,23 @@
+using System;
+using System.Collections.Generic;
+using System.Linq;
+using System.Text;
+using Version = Lucene.Net.Util.Version;
+
+namespace Lucene.Net.Analysis.Core
+{
+    public sealed class WhitespaceAnalyzer : Analyzer
+    {
+        private readonly Version? matchVersion;
+
+        public WhitespaceAnalyzer(Version? matchVersion)
+        {
+            this.matchVersion = matchVersion;
+        }
+        
+        public override Analyzer.TokenStreamComponents CreateComponents(string fieldName, System.IO.TextReader reader)
+        {
+            return new TokenStreamComponents(new WhitespaceTokenizer(matchVersion, reader));
+        }
+    }
+}

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/98e877d5/src/contrib/Analyzers/Core/WhitespaceTokenizer.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Analyzers/Core/WhitespaceTokenizer.cs b/src/contrib/Analyzers/Core/WhitespaceTokenizer.cs
new file mode 100644
index 0000000..87909a2
--- /dev/null
+++ b/src/contrib/Analyzers/Core/WhitespaceTokenizer.cs
@@ -0,0 +1,28 @@
+using Lucene.Net.Analysis.Util;
+using System;
+using System.Collections.Generic;
+using System.IO;
+using System.Linq;
+using System.Text;
+using Version = Lucene.Net.Util.Version;
+
+namespace Lucene.Net.Analysis.Core
+{
+    public sealed class WhitespaceTokenizer : CharTokenizer
+    {
+        public WhitespaceTokenizer(Version? matchVersion, TextReader input)
+            : base(matchVersion, input)
+        {
+        }
+
+        public WhitespaceTokenizer(Version? matchVersion, AttributeFactory factory, TextReader input)
+            : base(matchVersion, factory, input)
+        {
+        }
+
+        protected override bool IsTokenChar(int c)
+        {
+            return !char.IsWhiteSpace((char)c); // .NET Port: BMP-only; the (char) cast truncates supplementary code points
+        }
+    }
+}
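
Both tokenizers above only supply an IsTokenChar/Normalize predicate; the base CharTokenizer does the scanning. A Lucene-free sketch of that contract, using the whitespace predicate:

    using System;
    using System.Text;

    class IsTokenCharDemo
    {
        static bool IsTokenChar(int c) { return !char.IsWhiteSpace((char)c); }

        static void Main()
        {
            // CharTokenizer emits maximal runs of characters accepted by IsTokenChar.
            string input = "two\twords here";
            var current = new StringBuilder();
            foreach (char c in input)
            {
                if (IsTokenChar(c)) { current.Append(c); continue; }
                if (current.Length > 0) { Console.WriteLine(current); current.Clear(); }
            }
            if (current.Length > 0) Console.WriteLine(current); // two / words / here
        }
    }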

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/98e877d5/src/contrib/Analyzers/Core/WhitespaceTokenizerFactory.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Analyzers/Core/WhitespaceTokenizerFactory.cs b/src/contrib/Analyzers/Core/WhitespaceTokenizerFactory.cs
new file mode 100644
index 0000000..378d30f
--- /dev/null
+++ b/src/contrib/Analyzers/Core/WhitespaceTokenizerFactory.cs
@@ -0,0 +1,26 @@
+using Lucene.Net.Analysis.Util;
+using System;
+using System.Collections.Generic;
+using System.Linq;
+using System.Text;
+
+namespace Lucene.Net.Analysis.Core
+{
+    public class WhitespaceTokenizerFactory : TokenizerFactory
+    {
+        public WhitespaceTokenizerFactory(IDictionary<String, String> args)
+            : base(args)
+        {
+            AssureMatchVersion();
+            if (args.Count > 0)
+            {
+                throw new ArgumentException("Unknown parameters: " + string.Join(", ", args.Keys)); // .NET Port: IDictionary.ToString() prints only the type name
+            }
+        }
+
+        public override Tokenizer Create(Net.Util.AttributeSource.AttributeFactory factory, System.IO.TextReader input)
+        {
+            return new WhitespaceTokenizer(luceneMatchVersion, factory, input);
+        }
+    }
+}

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/98e877d5/src/contrib/Analyzers/Support/AbstractSet.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Analyzers/Support/AbstractSet.cs b/src/contrib/Analyzers/Support/AbstractSet.cs
index f732d08..a9249d5 100644
--- a/src/contrib/Analyzers/Support/AbstractSet.cs
+++ b/src/contrib/Analyzers/Support/AbstractSet.cs
@@ -5,7 +5,7 @@ using System.Text;
 
 namespace Lucene.Net.Analysis.Support
 {
-    public class AbstractSet<T> : ISet<T>
+    public abstract class AbstractSet<T> : ISet<T>
     {
         public virtual bool Add(T item)
         {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/98e877d5/src/contrib/Analyzers/Util/AbstractAnalysisFactory.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Analyzers/Util/AbstractAnalysisFactory.cs b/src/contrib/Analyzers/Util/AbstractAnalysisFactory.cs
index ab0b117..b815eb6 100644
--- a/src/contrib/Analyzers/Util/AbstractAnalysisFactory.cs
+++ b/src/contrib/Analyzers/Util/AbstractAnalysisFactory.cs
@@ -1,4 +1,5 @@
-using Lucene.Net.Support;
+using Lucene.Net.Analysis.Core;
+using Lucene.Net.Support;
 using Lucene.Net.Util;
 using System;
 using System.Collections.Generic;
@@ -15,7 +16,7 @@ namespace Lucene.Net.Analysis.Util
 
         private readonly IDictionary<string, string> originalArgs;
 
-        protected readonly Lucene.Net.Util.Version luceneMatchVersion;
+        protected readonly Lucene.Net.Util.Version? luceneMatchVersion;
 
         private bool isExplicitLuceneMatchVersion = false;
 
@@ -23,7 +24,7 @@ namespace Lucene.Net.Analysis.Util
         {
             originalArgs = new HashMap<String, String>(args);
             String version = Get(args, LUCENE_MATCH_VERSION_PARAM);
-            luceneMatchVersion = version == null ? (Lucene.Net.Util.Version)null : version.ParseLeniently();
+            luceneMatchVersion = version == null ? (Lucene.Net.Util.Version?)null : version.ParseLeniently();
             args.Remove(CLASS_NAME);  // consume the class arg
         }
 
@@ -44,7 +45,7 @@ namespace Lucene.Net.Analysis.Util
             }
         }
 
-        public Lucene.Net.Util.Version LuceneMatchVersion
+        public Lucene.Net.Util.Version? LuceneMatchVersion
         {
             get
             {
@@ -274,7 +275,7 @@ namespace Lucene.Net.Analysis.Util
                 foreach (String file in files)
                 {
                     IList<String> wlist = GetLines(loader, file.Trim());
-                    words.UnionWith(StopFilter.MakeStopSet(luceneMatchVersion, wlist,
+                    words.UnionWith(StopFilter.MakeStopSet(luceneMatchVersion, wlist.Cast<object>().ToList(),
                         ignoreCase));
                 }
             }
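
The Version to Version? change in this hunk (and throughout this commit) exists because Lucene.Net's Version is a value type, as the (Lucene.Net.Util.Version?)null cast above shows; unlike Java's enum reference it cannot itself be null. A small sketch of the resulting pattern:

    using System;
    using Version = Lucene.Net.Util.Version;

    class NullableVersionDemo
    {
        static void Main()
        {
            Version? v = null;                        // "no match version supplied"
            Console.WriteLine(v.HasValue);            // False
            v = Version.LUCENE_CURRENT;
            Console.WriteLine(v.GetValueOrDefault()); // LUCENE_CURRENT
        }
    }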

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/98e877d5/src/contrib/Analyzers/Util/CharArrayMap.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Analyzers/Util/CharArrayMap.cs b/src/contrib/Analyzers/Util/CharArrayMap.cs
index e124451..fb7ee13 100644
--- a/src/contrib/Analyzers/Util/CharArrayMap.cs
+++ b/src/contrib/Analyzers/Util/CharArrayMap.cs
@@ -16,11 +16,11 @@ namespace Lucene.Net.Analysis.Util
         private readonly CharacterUtils charUtils;
         private bool ignoreCase;
         private int count;
-        internal readonly Lucene.Net.Util.Version matchVersion; // package private because used in CharArraySet
+        internal readonly Lucene.Net.Util.Version? matchVersion; // package private because used in CharArraySet
         internal char[][] keys; // package private because used in CharArraySet's non Set-conform CharArraySetIterator
         internal V[] values; // package private because used in CharArraySet's non Set-conform CharArraySetIterator
 
-        public CharArrayMap(Lucene.Net.Util.Version matchVersion, int startSize, bool ignoreCase)
+        public CharArrayMap(Lucene.Net.Util.Version? matchVersion, int startSize, bool ignoreCase)
         {
             this.ignoreCase = ignoreCase;
             int size = INIT_SIZE;
@@ -28,11 +28,11 @@ namespace Lucene.Net.Analysis.Util
                 size <<= 1;
             keys = new char[size][];
             values = new V[size];
-            this.charUtils = CharacterUtils.GetInstance(matchVersion);
+            this.charUtils = CharacterUtils.GetInstance(matchVersion); // .NET Port: GetInstance accepts a nullable Version as of this commit
             this.matchVersion = matchVersion;
         }
 
-        public CharArrayMap(Lucene.Net.Util.Version matchVersion, IDictionary<object, V> c, bool ignoreCase)
+        public CharArrayMap(Lucene.Net.Util.Version? matchVersion, IDictionary<object, V> c, bool ignoreCase)
             : this(matchVersion, c.Count, ignoreCase)
         {
             foreach (var kvp in c)
@@ -367,7 +367,7 @@ namespace Lucene.Net.Analysis.Util
                 if (keySet == null)
                 {
                     // prevent adding of entries
-                    keySet = new AnonymousCharArraySet(this);
+                    keySet = new AnonymousCharArraySet(new CharArrayMap<object>(matchVersion, this.ToDictionary(i => (object)i.Key, i => (object)i.Value), ignoreCase)); // .NET Port: copies the entries, so this key set is a snapshot rather than a live view
                 }
 
                 return keySet;
@@ -376,7 +376,7 @@ namespace Lucene.Net.Analysis.Util
 
         private sealed class AnonymousCharArraySet : CharArraySet
         {
-            public AnonymousCharArraySet(CharArrayMap<V> map)
+            public AnonymousCharArraySet(CharArrayMap<object> map)
                 : base(map)
             {
             }
@@ -581,6 +581,67 @@
                 parent.Clear();
             }
         }
+
+        public void Add(object key, V value)
+        {
+            Put(key, value);
+        }
+
+        bool IDictionary<object, V>.Remove(object key)
+        {
+            bool existed = ContainsKey(key);
+            Remove(key);
+            return existed; // .NET Port: true only if the key was actually present
+        }
+
+        public bool TryGetValue(object key, out V value)
+        {
+            value = Get(key);
+
+            return value != null; // .NET Port: a stored null value would be reported as missing
+        }
+
+        public ICollection<V> Values
+        {
+            get { return values; }
+        }
+
+        public void Add(KeyValuePair<object, V> item)
+        {
+            Put(item.Key, item.Value);
+        }
+
+        public bool Contains(KeyValuePair<object, V> item)
+        {
+            return ContainsKey(item.Key);
+        }
+
+        public void CopyTo(KeyValuePair<object, V>[] array, int arrayIndex)
+        {
+            throw new NotImplementedException();
+        }
+
+        public bool IsReadOnly
+        {
+            get { return false; }
+        }
+
+        public bool Remove(KeyValuePair<object, V> item)
+        {
+            bool existed = ContainsKey(item.Key);
+            Remove(item.Key);
+            return existed; // .NET Port: true only if the key was present
+        }
+
+        public IEnumerator<KeyValuePair<object, V>> GetEnumerator()
+        {
+            return GetEntrySet().GetEnumerator();
+        }
+
+        System.Collections.IEnumerator System.Collections.IEnumerable.GetEnumerator()
+        {
+            return GetEnumerator();
+        }
     }
 
     // .NET Port: non-generic static class to hold nested types and static methods
@@ -597,7 +656,7 @@ namespace Lucene.Net.Analysis.Util
             return new UnmodifiableCharArrayMap<V>(map);
         }
 
-        public static CharArrayMap<V> Copy<V>(Lucene.Net.Util.Version matchVersion, IDictionary<object, V> map)
+        public static CharArrayMap<V> Copy<V>(Lucene.Net.Util.Version? matchVersion, IDictionary<object, V> map)
         {
             if (map == CharArrayMap<V>.EMPTY_MAP)
                 return EmptyMap<V>();

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/98e877d5/src/contrib/Analyzers/Util/CharArraySet.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Analyzers/Util/CharArraySet.cs b/src/contrib/Analyzers/Util/CharArraySet.cs
index 522bcaa..23eb0ea 100644
--- a/src/contrib/Analyzers/Util/CharArraySet.cs
+++ b/src/contrib/Analyzers/Util/CharArraySet.cs
@@ -14,12 +14,12 @@ namespace Lucene.Net.Analysis.Util
 
         private readonly CharArrayMap<object> map;
 
-        public CharArraySet(Lucene.Net.Util.Version matchVersion, int startSize, bool ignoreCase)
+        public CharArraySet(Lucene.Net.Util.Version? matchVersion, int startSize, bool ignoreCase)
             : this(new CharArrayMap<Object>(matchVersion, startSize, ignoreCase))
         {
         }
 
-        public CharArraySet(Lucene.Net.Util.Version matchVersion, ICollection<object> c, bool ignoreCase)
+        public CharArraySet(Lucene.Net.Util.Version? matchVersion, ICollection<object> c, bool ignoreCase)
             : this(matchVersion, c.Count, ignoreCase)
         {
             AddAll(c);
@@ -55,17 +55,17 @@ namespace Lucene.Net.Analysis.Util
             return map.Put(o, PLACEHOLDER) == null;
         }
 
-        public bool Add(ICharSequence text)
+        public virtual bool Add(ICharSequence text)
         {
             return map.Put(text, PLACEHOLDER) == null;
         }
 
-        public bool Add(string text)
+        public virtual bool Add(string text)
         {
             return map.Put(text, PLACEHOLDER) == null;
         }
 
-        public bool Add(char[] text)
+        public virtual bool Add(char[] text)
         {
             return map.Put(text, PLACEHOLDER) == null;
         }
@@ -86,7 +86,7 @@ namespace Lucene.Net.Analysis.Util
             return new CharArraySet(CharArrayMap.UnmodifiableMap(set.map));
         }
 
-        public static CharArraySet Copy(Lucene.Net.Util.Version matchVersion, ICollection<object> set)
+        public static CharArraySet Copy(Lucene.Net.Util.Version? matchVersion, ICollection<object> set)
         {
             if (set == EMPTY_SET)
                 return EMPTY_SET;
@@ -121,5 +121,10 @@ namespace Lucene.Net.Analysis.Util
             }
             return sb.Append(']').ToString();
         }
+
+        public override bool Remove(object item)
+        {
+            throw new NotImplementedException();
+        }
     }
 }
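
The point of CharArraySet (and why StopFilter.Accept above passes termAtt.Buffer into it) is that membership can be tested against a slice of a char[] without allocating a string per token. A minimal sketch, assuming the Contains(char[], int, int) overload carried over from the Java original:

    using System;
    using Lucene.Net.Analysis.Util;
    using Version = Lucene.Net.Util.Version;

    class CharArraySetDemo
    {
        static void Main()
        {
            var set = new CharArraySet(Version.LUCENE_CURRENT, 8, true); // ignoreCase: true
            set.Add("the");
            char[] buffer = "The quick".ToCharArray();
            Console.WriteLine(set.Contains(buffer, 0, 3)); // True; no string allocated
        }
    }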

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/98e877d5/src/contrib/Analyzers/Util/CharTokenizer.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Analyzers/Util/CharTokenizer.cs b/src/contrib/Analyzers/Util/CharTokenizer.cs
index b0029fa..0a31781 100644
--- a/src/contrib/Analyzers/Util/CharTokenizer.cs
+++ b/src/contrib/Analyzers/Util/CharTokenizer.cs
@@ -11,7 +11,7 @@ namespace Lucene.Net.Analysis.Util
 {
     public abstract class CharTokenizer : Tokenizer
     {
-        public CharTokenizer(Version matchVersion, TextReader input)
+        public CharTokenizer(Version? matchVersion, TextReader input)
             : base(input)
         {
             charUtils = CharacterUtils.GetInstance(matchVersion);
@@ -19,7 +19,7 @@ namespace Lucene.Net.Analysis.Util
             offsetAtt = AddAttribute<IOffsetAttribute>();
         }
 
-        public CharTokenizer(Version matchVersion, AttributeFactory factory, TextReader input)
+        public CharTokenizer(Version? matchVersion, AttributeFactory factory, TextReader input)
             : base(factory, input)
         {
             charUtils = CharacterUtils.GetInstance(matchVersion);

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/98e877d5/src/contrib/Analyzers/Util/CharacterUtils.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Analyzers/Util/CharacterUtils.cs b/src/contrib/Analyzers/Util/CharacterUtils.cs
index 223d8f0..5fdc78f 100644
--- a/src/contrib/Analyzers/Util/CharacterUtils.cs
+++ b/src/contrib/Analyzers/Util/CharacterUtils.cs
@@ -16,7 +16,7 @@ namespace Lucene.Net.Analysis.Util
         // .NET Port: we never changed how we handle strings and chars :-)
         private static readonly DotNetCharacterUtils DOTNET = new DotNetCharacterUtils();
 
-        public static CharacterUtils GetInstance(Lucene.Net.Util.Version matchVersion)
+        public static CharacterUtils GetInstance(Lucene.Net.Util.Version? matchVersion)
         {
             //return matchVersion.OnOrAfter(Lucene.Net.Util.Version.LUCENE_31) ? JAVA_5 : JAVA_4;
             return DOTNET;

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/98e877d5/src/contrib/Analyzers/Util/FilteringTokenFilter.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Analyzers/Util/FilteringTokenFilter.cs b/src/contrib/Analyzers/Util/FilteringTokenFilter.cs
new file mode 100644
index 0000000..d06af92
--- /dev/null
+++ b/src/contrib/Analyzers/Util/FilteringTokenFilter.cs
@@ -0,0 +1,77 @@
+using Lucene.Net.Analysis.Tokenattributes;
+using System;
+using System.Collections.Generic;
+using System.Linq;
+using System.Text;
+
+namespace Lucene.Net.Analysis.Util
+{
+    public abstract class FilteringTokenFilter : TokenFilter
+    {
+        private readonly IPositionIncrementAttribute posIncrAtt; // = addAttribute(PositionIncrementAttribute.class);
+        private bool enablePositionIncrements; // no init needed, as ctor enforces setting value!
+        private bool first = true; // only used when not preserving gaps
+
+        public FilteringTokenFilter(bool enablePositionIncrements, TokenStream input)
+            : base(input)
+        {
+            this.enablePositionIncrements = enablePositionIncrements;
+            posIncrAtt = AddAttribute<IPositionIncrementAttribute>();
+        }
+
+        protected abstract bool Accept();
+
+        public override bool IncrementToken()
+        {
+            if (enablePositionIncrements)
+            {
+                int skippedPositions = 0;
+                while (input.IncrementToken())
+                {
+                    if (Accept())
+                    {
+                        if (skippedPositions != 0)
+                        {
+                            posIncrAtt.PositionIncrement = posIncrAtt.PositionIncrement + skippedPositions;
+                        }
+                        return true;
+                    }
+                    skippedPositions += posIncrAtt.PositionIncrement;
+                }
+            }
+            else
+            {
+                while (input.IncrementToken())
+                {
+                    if (Accept())
+                    {
+                        if (first)
+                        {
+                            // first token having posinc=0 is illegal.
+                            if (posIncrAtt.PositionIncrement == 0)
+                            {
+                                posIncrAtt.PositionIncrement = 1;
+                            }
+                            first = false;
+                        }
+                        return true;
+                    }
+                }
+            }
+            // reached EOS -- return false
+            return false;
+        }
+
+        public override void Reset()
+        {
+            base.Reset();
+            first = true;
+        }
+
+        public bool EnablePositionIncrements
+        {
+            get { return enablePositionIncrements; }
+            set { enablePositionIncrements = value; }
+        }
+    }
+}
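
The enablePositionIncrements branch above folds each dropped token's increment into the next accepted one, so phrase positions still reflect the gap. A Lucene-free simulation of that bookkeeping:

    using System;

    class PosIncDemo
    {
        static void Main()
        {
            string[] terms = { "quick", "brown", "fox" };
            int[] incs     = { 1, 1, 1 };
            bool[] keep    = { true, false, true }; // pretend "brown" is filtered out

            int skipped = 0, position = 0;
            for (int i = 0; i < terms.Length; i++)
            {
                if (!keep[i]) { skipped += incs[i]; continue; }
                position += incs[i] + skipped; // add the dropped tokens' gap
                skipped = 0;
                Console.WriteLine(terms[i] + " @ " + position); // quick @ 1, fox @ 3
            }
        }
    }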

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/98e877d5/src/contrib/Analyzers/Util/IMultiTermAwareComponent.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Analyzers/Util/IMultiTermAwareComponent.cs b/src/contrib/Analyzers/Util/IMultiTermAwareComponent.cs
new file mode 100644
index 0000000..3e256c3
--- /dev/null
+++ b/src/contrib/Analyzers/Util/IMultiTermAwareComponent.cs
@@ -0,0 +1,12 @@
+using System;
+using System.Collections.Generic;
+using System.Linq;
+using System.Text;
+
+namespace Lucene.Net.Analysis.Util
+{
+    public interface IMultiTermAwareComponent
+    {
+        AbstractAnalysisFactory MultiTermComponent { get; }
+    }
+}

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/98e877d5/src/contrib/Analyzers/Util/IResourceLoaderAware.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Analyzers/Util/IResourceLoaderAware.cs b/src/contrib/Analyzers/Util/IResourceLoaderAware.cs
new file mode 100644
index 0000000..8ff35bf
--- /dev/null
+++ b/src/contrib/Analyzers/Util/IResourceLoaderAware.cs
@@ -0,0 +1,12 @@
+using System;
+using System.Collections.Generic;
+using System.Linq;
+using System.Text;
+
+namespace Lucene.Net.Analysis.Util
+{
+    public interface IResourceLoaderAware
+    {
+        void Inform(IResourceLoader loader);
+    }
+}

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/98e877d5/src/contrib/Analyzers/Util/StopwordAnalyzerBase.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Analyzers/Util/StopwordAnalyzerBase.cs b/src/contrib/Analyzers/Util/StopwordAnalyzerBase.cs
index f6e9194..a041e65 100644
--- a/src/contrib/Analyzers/Util/StopwordAnalyzerBase.cs
+++ b/src/contrib/Analyzers/Util/StopwordAnalyzerBase.cs
@@ -12,7 +12,7 @@ namespace Lucene.Net.Analysis.Util
     {
         protected readonly CharArraySet stopwords;
 
-        protected readonly Version matchVersion;
+        protected readonly Version? matchVersion;
 
         public CharArraySet StopwordSet
         {
@@ -22,7 +22,7 @@ namespace Lucene.Net.Analysis.Util
             }
         }
 
-        protected StopwordAnalyzerBase(Version version, CharArraySet stopwords)
+        protected StopwordAnalyzerBase(Version? version, CharArraySet stopwords)
         {
             matchVersion = version;
             // analyzers should use char array set for stopwords!
@@ -30,7 +30,7 @@ namespace Lucene.Net.Analysis.Util
                 .UnmodifiableSet(CharArraySet.Copy(version, stopwords));
         }
 
-        protected StopwordAnalyzerBase(Version version)
+        protected StopwordAnalyzerBase(Version? version)
             : this(version, null)
         {
         }
@@ -49,7 +49,7 @@ namespace Lucene.Net.Analysis.Util
             }
         }
 
-        protected static CharArraySet LoadStopwordSet(Stream stopwords, Version matchVersion)
+        protected static CharArraySet LoadStopwordSet(Stream stopwords, Version? matchVersion)
         {
             TextReader reader = null;
             try
@@ -63,7 +63,7 @@ namespace Lucene.Net.Analysis.Util
             }
         }
 
-        protected static CharArraySet LoadStopwordSet(TextReader stopwords, Version matchVersion)
+        protected static CharArraySet LoadStopwordSet(TextReader stopwords, Version? matchVersion)
         {
             try
             {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/98e877d5/src/contrib/Analyzers/Util/TokenFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Analyzers/Util/TokenFilterFactory.cs b/src/contrib/Analyzers/Util/TokenFilterFactory.cs
new file mode 100644
index 0000000..fcb674e
--- /dev/null
+++ b/src/contrib/Analyzers/Util/TokenFilterFactory.cs
@@ -0,0 +1,44 @@
+using System;
+using System.Collections.Generic;
+using System.Linq;
+using System.Text;
+
+namespace Lucene.Net.Analysis.Util
+{
+    public abstract class TokenFilterFactory : AbstractAnalysisFactory
+    {
+        private static readonly AnalysisSPILoader<TokenFilterFactory> loader =
+            new AnalysisSPILoader<TokenFilterFactory>(typeof(TokenFilterFactory),
+                new String[] { "TokenFilterFactory", "FilterFactory" });
+
+        public static TokenFilterFactory ForName(String name, IDictionary<String, String> args)
+        {
+            return loader.NewInstance(name, args);
+        }
+
+        public static Type LookupClass(String name)
+        {
+            return loader.LookupClass(name);
+        }
+
+        public static ICollection<String> AvailableTokenFilters
+        {
+            get
+            {
+                return loader.AvailableServices;
+            }
+        }
+
+        public static void ReloadTokenFilters()
+        {
+            loader.Reload();
+        }
+
+        protected TokenFilterFactory(IDictionary<String, String> args)
+            : base(args)
+        {
+        }
+
+        public abstract TokenStream Create(TokenStream input);
+    }
+}

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/98e877d5/src/contrib/Analyzers/Util/WordlistLoader.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Analyzers/Util/WordlistLoader.cs b/src/contrib/Analyzers/Util/WordlistLoader.cs
index e78ea9b..c2430d0 100644
--- a/src/contrib/Analyzers/Util/WordlistLoader.cs
+++ b/src/contrib/Analyzers/Util/WordlistLoader.cs
@@ -31,12 +31,12 @@ namespace Lucene.Net.Analysis.Util
             return result;
         }
 
-        public static CharArraySet GetWordSet(TextReader reader, Lucene.Net.Util.Version matchVersion)
+        public static CharArraySet GetWordSet(TextReader reader, Lucene.Net.Util.Version? matchVersion)
         {
             return GetWordSet(reader, new CharArraySet(matchVersion, INITIAL_CAPACITY, false));
         }
 
-        public static CharArraySet GetWordSet(TextReader reader, String comment, Lucene.Net.Util.Version matchVersion)
+        public static CharArraySet GetWordSet(TextReader reader, String comment, Lucene.Net.Util.Version? matchVersion)
         {
             return GetWordSet(reader, comment, new CharArraySet(matchVersion, INITIAL_CAPACITY, false));
         }


[09/50] [abbrv] git commit: Port: csproj check in

Posted by mh...@apache.org.
Port: csproj check in


Project: http://git-wip-us.apache.org/repos/asf/lucenenet/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucenenet/commit/d9635bfc
Tree: http://git-wip-us.apache.org/repos/asf/lucenenet/tree/d9635bfc
Diff: http://git-wip-us.apache.org/repos/asf/lucenenet/diff/d9635bfc

Branch: refs/heads/branch_4x
Commit: d9635bfc391f870b53a4ff53220f5a9dbe9af4c0
Parents: f1544d6
Author: James Blair <jm...@gmail.com>
Authored: Thu Jul 11 18:37:30 2013 -0400
Committer: James Blair <jm...@gmail.com>
Committed: Thu Jul 11 18:37:30 2013 -0400

----------------------------------------------------------------------
 test/core/Lucene.Net.Test.csproj | 15 ++++++++++-----
 1 file changed, 10 insertions(+), 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucenenet/blob/d9635bfc/test/core/Lucene.Net.Test.csproj
----------------------------------------------------------------------
diff --git a/test/core/Lucene.Net.Test.csproj b/test/core/Lucene.Net.Test.csproj
index 89f6ca3..8c12684 100644
--- a/test/core/Lucene.Net.Test.csproj
+++ b/test/core/Lucene.Net.Test.csproj
@@ -557,15 +557,20 @@
       <SubType>Code</SubType>
     </Compile>
     <Compile Include="Util\TestRamUsageEstimator.cs" />
+    <Compile Include="Util\TestRamUsageEstimatorOnWildAnimals.cs" />
+    <Compile Include="Util\TestRecyclingByteBlockAllocator.cs" />
+    <Compile Include="Util\TestRecyclingIntBlockAllocator.cs" />
+    <Compile Include="Util\TestRollingBuffer.cs" />
+    <Compile Include="Util\TestSentineIntSet.cs" />
+    <Compile Include="Util\TestSetOnce.cs" />
     <Compile Include="Util\TestSmallFloat.cs">
       <SubType>Code</SubType>
     </Compile>
-    <Compile Include="Util\TestSortedVIntList.cs" />
-    <Compile Include="Util\TestStringHelper.cs">
-      <SubType>Code</SubType>
-    </Compile>
-    <Compile Include="Util\TestStringIntern.cs" />
+    <Compile Include="Util\TestSorterTemplate.cs" />
+    <Compile Include="Util\TestUnicodeUtil.cs" />
     <Compile Include="Util\TestVersion.cs" />
+    <Compile Include="Util\TestVersionComparator.cs" />
+    <Compile Include="Util\TestVirtualMethod.cs" />
     <Compile Include="Util\_TestUtil.cs">
       <SubType>Code</SubType>
     </Compile>


[34/50] [abbrv] git commit: Fix issue reading VLongs as well

Posted by mh...@apache.org.
Fix issue reading VLongs as well


Project: http://git-wip-us.apache.org/repos/asf/lucenenet/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucenenet/commit/401752bd
Tree: http://git-wip-us.apache.org/repos/asf/lucenenet/tree/401752bd
Diff: http://git-wip-us.apache.org/repos/asf/lucenenet/diff/401752bd

Branch: refs/heads/branch_4x
Commit: 401752bd37a3bee89787db23556421bd3b684a3e
Parents: 4c65df0
Author: Paul Irwin <pa...@gmail.com>
Authored: Wed Aug 7 10:00:26 2013 -0400
Committer: Paul Irwin <pa...@gmail.com>
Committed: Wed Aug 7 10:00:26 2013 -0400

----------------------------------------------------------------------
 src/core/Codecs/BlockTreeTermsReader.cs |  2 +-
 src/core/Store/BufferedIndexInput.cs    | 64 +++++++++++++++------------
 src/core/Store/ByteArrayDataInput.cs    | 64 +++++++++++++++------------
 src/core/Store/DataInput.cs             | 66 ++++++++++++++++------------
 4 files changed, 113 insertions(+), 83 deletions(-)
----------------------------------------------------------------------
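
The underlying issue: the unrolled Java code guarded each step with "if (b >= 0) return ...", which works only because Java's byte is signed (the 0x80 continuation bit makes it negative). In C#, byte is unsigned, so that guard is always true and the reader returned after the first byte, truncating every multi-byte vLong. A self-contained sketch of the corrected loop:

    using System;

    class VLongDemo
    {
        static void Main()
        {
            // 300 encoded as a vLong: low 7 bits first, high bit set means "more bytes".
            byte[] encoded = { 0xAC, 0x02 };
            int pos = 0;
            byte b = encoded[pos++];
            long i = b & 0x7F;
            for (int shift = 7; (b & 0x80) != 0; shift += 7)
            {
                b = encoded[pos++];
                i |= (b & 0x7FL) << shift;
            }
            Console.WriteLine(i); // 300
            // The removed guard "if (b >= 0) return b;" is always true for a C#
            // byte, so the old port would have returned 172 (0xAC) here.
        }
    }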


http://git-wip-us.apache.org/repos/asf/lucenenet/blob/401752bd/src/core/Codecs/BlockTreeTermsReader.cs
----------------------------------------------------------------------
diff --git a/src/core/Codecs/BlockTreeTermsReader.cs b/src/core/Codecs/BlockTreeTermsReader.cs
index f0148ce..dd73e3d 100644
--- a/src/core/Codecs/BlockTreeTermsReader.cs
+++ b/src/core/Codecs/BlockTreeTermsReader.cs
@@ -3247,7 +3247,7 @@ namespace Lucene.Net.Codecs
                         {
                             parent.term.Grow(termLength);
                         }
-                        Array.Copy(suffixBytes, startBytePos, parent.term.bytes, prefix, suffix);
+                        System.Buffer.BlockCopy(suffixBytes, startBytePos, parent.term.bytes, prefix, suffix);
                     }
                 }
 

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/401752bd/src/core/Store/BufferedIndexInput.cs
----------------------------------------------------------------------
diff --git a/src/core/Store/BufferedIndexInput.cs b/src/core/Store/BufferedIndexInput.cs
index 804ccdd..c701b76 100644
--- a/src/core/Store/BufferedIndexInput.cs
+++ b/src/core/Store/BufferedIndexInput.cs
@@ -259,34 +259,44 @@ namespace Lucene.Net.Store
         {
             if (9 <= bufferLength - bufferPosition)
             {
+                // .NET Port: restored the loop form; the unrolled guard "if (b >= 0)" relied on Java's signed byte and always returned early in C#
                 byte b = buffer[bufferPosition++];
-                if (b >= 0) return b;
-                long i = b & 0x7FL;
-                b = buffer[bufferPosition++];
-                i |= (b & 0x7FL) << 7;
-                if (b >= 0) return i;
-                b = buffer[bufferPosition++];
-                i |= (b & 0x7FL) << 14;
-                if (b >= 0) return i;
-                b = buffer[bufferPosition++];
-                i |= (b & 0x7FL) << 21;
-                if (b >= 0) return i;
-                b = buffer[bufferPosition++];
-                i |= (b & 0x7FL) << 28;
-                if (b >= 0) return i;
-                b = buffer[bufferPosition++];
-                i |= (b & 0x7FL) << 35;
-                if (b >= 0) return i;
-                b = buffer[bufferPosition++];
-                i |= (b & 0x7FL) << 42;
-                if (b >= 0) return i;
-                b = buffer[bufferPosition++];
-                i |= (b & 0x7FL) << 49;
-                if (b >= 0) return i;
-                b = buffer[bufferPosition++];
-                i |= (b & 0x7FL) << 56;
-                if (b >= 0) return i;
-                throw new System.IO.IOException("Invalid vLong detected (negative values disallowed)");
+                long i = b & 0x7F;
+                for (int shift = 7; (b & 0x80) != 0; shift += 7)
+                {
+                    b = buffer[bufferPosition++];
+                    i |= (b & 0x7FL) << shift;
+                }
+                return i;
+
+                //byte b = buffer[bufferPosition++];
+                //if (b >= 0) return b;
+                //long i = b & 0x7FL;
+                //b = buffer[bufferPosition++];
+                //i |= (b & 0x7FL) << 7;
+                //if (b >= 0) return i;
+                //b = buffer[bufferPosition++];
+                //i |= (b & 0x7FL) << 14;
+                //if (b >= 0) return i;
+                //b = buffer[bufferPosition++];
+                //i |= (b & 0x7FL) << 21;
+                //if (b >= 0) return i;
+                //b = buffer[bufferPosition++];
+                //i |= (b & 0x7FL) << 28;
+                //if (b >= 0) return i;
+                //b = buffer[bufferPosition++];
+                //i |= (b & 0x7FL) << 35;
+                //if (b >= 0) return i;
+                //b = buffer[bufferPosition++];
+                //i |= (b & 0x7FL) << 42;
+                //if (b >= 0) return i;
+                //b = buffer[bufferPosition++];
+                //i |= (b & 0x7FL) << 49;
+                //if (b >= 0) return i;
+                //b = buffer[bufferPosition++];
+                //i |= (b & 0x7FL) << 56;
+                //if (b >= 0) return i;
+                //throw new System.IO.IOException("Invalid vLong detected (negative values disallowed)");
             }
             else
             {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/401752bd/src/core/Store/ByteArrayDataInput.cs
----------------------------------------------------------------------
diff --git a/src/core/Store/ByteArrayDataInput.cs b/src/core/Store/ByteArrayDataInput.cs
index ea36c6b..741f2da 100644
--- a/src/core/Store/ByteArrayDataInput.cs
+++ b/src/core/Store/ByteArrayDataInput.cs
@@ -121,34 +121,44 @@ namespace Lucene.Net.Store
 
         public override long ReadVLong()
         {
+            // .NET Port: restored the loop form; the unrolled guard "if (b >= 0)" relied on Java's signed byte and always returned early in C#
             byte b = bytes[pos++];
-            if (b >= 0) return b;
-            long i = b & 0x7FL;
-            b = bytes[pos++];
-            i |= (b & 0x7FL) << 7;
-            if (b >= 0) return i;
-            b = bytes[pos++];
-            i |= (b & 0x7FL) << 14;
-            if (b >= 0) return i;
-            b = bytes[pos++];
-            i |= (b & 0x7FL) << 21;
-            if (b >= 0) return i;
-            b = bytes[pos++];
-            i |= (b & 0x7FL) << 28;
-            if (b >= 0) return i;
-            b = bytes[pos++];
-            i |= (b & 0x7FL) << 35;
-            if (b >= 0) return i;
-            b = bytes[pos++];
-            i |= (b & 0x7FL) << 42;
-            if (b >= 0) return i;
-            b = bytes[pos++];
-            i |= (b & 0x7FL) << 49;
-            if (b >= 0) return i;
-            b = bytes[pos++];
-            i |= (b & 0x7FL) << 56;
-            if (b >= 0) return i;
-            throw new InvalidOperationException("Invalid vLong detected (negative values disallowed)");
+            long i = b & 0x7F;
+            for (int shift = 7; (b & 0x80) != 0; shift += 7)
+            {
+                b = bytes[pos++];
+                i |= (b & 0x7FL) << shift;
+            }
+            return i;
+
+            //byte b = bytes[pos++];
+            //if (b >= 0) return b;
+            //long i = b & 0x7FL;
+            //b = bytes[pos++];
+            //i |= (b & 0x7FL) << 7;
+            //if (b >= 0) return i;
+            //b = bytes[pos++];
+            //i |= (b & 0x7FL) << 14;
+            //if (b >= 0) return i;
+            //b = bytes[pos++];
+            //i |= (b & 0x7FL) << 21;
+            //if (b >= 0) return i;
+            //b = bytes[pos++];
+            //i |= (b & 0x7FL) << 28;
+            //if (b >= 0) return i;
+            //b = bytes[pos++];
+            //i |= (b & 0x7FL) << 35;
+            //if (b >= 0) return i;
+            //b = bytes[pos++];
+            //i |= (b & 0x7FL) << 42;
+            //if (b >= 0) return i;
+            //b = bytes[pos++];
+            //i |= (b & 0x7FL) << 49;
+            //if (b >= 0) return i;
+            //b = bytes[pos++];
+            //i |= (b & 0x7FL) << 56;
+            //if (b >= 0) return i;
+            //throw new InvalidOperationException("Invalid vLong detected (negative values disallowed)");
         }
 
         public override byte ReadByte()

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/401752bd/src/core/Store/DataInput.cs
----------------------------------------------------------------------
diff --git a/src/core/Store/DataInput.cs b/src/core/Store/DataInput.cs
index 07310a1..3af7802 100644
--- a/src/core/Store/DataInput.cs
+++ b/src/core/Store/DataInput.cs
@@ -97,6 +97,16 @@ namespace Lucene.Net.Store
 
         public virtual long ReadVLong()
         {
+            // .NET Port: restored the loop form; the unrolled guard "if (b >= 0)" relied on Java's signed byte and always returned early in C#
+            byte b = ReadByte();
+            long i = b & 0x7F;
+            for (int shift = 7; (b & 0x80) != 0; shift += 7)
+            {
+                b = ReadByte();
+                i |= (b & 0x7FL) << shift;
+            }
+            return i;
+
             /* This is the original code of this method,
              * but a Hotspot bug (see LUCENE-2975) corrupts the for-loop if
              * ReadByte() is inlined. So the loop was unwinded!
@@ -108,34 +118,34 @@ namespace Lucene.Net.Store
             }
             return i;
             */
-            byte b = ReadByte();
-            if (b >= 0) return b;
-            long i = b & 0x7FL;
-            b = ReadByte();
-            i |= (b & 0x7FL) << 7;
-            if (b >= 0) return i;
-            b = ReadByte();
-            i |= (b & 0x7FL) << 14;
-            if (b >= 0) return i;
-            b = ReadByte();
-            i |= (b & 0x7FL) << 21;
-            if (b >= 0) return i;
-            b = ReadByte();
-            i |= (b & 0x7FL) << 28;
-            if (b >= 0) return i;
-            b = ReadByte();
-            i |= (b & 0x7FL) << 35;
-            if (b >= 0) return i;
-            b = ReadByte();
-            i |= (b & 0x7FL) << 42;
-            if (b >= 0) return i;
-            b = ReadByte();
-            i |= (b & 0x7FL) << 49;
-            if (b >= 0) return i;
-            b = ReadByte();
-            i |= (b & 0x7FL) << 56;
-            if (b >= 0) return i;
-            throw new System.IO.IOException("Invalid vLong detected (negative values disallowed)");
+            //byte b = ReadByte();
+            //if (b >= 0) return b;
+            //long i = b & 0x7FL;
+            //b = ReadByte();
+            //i |= (b & 0x7FL) << 7;
+            //if (b >= 0) return i;
+            //b = ReadByte();
+            //i |= (b & 0x7FL) << 14;
+            //if (b >= 0) return i;
+            //b = ReadByte();
+            //i |= (b & 0x7FL) << 21;
+            //if (b >= 0) return i;
+            //b = ReadByte();
+            //i |= (b & 0x7FL) << 28;
+            //if (b >= 0) return i;
+            //b = ReadByte();
+            //i |= (b & 0x7FL) << 35;
+            //if (b >= 0) return i;
+            //b = ReadByte();
+            //i |= (b & 0x7FL) << 42;
+            //if (b >= 0) return i;
+            //b = ReadByte();
+            //i |= (b & 0x7FL) << 49;
+            //if (b >= 0) return i;
+            //b = ReadByte();
+            //i |= (b & 0x7FL) << 56;
+            //if (b >= 0) return i;
+            //throw new System.IO.IOException("Invalid vLong detected (negative values disallowed)");
         }
 
         public virtual string ReadString()


[16/50] [abbrv] Massive cleanup, reducing compiler errors

Posted by mh...@apache.org.
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/80561f72/src/core/Util/Packed/BlockPackedWriter.cs
----------------------------------------------------------------------
diff --git a/src/core/Util/Packed/BlockPackedWriter.cs b/src/core/Util/Packed/BlockPackedWriter.cs
index 139c68e..9564e34 100644
--- a/src/core/Util/Packed/BlockPackedWriter.cs
+++ b/src/core/Util/Packed/BlockPackedWriter.cs
@@ -13,7 +13,7 @@ namespace Lucene.Net.Util.Packed
         {
         }
 
-        protected void Flush()
+        protected override void Flush()
         {
             //assert off > 0;
             long min = long.MaxValue, max = long.MinValue;

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/80561f72/src/core/Util/Packed/MonotonicAppendingLongBuffer.cs
----------------------------------------------------------------------
diff --git a/src/core/Util/Packed/MonotonicAppendingLongBuffer.cs b/src/core/Util/Packed/MonotonicAppendingLongBuffer.cs
index e4f517b..0ee7421 100644
--- a/src/core/Util/Packed/MonotonicAppendingLongBuffer.cs
+++ b/src/core/Util/Packed/MonotonicAppendingLongBuffer.cs
@@ -26,7 +26,7 @@ namespace Lucene.Net.Util.Packed
             averages = new float[16];
         }
 
-        internal long Get(int block, int element)
+        internal override long Get(int block, int element)
         {
             if (block == valuesOff)
             {
@@ -88,7 +88,7 @@ namespace Lucene.Net.Util.Packed
             }
         }
 
-        public Iterator GetIterator()
+        internal override AbstractAppendingLongBuffer.Iterator GetIterator()
         {
             return new Iterator(this);
         }
@@ -102,7 +102,7 @@ namespace Lucene.Net.Util.Packed
                 this.parent = parent;
             }
 
-            internal void FillValues()
+            internal override void FillValues()
             {
                 if (vOff == parent.valuesOff)
                 {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/80561f72/src/core/Util/Packed/Packed64SingleBlock.cs
----------------------------------------------------------------------
diff --git a/src/core/Util/Packed/Packed64SingleBlock.cs b/src/core/Util/Packed/Packed64SingleBlock.cs
index b01d064..b41cb14 100644
--- a/src/core/Util/Packed/Packed64SingleBlock.cs
+++ b/src/core/Util/Packed/Packed64SingleBlock.cs
@@ -8,7 +8,7 @@ using Lucene.Net.Support;
 
 namespace Lucene.Net.Util.Packed
 {
-    public class Packed64SingleBlock : PackedInts.Mutable
+    public abstract class Packed64SingleBlock : PackedInts.Mutable
     {
         public const int MAX_SUPPORTED_BITS_PER_VALUE = 32;
 
@@ -184,9 +184,12 @@ namespace Lucene.Net.Util.Packed
         }
 
 
-        protected override PackedInts.Format GetFormat()
+        protected override PackedInts.Format Format
         {
-            return PackedInts.Format.PACKED_SINGLE_BLOCK;
+            get
+            {
+                return PackedInts.Format.PACKED_SINGLE_BLOCK;
+            }
         }
 
         public override String ToString()
@@ -521,7 +524,7 @@ namespace Lucene.Net.Util.Packed
             {
             }
 
-            public long Get(int index)
+            public override long Get(int index)
             {
                 int o = Number.URShift(index, 2);
                 int b = index & 3;
@@ -529,7 +532,7 @@ namespace Lucene.Net.Util.Packed
                 return Number.URShift(blocks[o], shift) & 65535L;
             }
 
-            public void Set(int index, long value)
+            public override void Set(int index, long value)
             {
                 int o = Number.URShift(index, 2);
                 int b = index & 3;
@@ -585,5 +588,9 @@ namespace Lucene.Net.Util.Packed
                 blocks[o] = (blocks[o] & ~(4294967295L << shift)) | (value << shift);
             }
         }
+
+        public abstract override void Set(int index, long value);
+
+        public abstract override long Get(int index);
     }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/80561f72/src/core/Util/RollingBuffer.cs
----------------------------------------------------------------------
diff --git a/src/core/Util/RollingBuffer.cs b/src/core/Util/RollingBuffer.cs
index 0534d02..3ae2980 100644
--- a/src/core/Util/RollingBuffer.cs
+++ b/src/core/Util/RollingBuffer.cs
@@ -38,7 +38,7 @@ namespace Lucene.Net.Util
 
         protected abstract T NewInstance();
 
-        public void Reset()
+        public virtual void Reset()
         {
             nextWrite--;
             while (count > 0)


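The `abstract override` members added to Packed64SingleBlock above use a C# idiom worth spelling out: a class may re-declare an inherited virtual member as abstract, forcing every concrete subclass to supply its own implementation even though one exists further up the chain. A rough sketch with hypothetical names, not the ported classes:

    public abstract class PackedReaderBase
    {
        public virtual long Get(int index)
        {
            return 0L; // a default implementation exists here
        }
    }

    public abstract class SingleBlockBase : PackedReaderBase
    {
        // Re-abstract the member: concrete subclasses can no longer
        // silently inherit the default above.
        public abstract override long Get(int index);
    }

    public sealed class Packed16Sketch : SingleBlockBase
    {
        private readonly long[] blocks = new long[16];

        public override long Get(int index)
        {
            // each 64-bit block holds four 16-bit values
            return (blocks[index >> 2] >> ((index & 3) << 4)) & 0xFFFFL;
        }
    }
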
[46/50] [abbrv] git commit: Some bug fixes

Posted by mh...@apache.org.
Some bug fixes


Project: http://git-wip-us.apache.org/repos/asf/lucenenet/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucenenet/commit/86087f1d
Tree: http://git-wip-us.apache.org/repos/asf/lucenenet/tree/86087f1d
Diff: http://git-wip-us.apache.org/repos/asf/lucenenet/diff/86087f1d

Branch: refs/heads/branch_4x
Commit: 86087f1da88609097e60dc4f11aec342eeb4f8e3
Parents: c188376
Author: Paul Irwin <pa...@gmail.com>
Authored: Fri Aug 9 15:39:15 2013 -0400
Committer: Paul Irwin <pa...@gmail.com>
Committed: Fri Aug 9 15:39:15 2013 -0400

----------------------------------------------------------------------
 src/core/Store/BufferedIndexOutput.cs |  2 +-
 src/core/Util/Fst/BytesStore.cs       | 14 +++++++-------
 2 files changed, 8 insertions(+), 8 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucenenet/blob/86087f1d/src/core/Store/BufferedIndexOutput.cs
----------------------------------------------------------------------
diff --git a/src/core/Store/BufferedIndexOutput.cs b/src/core/Store/BufferedIndexOutput.cs
index 8c9c96f..8ebbb90 100644
--- a/src/core/Store/BufferedIndexOutput.cs
+++ b/src/core/Store/BufferedIndexOutput.cs
@@ -96,7 +96,7 @@ namespace Lucene.Net.Store
                     while (pos < length)
                     {
                         pieceLength = (length - pos < bytesLeft) ? length - pos : bytesLeft;
-                        Array.Copy(b, pos + offset, buffer, bufferPosition, pieceLength);
+                        Buffer.BlockCopy(b, pos + offset, buffer, bufferPosition, pieceLength);
                         pos += pieceLength;
                         bufferPosition += pieceLength;
                         // if the buffer is full, flush it

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/86087f1d/src/core/Util/Fst/BytesStore.cs
----------------------------------------------------------------------
diff --git a/src/core/Util/Fst/BytesStore.cs b/src/core/Util/Fst/BytesStore.cs
index ba1c6ce..d441a9b 100644
--- a/src/core/Util/Fst/BytesStore.cs
+++ b/src/core/Util/Fst/BytesStore.cs
@@ -77,7 +77,7 @@ namespace Lucene.Net.Util.Fst
                 var chunk = blockSize - nextWrite;
                 if (len <= chunk)
                 {
-                    Array.Copy(b, offset, current, nextWrite, len);
+                    System.Buffer.BlockCopy(b, offset, current, nextWrite, len);
                     nextWrite += len;
                     break;
                 }
@@ -85,7 +85,7 @@ namespace Lucene.Net.Util.Fst
                 {
                     if (chunk > 0)
                     {
-                        Array.Copy(b, offset, current, nextWrite, chunk);
+                        System.Buffer.BlockCopy(b, offset, current, nextWrite, chunk);
                         offset += chunk;
                         len -= chunk;
                     }
@@ -121,13 +121,13 @@ namespace Lucene.Net.Util.Fst
             {
                 if (len <= downTo)
                 {
-                    Array.Copy(b, offset, block, downTo - len, len);
+                    System.Buffer.BlockCopy(b, offset, block, downTo - len, len);
                     break;
                 }
                 else
                 {
                     len -= downTo;
-                    Array.Copy(b, offset + len, block, 0, downTo);
+                    System.Buffer.BlockCopy(b, offset + len, block, 0, downTo);
                     blockIndex--;
                     block = blocks[blockIndex];
                     downTo = blockSize;
@@ -282,7 +282,7 @@ namespace Lucene.Net.Util.Fst
             if (current != null)
             {
                 var lastBuffer = new sbyte[nextWrite];
-                Array.Copy(current, 0, lastBuffer, 0, nextWrite);
+                System.Buffer.BlockCopy(current, 0, lastBuffer, 0, nextWrite);
                 blocks[blocks.Count - 1] = lastBuffer;
                 current = null;
             }
@@ -332,7 +332,7 @@ namespace Lucene.Net.Util.Fst
                     var chunkLeft = _parent.blockSize - nextRead;
                     if (len <= chunkLeft)
                     {
-                        Array.Copy(current, nextRead, b, offset, len);
+                        System.Buffer.BlockCopy(current, nextRead, b, offset, len);
                         nextRead += len;
                         break;
                     }
@@ -340,7 +340,7 @@ namespace Lucene.Net.Util.Fst
                     {
                         if (chunkLeft > 0)
                         {
-                            Array.Copy(current, nextRead, b, offset, chunkLeft);
+                            System.Buffer.BlockCopy(current, nextRead, b, offset, chunkLeft);
                             offset += chunkLeft;
                             len -= chunkLeft;
                         }

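The Array.Copy-to-Buffer.BlockCopy substitution above is behavior-preserving here because every array involved is an sbyte[]: Buffer.BlockCopy counts bytes rather than elements, only accepts arrays of primitive types, and skips the per-element type checks Array.Copy performs, which makes it the usual choice in hot byte-copy loops. For wider primitives the two counts diverge, as this small aside shows:

    var src = new int[] { 1, 2, 3, 4 };
    var dst = new int[4];

    // Array.Copy counts elements:
    Array.Copy(src, 0, dst, 0, 4);

    // Buffer.BlockCopy counts bytes, so the same copy needs 4 * sizeof(int):
    Buffer.BlockCopy(src, 0, dst, 0, 4 * sizeof(int));

    // For byte/sbyte arrays, element count equals byte count, so the
    // mechanical swap in BufferedIndexOutput and BytesStore is safe.
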

[05/50] [abbrv] git commit: Port: more util unit tests

Posted by mh...@apache.org.
Port: more util unit tests


Project: http://git-wip-us.apache.org/repos/asf/lucenenet/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucenenet/commit/06f5d4b8
Tree: http://git-wip-us.apache.org/repos/asf/lucenenet/tree/06f5d4b8
Diff: http://git-wip-us.apache.org/repos/asf/lucenenet/diff/06f5d4b8

Branch: refs/heads/branch_4x
Commit: 06f5d4b8bc7a979b87831ba843bd936bc5d27055
Parents: d3c00f5
Author: James Blair <jm...@gmail.com>
Authored: Thu Jul 11 16:33:12 2013 -0400
Committer: James Blair <jm...@gmail.com>
Committed: Thu Jul 11 16:33:12 2013 -0400

----------------------------------------------------------------------
 test/core/Support/RandomExtensions.cs      |    9 +-
 test/core/Util/TestDoubleBarrelLRUCache.cs |   12 +-
 test/core/Util/TestIOUtils.cs              |  104 +++
 test/core/Util/TestIntsRef.cs              |   33 +
 test/core/Util/TestMaxFailureRule.cs       |   98 +++
 test/core/Util/TestNamedSPILoader.cs       |   32 +
 test/core/Util/TestNumericUtils.cs         | 1040 ++++++++++++-----------
 test/core/Util/TestOpenBitSet.cs           |  273 +++---
 test/core/Util/TestPagedBytes.cs           |  131 +++
 9 files changed, 1137 insertions(+), 595 deletions(-)
----------------------------------------------------------------------

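Most of the churn in TestNumericUtils below comes from moving off the old string-based prefix coding onto the BytesRef API; the encoding trick itself is unchanged: flip the sign bit so that signed values order correctly when compared as unsigned bytes. A worked illustration of that one step (the AddRange callbacks below perform the same XOR):

    // -1L is 0xFFFFFFFFFFFFFFFF; as raw unsigned bytes it would sort *after* 0L.
    long a = -1L ^ unchecked((long) 0x8000000000000000L); // 0x7FFFFFFFFFFFFFFF
    long b =  0L ^ unchecked((long) 0x8000000000000000L); // 0x8000000000000000
    // Compared as unsigned byte sequences, a now sorts before b,
    // restoring numeric order for the prefix-coded terms.
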

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/06f5d4b8/test/core/Support/RandomExtensions.cs
----------------------------------------------------------------------
diff --git a/test/core/Support/RandomExtensions.cs b/test/core/Support/RandomExtensions.cs
index 49625e5..8ec476d 100644
--- a/test/core/Support/RandomExtensions.cs
+++ b/test/core/Support/RandomExtensions.cs
@@ -23,14 +23,7 @@ namespace Lucene.Net.Test.Support
 
         public static bool NextBool(this Random random)
         {
-            var randInt = random.Next();
-            var adjusted = randInt - (int.MaxValue/2);
-            if (adjusted == 0)
-            {
-                BoolTieBreak = !BoolTieBreak;
-                return BoolTieBreak;
-            }
-            return adjusted > 0 ? true : false;
+            return random.NextDouble() > 0.5;
         }
 
         public static void NextBytes(this Random random, sbyte[] bytes)

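The simplified NextBool above leans on NextDouble(), which returns a double in [0, 1), so comparing against 0.5 picks each outcome with (essentially) equal probability and the old tie-breaking machinery disappears. An equally common formulation, shown only as an aside:

    public static bool NextBool(this Random random)
    {
        // Next(2) returns 0 or 1, each with equal probability.
        return random.Next(2) == 0;
    }
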
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/06f5d4b8/test/core/Util/TestDoubleBarrelLRUCache.cs
----------------------------------------------------------------------
diff --git a/test/core/Util/TestDoubleBarrelLRUCache.cs b/test/core/Util/TestDoubleBarrelLRUCache.cs
index fabe4ab..d7d4408 100644
--- a/test/core/Util/TestDoubleBarrelLRUCache.cs
+++ b/test/core/Util/TestDoubleBarrelLRUCache.cs
@@ -21,7 +21,7 @@ namespace Lucene.Net.Test.Util
             // access every 2nd item in cache
             for (var i = 0; i < n; i += 2)
             {
-                assertNotNull(cache[new CloneableInteger(i)]);
+                Assert.NotNull(cache[new CloneableInteger(i)]);
             }
 
             // add n/2 elements to cache, the ones that weren't
@@ -34,7 +34,7 @@ namespace Lucene.Net.Test.Util
             // access every 4th item in cache
             for (var i = 0; i < n; i += 4)
             {
-                assertNotNull(cache[new CloneableInteger(i)]);
+                Assert.NotNull(cache[new CloneableInteger(i)]);
             }
 
             // add 3/4n elements to cache, the ones that weren't
@@ -47,12 +47,12 @@ namespace Lucene.Net.Test.Util
             // access every 4th item in cache
             for (var i = 0; i < n; i += 4)
             {
-                assertNotNull(cache[new CloneableInteger(i)]);
+                Assert.NotNull(cache[new CloneableInteger(i)]);
             }
         }
 
         [Test]
-        public void TestLRUCache()
+        public virtual void TestLRUCache()
         {
             var n = 100;
             TestCache(new DoubleBarrelLRUCache<CloneableInteger, object>(n), n);
@@ -121,14 +121,14 @@ namespace Lucene.Net.Test.Util
         }
 
         long totMiss, totHit;
-        void AddResults(long miss, long hit)
+        internal virtual void AddResults(long miss, long hit)
         {
             totMiss += miss;
             totHit += hit;
         }
 
         [Test]
-        public void TestThreadCorrectness()
+        public virtual void TestThreadCorrectness()
         {
             var NUM_THREADS = 4;
             var CACHE_SIZE = 512;

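For context on what this test exercises: a double-barrel LRU cache approximates LRU eviction without per-access bookkeeping by keeping two maps. Hits that land only in the secondary map re-insert (promote) the entry; when the primary map fills, it becomes the secondary and a fresh primary takes over, implicitly discarding the coldest entries. A rough single-threaded sketch of the idea, not the ported class (which uses concurrent maps):

    using System.Collections.Generic;

    public class DoubleBarrelCacheSketch<TKey, TValue> where TValue : class
    {
        private readonly int maxSize;
        private Dictionary<TKey, TValue> primary = new Dictionary<TKey, TValue>();
        private Dictionary<TKey, TValue> secondary = new Dictionary<TKey, TValue>();

        public DoubleBarrelCacheSketch(int maxSize)
        {
            this.maxSize = maxSize;
        }

        public TValue Get(TKey key)
        {
            TValue value;
            if (primary.TryGetValue(key, out value)) return value;
            if (secondary.TryGetValue(key, out value))
            {
                Put(key, value); // promote so the entry survives the next swap
                return value;
            }
            return null;
        }

        public void Put(TKey key, TValue value)
        {
            if (primary.Count >= maxSize)
            {
                // Swap barrels: the old secondary (the coldest entries) is dropped.
                secondary = primary;
                primary = new Dictionary<TKey, TValue>();
            }
            primary[key] = value;
        }
    }
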
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/06f5d4b8/test/core/Util/TestIOUtils.cs
----------------------------------------------------------------------
diff --git a/test/core/Util/TestIOUtils.cs b/test/core/Util/TestIOUtils.cs
new file mode 100644
index 0000000..0a6a56e
--- /dev/null
+++ b/test/core/Util/TestIOUtils.cs
@@ -0,0 +1,104 @@
+using System;
+using System.IO;
+using Lucene.Net.Util;
+using NUnit.Framework;
+
+namespace Lucene.Net.Test.Util
+{
+    [TestFixture]
+    public class TestIOUtils : LuceneTestCase
+    {
+        internal sealed class BrokenCloseable : IDisposable
+        {
+            internal readonly int i;
+
+            public BrokenCloseable(int i)
+            {
+                this.i = i;
+            }
+
+            public void Dispose()
+            {
+                throw new IOException("TEST-IO-EXCEPTION-" + i);
+            }
+        }
+
+        internal sealed class TestException : Exception
+        {
+            public TestException()
+                : base("BASE-EXCEPTION") { }
+        }
+
+        [Test]
+        public virtual void TestSuppressedExceptions()
+        {
+            if (!Constants.JRE_IS_MINIMUM_JAVA7)
+            {
+                Console.Error.WriteLine("WARNING: TestIOUtils.TestSuppressedExceptions: Full test coverage only with Java 7, as suppressed exception recording is not supported before.");
+            }
+
+            // test with prior exception
+            try
+            {
+                var t = new TestException();
+                IOUtils.CloseWhileHandlingException(t, new BrokenCloseable(1), new BrokenCloseable(2));
+            }
+            catch (TestException e1)
+            {
+                Assert.AreEqual("BASE-EXCEPTION", e1.Message);
+                var sw = new StringWriter();
+                //PrintWriter pw = new PrintWriter(sw);
+                sw.Write(e1.StackTrace);
+                //e1.PrintStackTrace(pw);
+                //pw.Flush();
+                var trace = sw.ToString();
+                if (VERBOSE)
+                {
+                    Console.WriteLine("TestIOUtils.testSuppressedExceptions: Thrown Exception stack trace:");
+                    Console.WriteLine(trace);
+                }
+                if (Constants.JRE_IS_MINIMUM_JAVA7)
+                {
+                    Assert.IsTrue(trace.Contains("TEST-IO-EXCEPTION-1"),
+                      "Stack trace does not contain first suppressed Exception: " + trace);
+                    Assert.IsTrue(trace.Contains("TEST-IO-EXCEPTION-2"),
+                      "Stack trace does not contain second suppressed Exception: " + trace);
+                }
+            }
+            catch (IOException e2)
+            {
+                Assert.Fail("IOException should not be thrown here");
+            }
+
+            // test without prior exception
+            try
+            {
+                IOUtils.CloseWhileHandlingException((TestException)null, new BrokenCloseable(1), new BrokenCloseable(2));
+            }
+            catch (TestException e1)
+            {
+                Assert.Fail("TestException should not be thrown here");
+            }
+            catch (IOException e2)
+            {
+                Assert.AreEqual("TEST-IO-EXCEPTION-1", e2.Message);
+                var sw = new StringWriter();
+                //PrintWriter pw = new PrintWriter(sw);
+                sw.Write(e2.StackTrace);
+                //e2.printStackTrace(pw);
+                //pw.Flush();
+                var trace = sw.ToString();
+                if (VERBOSE)
+                {
+                    Console.WriteLine("TestIOUtils.testSuppressedExceptions: Thrown Exception stack trace:");
+                    Console.WriteLine(trace);
+                }
+                if (Constants.JRE_IS_MINIMUM_JAVA7)
+                {
+                    Assert.IsTrue(trace.Contains("TEST-IO-EXCEPTION-2"),
+                      "Stack trace does not contain suppressed Exception: " + trace);
+                }
+            }
+        }
+    }
+}

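The test above pins down the contract of IOUtils.CloseWhileHandlingException: every argument is disposed even if earlier ones throw, and a caller-supplied prior exception takes precedence over any dispose-time failure. A minimal sketch of that contract, assuming only the behavior the test asserts:

    using System;

    public static class IOUtilsSketch
    {
        public static void CloseWhileHandlingException(Exception priorException, params IDisposable[] objects)
        {
            Exception firstException = null;
            foreach (var o in objects)
            {
                try
                {
                    if (o != null) o.Dispose();
                }
                catch (Exception e)
                {
                    // Remember only the first failure; on Java 7 the rest
                    // would be attached as suppressed exceptions.
                    if (firstException == null) firstException = e;
                }
            }
            if (priorException != null) throw priorException;
            if (firstException != null) throw firstException;
        }
    }
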
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/06f5d4b8/test/core/Util/TestIntsRef.cs
----------------------------------------------------------------------
diff --git a/test/core/Util/TestIntsRef.cs b/test/core/Util/TestIntsRef.cs
new file mode 100644
index 0000000..3d12c9d
--- /dev/null
+++ b/test/core/Util/TestIntsRef.cs
@@ -0,0 +1,33 @@
+using Lucene.Net.Util;
+using NUnit.Framework;
+
+namespace Lucene.Net.Test.Util
+{
+    [TestFixture]
+    public class TestIntsRef : LuceneTestCase
+    {
+        [Test]
+        public virtual void TestEmpty()
+        {
+            var i = new IntsRef();
+            Assert.AreEqual(IntsRef.EMPTY_INTS, i.ints);
+            Assert.AreEqual(0, i.offset);
+            Assert.AreEqual(0, i.length);
+        }
+
+        [Test]
+        public virtual void TestFromInts()
+        {
+            var ints = new int[] { 1, 2, 3, 4 };
+            var i = new IntsRef(ints, 0, 4);
+            Assert.AreEqual(ints, i.ints);
+            Assert.AreEqual(0, i.offset);
+            Assert.AreEqual(4, i.length);
+
+            var i2 = new IntsRef(ints, 1, 3);
+            Assert.AreEqual(new IntsRef(new int[] { 2, 3, 4 }, 0, 3), i2);
+
+            Assert.IsFalse(i.Equals(i2));
+        }
+    }
+}

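Worth noting about the assertions above: IntsRef equality is defined over the offset/length window, not the backing array, so two refs compare equal whenever the slices they describe contain the same ints. Roughly:

    var backing = new int[] { 1, 2, 3, 4 };
    var slice = new IntsRef(backing, 1, 3);              // views { 2, 3, 4 }
    var copy = new IntsRef(new int[] { 2, 3, 4 }, 0, 3);
    // slice.Equals(copy) is true: same window contents, different arrays;
    // slice.Equals(new IntsRef(backing, 0, 4)) is false.
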
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/06f5d4b8/test/core/Util/TestMaxFailureRule.cs
----------------------------------------------------------------------
diff --git a/test/core/Util/TestMaxFailureRule.cs b/test/core/Util/TestMaxFailureRule.cs
new file mode 100644
index 0000000..effae23
--- /dev/null
+++ b/test/core/Util/TestMaxFailureRule.cs
@@ -0,0 +1,98 @@
+using System;
+using System.Text;
+using System.Text.RegularExpressions;
+using Lucene.Net.Store;
+using Lucene.Net.Util;
+using NUnit.Framework;
+
+namespace Lucene.Net.Test.Util
+{
+    [TestFixture]
+    public class TestMaxFailureRule : WithNestedTests
+    {
+        public SystemPropertiesRestoreRule restoreSysProps = new SystemPropertiesRestoreRule();
+
+        public TestMaxFailureRule() : base(true) { }
+
+        public class Nested : WithNestedTests.AbstractNestedTest
+        {
+            public const int TOTAL_ITERS = 500;
+            public const int DESIRED_FAILURES = TOTAL_ITERS / 10;
+            private int numFails = 0;
+            private int numIters = 0;
+
+            [Repeat(TOTAL_ITERS)]
+            [Test]
+            public void TestFailSometimes()
+            {
+                numIters++;
+                bool fail = new Random().Next(5) == 0;
+                if (fail) numFails++;
+                // some seeds are really lucky ... so cheat.
+                if (numFails < DESIRED_FAILURES &&
+                    DESIRED_FAILURES <= TOTAL_ITERS - numIters)
+                {
+                    fail = true;
+                }
+                Assert.IsFalse(fail);
+            }
+        }
+
+        private sealed class AnonymousRunListener : RunListener
+        {
+            private readonly StringBuilder results;
+            internal char lastTest;
+
+            public AnonymousRunListener(StringBuilder results)
+            {
+                this.results = results;
+            }
+
+            public override void TestStarted(Description description)
+            {
+                lastTest = 'S'; // success.
+            }
+
+            public override void TestAssumptionFailure(MockRAMDirectory.Failure failure)
+            {
+                lastTest = 'A'; // assumption failure.
+            }
+
+            public override void TestFailure(MockRAMDirectory.Failure failure)
+            {
+                lastTest = 'F'; // failure
+            }
+
+            public override void TestFinished(Description description)
+            {
+                results.Append(lastTest);
+            }
+        }
+
+        [Test]
+        public virtual void TestMaxFailures()
+        {
+            int maxFailures = LuceneTestCase.IgnoreAfterMaxFailures.maxFailures;
+            int failuresSoFar = LuceneTestCase.IgnoreAfterMaxFailures.failuresSoFar;
+            //System.clearProperty(SysGlobals.SYSPROP_ITERATIONS()); // JUnit randomizedtesting call; no direct .NET equivalent
+            try
+            {
+                LuceneTestCase.IgnoreAfterMaxFailures.maxFailures = 2;
+                LuceneTestCase.IgnoreAfterMaxFailures.failuresSoFar = 0;
+
+                JUnitCore core = new JUnitCore();
+                var results = new StringBuilder();
+                core.AddListener(new AnonymousRunListener(results));
+
+                Result result = core.Run(typeof(Nested)); // was Nested.class
+                Assert.AreEqual(500, result.RunCount);
+                Assert.AreEqual(0, result.IgnoreCount);
+                Assert.AreEqual(2, result.FailureCount);
+
+                // Make sure we had exactly two failures followed by assumption-failures
+                // resulting from ignored tests.
+                Assert.IsTrue(Regex.IsMatch(results.ToString(), "(S*F){2}A+"),
+                    results.ToString());
+
+            }
+            finally
+            {
+                LuceneTestCase.IgnoreAfterMaxFailures.maxFailures = maxFailures;
+                LuceneTestCase.IgnoreAfterMaxFailures.failuresSoFar = failuresSoFar;
+            }
+        }
+    }
+}

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/06f5d4b8/test/core/Util/TestNamedSPILoader.cs
----------------------------------------------------------------------
diff --git a/test/core/Util/TestNamedSPILoader.cs b/test/core/Util/TestNamedSPILoader.cs
new file mode 100644
index 0000000..896102f
--- /dev/null
+++ b/test/core/Util/TestNamedSPILoader.cs
@@ -0,0 +1,32 @@
+using System;
+using Lucene.Net.Codecs;
+using Lucene.Net.Util;
+using NUnit.Framework;
+
+namespace Lucene.Net.Test.Util
+{
+    [TestFixture]
+    public class TestNamedSPILoader : LuceneTestCase
+    {
+        [Test]
+        public virtual void TestLookup()
+        {
+            var codec = Codec.ForName("Lucene42");
+            Assert.AreEqual("Lucene42", codec.Name);
+        }
+
+        // we want an exception if it's not found.
+        [Test]
+        public virtual void TestBogusLookup()
+        {
+            Assert.Throws<ArgumentException>(() => Codec.ForName("dskfdskfsdfksdfdsf"));
+        }
+
+        [Test]
+        public virtual void TestAvailableServices()
+        {
+            var codecs = Codec.AvailableCodecs;
+            Assert.IsTrue(codecs.Contains("Lucene42"));
+        }
+    }
+}

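The lookups above go through a name-to-service registry: Codec.ForName consults a loader that maps each service's Name to an instance and throws for unknown names, which is what TestBogusLookup relies on. A toy sketch of the pattern, with hypothetical type names:

    using System;
    using System.Collections.Generic;

    public class NamedServiceLoaderSketch<TService> where TService : class
    {
        private readonly Dictionary<string, TService> services =
            new Dictionary<string, TService>();

        public void Register(string name, TService service)
        {
            services[name] = service;
        }

        public TService Lookup(string name)
        {
            TService service;
            if (!services.TryGetValue(name, out service))
                throw new ArgumentException("A service with name '" + name + "' does not exist.");
            return service;
        }

        public ICollection<string> AvailableServices
        {
            get { return services.Keys; }
        }
    }
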
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/06f5d4b8/test/core/Util/TestNumericUtils.cs
----------------------------------------------------------------------
diff --git a/test/core/Util/TestNumericUtils.cs b/test/core/Util/TestNumericUtils.cs
index c2064a5..0140d9a 100644
--- a/test/core/Util/TestNumericUtils.cs
+++ b/test/core/Util/TestNumericUtils.cs
@@ -17,388 +17,443 @@
 
 using System;
 using System.Collections.Generic;
-using System.Linq;
 using Lucene.Net.Support;
+using Lucene.Net.Test.Support;
 using NUnit.Framework;
 
 namespace Lucene.Net.Util
 {
-	
     [TestFixture]
-	public class TestNumericUtils:LuceneTestCase
-	{
-		private class AnonymousClassLongRangeBuilder:NumericUtils.LongRangeBuilder
-		{
-			public AnonymousClassLongRangeBuilder(long lower, long upper, bool useBitSet, Lucene.Net.Util.OpenBitSet bits, System.Collections.IEnumerator neededBounds, System.Collections.IEnumerator neededShifts,TestNumericUtils enclosingInstance)
-			{
-				InitBlock(lower, upper, useBitSet, bits, neededBounds, neededShifts, enclosingInstance);
-			}
-			private void  InitBlock(long lower, long upper, bool useBitSet, Lucene.Net.Util.OpenBitSet bits, System.Collections.IEnumerator neededBounds, System.Collections.IEnumerator neededShifts,TestNumericUtils enclosingInstance)
-			{
-				this.lower = lower;
-				this.upper = upper;
-				this.useBitSet = useBitSet;
-				this.bits = bits;
-				this.neededBounds = neededBounds;
-                this.neededShifts = neededShifts;
-				this.enclosingInstance = enclosingInstance;
-			}
-			private long lower;
-			private long upper;
-			private bool useBitSet;
-			private Lucene.Net.Util.OpenBitSet bits;
-			private System.Collections.IEnumerator neededBounds;
-            private System.Collections.IEnumerator neededShifts;
-			private TestNumericUtils enclosingInstance;
-			public TestNumericUtils Enclosing_Instance
-			{
-				get
-				{
-					return enclosingInstance;
-				}
-				
-			}
-			//@Override
-			public override void  AddRange(long min, long max, int shift)
-			{
-				Assert.IsTrue(min >= lower && min <= upper && max >= lower && max <= upper, "min, max should be inside bounds");
-				if (useBitSet)
-					for (long l = min; l <= max; l++)
-					{
-						Assert.IsFalse(bits.GetAndSet(l - lower), "ranges should not overlap");
-                        // extra exit condition to prevent overflow on MAX_VALUE
-                        if (l == max) break;
-					}
-                if (neededBounds == null || neededShifts == null) return;
-				// make unsigned longs for easier display and understanding
-				min ^= unchecked((long) 0x8000000000000000L);
-				max ^= unchecked((long) 0x8000000000000000L);
-				//System.out.println("Long.valueOf(0x"+Long.toHexString(min>>>shift)+"L),Long.valueOf(0x"+Long.toHexString(max>>>shift)+"L)/*shift="+shift+"*/,");
-                neededShifts.MoveNext();
-                Assert.AreEqual(((Int32)neededShifts.Current), shift, "shift");
-                neededBounds.MoveNext();
-                unchecked
+    public class TestNumericUtils : LuceneTestCase
+    {
+        [Test]
+        public virtual void TestLongConversionAndOrdering()
+        {
+            // generate a series of encoded longs, each numerical one bigger than the one before
+            BytesRef last = null, act = new BytesRef(NumericUtils.BUF_SIZE_LONG);
+            for (var l = -100000L; l < 100000L; l++)
+            {
+                NumericUtils.LongToPrefixCodedBytes(l, 0, act);
+                if (last != null)
                 {
-                    Assert.AreEqual((long)neededBounds.Current, Number.URShift(min, shift), "inner min bound");
-                    neededBounds.MoveNext();
-                    Assert.AreEqual((long)neededBounds.Current, Number.URShift(max, shift), "inner max bound");
+                    // test if smaller
+                    Assert.IsTrue(last.CompareTo(act) < 0, "actual bigger than last (BytesRef)");
+                    Assert.IsTrue(String.CompareOrdinal(last.Utf8ToString(), act.Utf8ToString()) < 0, "actual bigger than last (as string)");
                 }
-			}
-		}
-
-		private class AnonymousClassIntRangeBuilder:NumericUtils.IntRangeBuilder
-		{
-            public AnonymousClassIntRangeBuilder(int lower, int upper, bool useBitSet, Lucene.Net.Util.OpenBitSet bits, IEnumerator<int> neededBounds, IEnumerator<int> neededShifts, TestNumericUtils enclosingInstance)
-			{
-                InitBlock(lower, upper, useBitSet, bits, neededBounds, neededShifts,enclosingInstance);
-			}
-            private void InitBlock(int lower, int upper, bool useBitSet, Lucene.Net.Util.OpenBitSet bits, IEnumerator<int> neededBounds, IEnumerator<int> neededShifts, TestNumericUtils enclosingInstance)
-			{
-				this.lower = lower;
-				this.upper = upper;
-				this.useBitSet = useBitSet;
-				this.bits = bits;
-				this.neededBounds = neededBounds;
-                this.neededShifts = neededShifts;
-				this.enclosingInstance = enclosingInstance;
-			}
-			private int lower;
-			private int upper;
-			private bool useBitSet;
-			private Lucene.Net.Util.OpenBitSet bits;
-            private IEnumerator<int> neededBounds;
-            private IEnumerator<int> neededShifts;
-			private TestNumericUtils enclosingInstance;
-			public TestNumericUtils Enclosing_Instance
-			{
-				get
-				{
-					return enclosingInstance;
-				}
-				
-			}
-			//@Override
-			public override void  AddRange(int min, int max, int shift)
-			{
-				Assert.IsTrue(min >= lower && min <= upper && max >= lower && max <= upper, "min, max should be inside bounds");
-				if (useBitSet)
-					for (int i = min; i <= max; i++)
-					{
-						Assert.IsFalse(bits.GetAndSet(i - lower), "ranges should not overlap");
-                        // extra exit condition to prevent overflow on MAX_VALUE
-                        if (i == max) break;
-					}
-                if (neededBounds == null) return;
-				// make unsigned ints for easier display and understanding
-				min ^= unchecked((int) 0x80000000);
-				max ^= unchecked((int) 0x80000000);
-                neededShifts.MoveNext();
-                Assert.AreEqual(((int)neededShifts.Current), shift,"shift");
-				//System.out.println("new Integer(0x"+Integer.toHexString(min>>>shift)+"),new Integer(0x"+Integer.toHexString(max>>>shift)+"),");
-                neededBounds.MoveNext();
-				Assert.AreEqual(((System.Int32) neededBounds.Current), Number.URShift(min, shift), "inner min bound");
-                neededBounds.MoveNext();
-				Assert.AreEqual(((System.Int32) neededBounds.Current), Number.URShift(max, shift), "inner max bound");
-			}
-		}
-		
+                // test if back and forward conversion works
+                Assert.AreEqual(l, NumericUtils.PrefixCodedToLong(act), "forward and back conversion should generate same long");
+                // next step
+                last = act;
+                act = new BytesRef(NumericUtils.BUF_SIZE_LONG);
+            }
+        }
+
         [Test]
-		public virtual void  TestLongConversionAndOrdering()
-		{
-			// generate a series of encoded longs, each numerical one bigger than the one before
-			System.String last = null;
-			for (long l = - 100000L; l < 100000L; l++)
-			{
-				System.String act = NumericUtils.LongToPrefixCoded(l);
-				if (last != null)
-				{
-					// test if smaller
-					Assert.IsTrue(String.CompareOrdinal(last, act) < 0, "actual bigger than last");
-				}
-				// test is back and forward conversion works
-				Assert.AreEqual(l, NumericUtils.PrefixCodedToLong(act), "forward and back conversion should generate same long");
-				// next step
-				last = act;
-			}
-		}
-		
+        public virtual void TestIntConversionAndOrdering()
+        {
+            // generate a series of encoded ints, each numerical one bigger than the one before
+            BytesRef last = null, act = new BytesRef(NumericUtils.BUF_SIZE_INT);
+            for (var i = -100000; i < 100000; i++)
+            {
+                NumericUtils.IntToPrefixCodedBytes(i, 0, act);
+                if (last != null)
+                {
+                    // test if smaller
+                    Assert.IsTrue(last.CompareTo(act) < 0, "actual bigger than last (BytesRef)");
+                    Assert.IsTrue(String.CompareOrdinal(last.Utf8ToString(), act.Utf8ToString()) < 0, "actual bigger than last (as string)");
+                }
+                // test if back and forward conversion works
+                Assert.AreEqual(i, NumericUtils.PrefixCodedToInt(act), "forward and back conversion should generate same int");
+                // next step
+                last = act;
+                act = new BytesRef(NumericUtils.BUF_SIZE_INT);
+            }
+        }
+
+        [Test]
+        public virtual void TestLongSpecialValues()
+        {
+            var vals = new long[]
+                {
+                    long.MinValue, long.MinValue + 1, long.MinValue + 2, -5003400000000L,
+                    -4000L, -3000L, -2000L, -1000L, -1L, 0L, 1L, 10L, 300L, 50006789999999999L, long.MaxValue - 2,
+                    long.MaxValue - 1, long.MaxValue
+                };
+            var prefixVals = new BytesRef[vals.Length];
+
+            for (var i = 0; i < vals.Length; i++)
+            {
+                prefixVals[i] = new BytesRef(NumericUtils.BUF_SIZE_LONG);
+                NumericUtils.LongToPrefixCodedBytes(vals[i], 0, prefixVals[i]);
+
+                // check forward and back conversion
+                Assert.AreEqual(vals[i], NumericUtils.PrefixCodedToLong(prefixVals[i]), "forward and back conversion should generate same long");
+
+                Assert.Throws<FormatException>(() => NumericUtils.PrefixCodedToInt(prefixVals[i]),
+                                               "decoding a prefix coded long value as int should fail");
+            }
+
+            // check sort order (prefixVals should be ascending)
+            for (var i = 1; i < prefixVals.Length; i++)
+            {
+                Assert.IsTrue(prefixVals[i - 1].CompareTo(prefixVals[i]) < 0, "check sort order");
+            }
+
+            // check the prefix encoding, lower precision should have the difference to original value equal to the lower removed bits
+            var bytesRef = new BytesRef(NumericUtils.BUF_SIZE_LONG);
+            foreach (var t in vals)
+            {
+                for (var j = 0; j < 64; j++)
+                {
+                    NumericUtils.LongToPrefixCodedBytes(t, j, bytesRef);
+                    var prefixVal = NumericUtils.PrefixCodedToLong(bytesRef);
+                    var mask = (1L << j) - 1L;
+                    Assert.AreEqual(t & mask, t - prefixVal, "difference between prefix val and original value for " + t + " with shift=" + j);
+                }
+            }
+        }
+
         [Test]
-		public virtual void  TestIntConversionAndOrdering()
-		{
-			// generate a series of encoded ints, each numerical one bigger than the one before
-			System.String last = null;
-			for (int i = - 100000; i < 100000; i++)
-			{
-				System.String act = NumericUtils.IntToPrefixCoded(i);
-				if (last != null)
-				{
-					// test if smaller
-					Assert.IsTrue(String.CompareOrdinal(last, act) < 0, "actual bigger than last");
-				}
-				// test is back and forward conversion works
-				Assert.AreEqual(i, NumericUtils.PrefixCodedToInt(act), "forward and back conversion should generate same int");
-				// next step
-				last = act;
-			}
-		}
-		
+        public virtual void TestIntSpecialValues()
+        {
+            var vals = new int[]
+                {
+                    int.MinValue, int.MinValue + 1, int.MinValue + 2, -64765767,
+                    -4000, -3000, -2000, -1000, -1, 0, 1, 10, 300, 765878989, int.MaxValue - 2, int.MaxValue - 1,
+                    int.MaxValue
+                };
+            var prefixVals = new BytesRef[vals.Length];
+
+            for (var i = 0; i < vals.Length; i++)
+            {
+                prefixVals[i] = new BytesRef(NumericUtils.BUF_SIZE_INT);
+                NumericUtils.IntToPrefixCodedBytes(vals[i], 0, prefixVals[i]);
+
+                // check forward and back conversion
+                Assert.AreEqual(vals[i], NumericUtils.PrefixCodedToInt(prefixVals[i]), "forward and back conversion should generate same int");
+
+                Assert.Throws<FormatException>(() => NumericUtils.PrefixCodedToLong(prefixVals[i]), "decoding a prefix coded int value as long should fail");
+            }
+
+            // check sort order (prefixVals should be ascending)
+            for (var i = 1; i < prefixVals.Length; i++)
+            {
+                Assert.IsTrue(prefixVals[i - 1].CompareTo(prefixVals[i]) < 0, "check sort order");
+            }
+
+            // check the prefix encoding, lower precision should have the difference to original value equal to the lower removed bits
+            var bytesRef = new BytesRef(NumericUtils.BUF_SIZE_LONG);
+            foreach (var t in vals)
+            {
+                for (var j = 0; j < 32; j++)
+                {
+                    NumericUtils.IntToPrefixCodedBytes(t, j, bytesRef);
+                    var prefixVal = NumericUtils.PrefixCodedToInt(bytesRef);
+                    var mask = (1 << j) - 1;
+                    Assert.AreEqual(t & mask, t - prefixVal,
+                                    "difference between prefix val and original value for " + t + " with shift=" + j);
+                }
+            }
+        }
+
         [Test]
-		public virtual void  TestLongSpecialValues()
-		{
-			long[] vals = new long[]{System.Int64.MinValue, System.Int64.MinValue + 1, System.Int64.MinValue + 2, - 5003400000000L, - 4000L, - 3000L, - 2000L, - 1000L, - 1L, 0L, 1L, 10L, 300L, 50006789999999999L, System.Int64.MaxValue - 2, System.Int64.MaxValue - 1, System.Int64.MaxValue};
-			System.String[] prefixVals = new System.String[vals.Length];
-			
-			for (int i = 0; i < vals.Length; i++)
-			{
-				prefixVals[i] = NumericUtils.LongToPrefixCoded(vals[i]);
-				
-				// check forward and back conversion
-				Assert.AreEqual(vals[i], NumericUtils.PrefixCodedToLong(prefixVals[i]), "forward and back conversion should generate same long");
-				
-				// test if decoding values as int fails correctly
-			    Assert.Throws<FormatException>(() => NumericUtils.PrefixCodedToInt(prefixVals[i]),
-			                                   "decoding a prefix coded long value as int should fail");
-			}
-			
-			// check sort order (prefixVals should be ascending)
-			for (int i = 1; i < prefixVals.Length; i++)
-			{
-				Assert.IsTrue(String.CompareOrdinal(prefixVals[i - 1], prefixVals[i]) < 0, "check sort order");
-			}
-			
-			// check the prefix encoding, lower precision should have the difference to original value equal to the lower removed bits
-			for (int i = 0; i < vals.Length; i++)
-			{
-				for (int j = 0; j < 64; j++)
-				{
-					long prefixVal = NumericUtils.PrefixCodedToLong(NumericUtils.LongToPrefixCoded(vals[i], j));
-					long mask = (1L << j) - 1L;
-					Assert.AreEqual(vals[i] & mask, vals[i] - prefixVal, "difference between prefix val and original value for " + vals[i] + " with shift=" + j);
-				}
-			}
-		}
-		
+        public virtual void TestDoubles()
+        {
+            var vals = new double[]
+                {
+                    double.NegativeInfinity, -2.3E25, -1.0E15, -1.0, -1.0E-1, -1.0E-2, -0.0,
+                    +0.0, 1.0E-2, 1.0E-1, 1.0, 1.0E15, 2.3E25, double.PositiveInfinity, double.NaN
+                };
+            var longVals = new long[vals.Length];
+
+            // check forward and back conversion
+            for (var i = 0; i < vals.Length; i++)
+            {
+                longVals[i] = NumericUtils.DoubleToSortableLong(vals[i]);
+                Assert.IsTrue(vals[i].CompareTo(NumericUtils.SortableLongToDouble(longVals[i])) == 0,
+                    "forward and back conversion should generate same double");
+            }
+
+            // check sort order (prefixVals should be ascending)
+            for (var i = 1; i < longVals.Length; i++)
+            {
+                Assert.IsTrue(longVals[i - 1] < longVals[i], "check sort order");
+            }
+        }
+
+        public static readonly double[] DOUBLE_NANs =
+            {
+                double.NaN,
+                BitConverter.Int64BitsToDouble(0x7ff0000000000001L),
+                BitConverter.Int64BitsToDouble(0x7fffffffffffffffL),
+                BitConverter.Int64BitsToDouble(unchecked((long) 0xfff0000000000001L)),
+                BitConverter.Int64BitsToDouble(unchecked((long) 0xffffffffffffffffL))
+            };
+
         [Test]
-		public virtual void  TestIntSpecialValues()
-		{
-			int[] vals = new int[]{System.Int32.MinValue, System.Int32.MinValue + 1, System.Int32.MinValue + 2, - 64765767, - 4000, - 3000, - 2000, - 1000, - 1, 0, 1, 10, 300, 765878989, System.Int32.MaxValue - 2, System.Int32.MaxValue - 1, System.Int32.MaxValue};
-			System.String[] prefixVals = new System.String[vals.Length];
-			
-			for (int i = 0; i < vals.Length; i++)
-			{
-				prefixVals[i] = NumericUtils.IntToPrefixCoded(vals[i]);
-				
-				// check forward and back conversion
-				Assert.AreEqual(vals[i], NumericUtils.PrefixCodedToInt(prefixVals[i]), "forward and back conversion should generate same int");
-				
-				// test if decoding values as long fails correctly
-			    Assert.Throws<FormatException>(() => NumericUtils.PrefixCodedToLong(prefixVals[i]),
-			                                   "decoding a prefix coded int value as long should fail");
-			}
-			
-			// check sort order (prefixVals should be ascending)
-			for (int i = 1; i < prefixVals.Length; i++)
-			{
-				Assert.IsTrue(String.CompareOrdinal(prefixVals[i - 1], prefixVals[i]) < 0, "check sort order");
-			}
-			
-			// check the prefix encoding, lower precision should have the difference to original value equal to the lower removed bits
-			for (int i = 0; i < vals.Length; i++)
-			{
-				for (int j = 0; j < 32; j++)
-				{
-					int prefixVal = NumericUtils.PrefixCodedToInt(NumericUtils.IntToPrefixCoded(vals[i], j));
-					int mask = (1 << j) - 1;
-					Assert.AreEqual(vals[i] & mask, vals[i] - prefixVal, "difference between prefix val and original value for " + vals[i] + " with shift=" + j);
-				}
-			}
-		}
-		
+        public virtual void TestSortableDoubleNaN()
+        {
+            var plusInf = NumericUtils.DoubleToSortableLong(double.PositiveInfinity);
+            foreach (var nan in DOUBLE_NANs)
+            {
+                Assert.IsTrue(double.IsNaN(nan));
+                var sortable = NumericUtils.DoubleToSortableLong(nan);
+                Assert.IsTrue(sortable > plusInf,
+                    "double not sorted correctly: " + nan + ", long repr: "
+                           + sortable + ", positive inf.: " + plusInf);
+            }
+        }
+
         [Test]
-		public virtual void  TestDoubles()
-		{
-			double[] vals = new double[]{System.Double.NegativeInfinity, - 2.3e25, - 1.0e15, - 1.0, - 1.0e-1, - 1.0e-2, - 0.0, + 0.0, 1.0e-2, 1.0e-1, 1.0, 1.0e15, 2.3e25, System.Double.PositiveInfinity};
-			long[] longVals = new long[vals.Length];
-			
-			// check forward and back conversion
-			for (int i = 0; i < vals.Length; i++)
-			{
-				longVals[i] = NumericUtils.DoubleToSortableLong(vals[i]);
-				Assert.IsTrue(vals[i].CompareTo(NumericUtils.SortableLongToDouble(longVals[i])) == 0, "forward and back conversion should generate same double");
-			}
-			
-			// check sort order (prefixVals should be ascending)
-			for (int i = 1; i < longVals.Length; i++)
-			{
-				Assert.IsTrue(longVals[i - 1] < longVals[i], "check sort order");
-			}
-		}
-		
+        public virtual void TestFloats()
+        {
+            var vals = new float[]
+                {
+                    float.NegativeInfinity, -2.3E25f, -1.0E15f, -1.0f, -1.0E-1f, -1.0E-2f, -0.0f,
+                    +0.0f, 1.0E-2f, 1.0E-1f, 1.0f, 1.0E15f, 2.3E25f, float.PositiveInfinity, float.NaN
+                };
+            var intVals = new int[vals.Length];
+
+            // check forward and back conversion
+            for (var i = 0; i < vals.Length; i++)
+            {
+                intVals[i] = NumericUtils.FloatToSortableInt(vals[i]);
+                Assert.IsTrue(vals[i].CompareTo(NumericUtils.SortableIntToFloat(intVals[i])) == 0,
+                    "forward and back conversion should generate same float");
+            }
+
+            // check sort order (prefixVals should be ascending)
+            for (var i = 1; i < intVals.Length; i++)
+            {
+                Assert.IsTrue(intVals[i - 1] < intVals[i], "check sort order");
+            }
+        }
+
+        public static readonly float[] FLOAT_NANs =
+            {
+                float.NaN,
+                BitConverter.ToSingle(BitConverter.GetBytes(0x7f800001), 0),
+                BitConverter.ToSingle(BitConverter.GetBytes(0x7fffffff), 0),
+                BitConverter.ToSingle(BitConverter.GetBytes(unchecked((int) 0xff800001)), 0),
+                BitConverter.ToSingle(BitConverter.GetBytes(unchecked((int) 0xffffffff)), 0)
+            };
+
         [Test]
-		public virtual void  TestFloats()
-		{
-			float[] vals = new float[]{System.Single.NegativeInfinity, - 2.3e25f, - 1.0e15f, - 1.0f, - 1.0e-1f, - 1.0e-2f, - 0.0f, + 0.0f, 1.0e-2f, 1.0e-1f, 1.0f, 1.0e15f, 2.3e25f, System.Single.PositiveInfinity};
-			int[] intVals = new int[vals.Length];
-			
-			// check forward and back conversion
-			for (int i = 0; i < vals.Length; i++)
-			{
-				intVals[i] = NumericUtils.FloatToSortableInt(vals[i]);
-				Assert.IsTrue(vals[i].CompareTo(NumericUtils.SortableIntToFloat(intVals[i])) == 0, "forward and back conversion should generate same double");
-			}
-			
-			// check sort order (prefixVals should be ascending)
-			for (int i = 1; i < intVals.Length; i++)
-			{
-				Assert.IsTrue(intVals[i - 1] < intVals[i], "check sort order");
-			}
-		}
-		
-		// INFO: Tests for trieCodeLong()/trieCodeInt() not needed because implicitely tested by range filter tests
-		
-		/// <summary>Note: The neededBounds iterator must be unsigned (easier understanding what's happening) </summary>
-        internal virtual void AssertLongRangeSplit(long lower, long upper, int precisionStep, bool useBitSet, IEnumerator<long> neededBounds, IEnumerator<int> neededShifts)
-		{
-		    OpenBitSet bits = useBitSet ? new OpenBitSet(upper - lower + 1) : null;
-
-		    NumericUtils.SplitLongRange(
-		        new AnonymousClassLongRangeBuilder(lower, upper, useBitSet, bits, neededBounds, neededShifts, this),
-		        precisionStep, lower, upper);
-
-		    if (useBitSet)
-		    {
-		        // after flipping all bits in the range, the cardinality should be zero
-		        bits.Flip(0, upper - lower + 1);
-		        Assert.IsTrue(bits.IsEmpty(), "The sub-range concenated should match the whole range");
-		    }
-		}
-
-        /* LUCENE-2541: NumericRangeQuery errors with endpoints near long min and max values */
+        public virtual void TestSortableFloatNaN()
+        {
+            var plusInf = NumericUtils.FloatToSortableInt(float.PositiveInfinity);
+            foreach (var nan in FLOAT_NANs)
+            {
+                Assert.IsTrue(float.IsNaN(nan));
+                var sortable = NumericUtils.FloatToSortableInt(nan);
+                Assert.IsTrue(sortable > plusInf, "float not sorted correctly: " + nan + ", int repr: "
+                           + sortable + ", positive inf.: " + plusInf);
+            }
+        }
+
+        // INFO: Tests for trieCodeLong()/trieCodeInt() not needed because implicitly tested by range filter tests
+
+        private sealed class AnonymousLongRangeBuilder : NumericUtils.LongRangeBuilder
+        {
+            private long lower, upper;
+            private OpenBitSet bits;
+            private bool useBitSet;
+            private IEnumerator<int> neededShifts;
+            private IEnumerator<long> neededBounds;
+ 
+            public AnonymousLongRangeBuilder(long lower, long upper, OpenBitSet bits, bool useBitSet,
+                                             IEnumerator<int> neededShifts, IEnumerator<long> neededBounds)
+            {
+                this.lower = lower;
+                this.upper = upper;
+                this.bits = bits;
+                this.useBitSet = useBitSet;
+                this.neededBounds = neededBounds;
+                this.neededShifts = neededShifts;
+            }
+
+            public override void AddRange(long min, long max, int shift)
+            {
+                Assert.IsTrue(min >= lower && min <= upper && max >= lower && max <= upper,
+                    "min, max should be inside bounds");
+                if (useBitSet)
+                    for (long l = min; l <= max; l++)
+                    {
+                        Assert.IsFalse(bits.GetAndSet(l - lower), "ranges should not overlap");
+                        // extra exit condition to prevent overflow on MaxValue
+                        if (l == max) break;
+                    }
+                if (neededBounds == null || neededShifts == null)
+                    return;
+                // make unsigned longs for easier display and understanding
+                min ^= unchecked((long) 0x8000000000000000L);
+                max ^= unchecked((long) 0x8000000000000000L);
+                //System.out.println("0x"+long.toHexString(min>>>shift)+"L,0x"+long.toHexString(max>>>shift)+"L)/*shift="+shift+"*/,");
+                Assert.IsTrue(neededShifts.MoveNext());
+                Assert.IsTrue(neededShifts.Current.Equals(shift), "shift");
+                Assert.IsTrue(neededBounds.MoveNext());
+                Assert.IsTrue(neededBounds.Current.Equals(Number.URShift(min, shift)), "inner min bound");
+                Assert.IsTrue(neededBounds.MoveNext());
+                Assert.IsTrue(neededBounds.Current.Equals(Number.URShift(max, shift)), "inner max bound");
+            }
+        }
+
+        /// <summary>Note: The neededBounds IEnumerable must be unsigned (makes it easier to understand what's happening)</summary>
+        private void AssertLongRangeSplit(long lower, long upper, int precisionStep,
+                                          bool useBitSet, IEnumerable<long> expectedBounds, IEnumerable<int> expectedShifts)
+        {
+            // Cannot use FixedBitSet since the range could be long:
+            var bits = useBitSet ? new OpenBitSet(upper - lower + 1) : null;
+            var neededBounds = (expectedBounds == null) ? null : expectedBounds.GetEnumerator();
+            var neededShifts = (expectedShifts == null) ? null : expectedShifts.GetEnumerator();
+
+            NumericUtils.SplitLongRange(new AnonymousLongRangeBuilder(lower, upper, bits, useBitSet, neededShifts, neededBounds),
+                                        precisionStep, lower, upper);
+
+            if (useBitSet)
+            {
+                // after flipping all bits in the range, the cardinality should be zero
+                bits.Flip(0, upper - lower + 1);
+                Assert.IsTrue(bits.Cardinality.Equals(0), "The sub-ranges concatenated should match the whole range");
+            }
+        }
+
+        /// <summary>LUCENE-2541: NumericRangeQuery errors with endpoints near long min and max values</summary>
         [Test]
-        public void TestLongExtremeValues()
+        public virtual void TestLongExtremeValues()
         {
             // upper end extremes
-            AssertLongRangeSplit(long.MaxValue, long.MaxValue, 1, true,
-                new ulong[] { 0xffffffffffffffffL, 0xffffffffffffffffL }.Cast<long>().GetEnumerator(),
-                new int[] { 0 }.AsEnumerable().GetEnumerator());
-
-            AssertLongRangeSplit(long.MaxValue, long.MaxValue, 2, true,
-                new ulong[] { 0xffffffffffffffffL, 0xffffffffffffffffL }.Cast<long>().GetEnumerator(),
-                new int[] { 0 }.AsEnumerable().GetEnumerator());
-
-            AssertLongRangeSplit(long.MaxValue, long.MaxValue, 4, true,
-                new ulong[] { 0xffffffffffffffffL, 0xffffffffffffffffL }.Cast<long>().GetEnumerator(),
-                new int[] { 0 }.AsEnumerable().GetEnumerator());
-
-            AssertLongRangeSplit(long.MaxValue, long.MaxValue, 6, true,
-                new ulong[] { 0xffffffffffffffffL, 0xffffffffffffffffL }.Cast<long>().GetEnumerator(),
-                new int[] { 0 }.AsEnumerable().GetEnumerator());
-
-            AssertLongRangeSplit(long.MaxValue, long.MaxValue, 8, true,
-                new ulong[] { 0xffffffffffffffffL, 0xffffffffffffffffL }.Cast<long>().GetEnumerator(),
-                new int[] { 0 }.AsEnumerable().GetEnumerator());
-
-            AssertLongRangeSplit(long.MaxValue, long.MaxValue, 64, true,
-                new ulong[] { 0xffffffffffffffffL, 0xffffffffffffffffL }.Cast<long>().GetEnumerator(),
-                new int[] { 0 }.AsEnumerable().GetEnumerator());
-            
-            AssertLongRangeSplit(long.MaxValue - 0xfL, long.MaxValue, 4, true,
-                new ulong[] { 0xfffffffffffffffL, 0xfffffffffffffffL }.Cast<long>().GetEnumerator(),
-                new int[] { 4 }.AsEnumerable().GetEnumerator());
-            AssertLongRangeSplit(long.MaxValue - 0x10L, long.MaxValue, 4, true,
-                new ulong[] { 0xffffffffffffffefL, 0xffffffffffffffefL, 0xfffffffffffffffL, 0xfffffffffffffffL }.Cast<long>().GetEnumerator(),
-                new int[] { 0, 4 }.AsEnumerable().GetEnumerator());
+            AssertLongRangeSplit(long.MaxValue, long.MaxValue, 1, true, new long[]
+                {
+                    unchecked((long) 0xffffffffffffffffL),
+                    unchecked((long) 0xffffffffffffffffL)
+                }, new int[] {0});
+
+            AssertLongRangeSplit(long.MaxValue, long.MaxValue, 2, true, new long[]
+                {
+                    unchecked((long) 0xffffffffffffffffL),
+                    unchecked((long) 0xffffffffffffffffL)
+                }, new int[] {0});
+
+            AssertLongRangeSplit(long.MaxValue, long.MaxValue, 4, true, new long[]
+                {
+                    unchecked((long) 0xffffffffffffffffL),
+                    unchecked((long) 0xffffffffffffffffL)
+                }, new int[] {0});
+
+            AssertLongRangeSplit(long.MaxValue, long.MaxValue, 6, true, new long[]
+                {
+                    unchecked((long) 0xffffffffffffffffL),
+                    unchecked((long) 0xffffffffffffffffL)
+                }, new int[] {0});
+
+            AssertLongRangeSplit(long.MaxValue, long.MaxValue, 8, true, new long[]
+                {
+                    unchecked((long) 0xffffffffffffffffL),
+                    unchecked((long) 0xffffffffffffffffL)
+                }, new int[] {0});
+
+            AssertLongRangeSplit(long.MaxValue, long.MaxValue, 64, true, new long[]
+                {
+                    unchecked((long) 0xffffffffffffffffL),
+                    unchecked((long) 0xffffffffffffffffL)
+                }, new int[] {0});
+
+            AssertLongRangeSplit(long.MaxValue - 0xfL, long.MaxValue, 4, true, new long[]
+                {
+                    0xfffffffffffffffL,
+                    0xfffffffffffffffL
+                }, new int[] {4});
+
+            AssertLongRangeSplit(long.MaxValue - 0x10L, long.MaxValue, 4, true, new long[]
+                {
+                    unchecked((long) 0xffffffffffffffefL),
+                    unchecked((long) 0xffffffffffffffefL),
+                    0xfffffffffffffffL,
+                    0xfffffffffffffffL
+                }, new int[] {0, 4});
 
             // lower end extremes
-            AssertLongRangeSplit(long.MinValue, long.MinValue, 1, true,
-                new long[] { 0x0000000000000000L, 0x0000000000000000L }.Cast<long>().GetEnumerator(),
-                new int[] { 0 }.AsEnumerable().GetEnumerator());
+            AssertLongRangeSplit(long.MinValue, long.MinValue, 1, true, new long[]
+                {
+                    0x0000000000000000L, 
+                    0x0000000000000000L
+                }, new int[] {0});
 
-            AssertLongRangeSplit(long.MinValue, long.MinValue, 2, true,
-                new long[] { 0x0000000000000000L, 0x0000000000000000L }.Cast<long>().GetEnumerator(),
-                new int[] { 0 }.AsEnumerable().GetEnumerator());
+            AssertLongRangeSplit(long.MinValue, long.MinValue, 2, true, new long[]
+                {
+                    0x0000000000000000L, 
+                    0x0000000000000000L
+                }, new int[] {0});
 
-            AssertLongRangeSplit(long.MinValue, long.MinValue, 4, true,
-                new long[] { 0x0000000000000000L, 0x0000000000000000L }.Cast<long>().GetEnumerator(),
-                new int[] { 0 }.AsEnumerable().GetEnumerator());
+            AssertLongRangeSplit(long.MinValue, long.MinValue, 4, true, new long[]
+                {
+                     0x0000000000000000L, 
+                     0x0000000000000000L
+                }, new int[] {0});
 
-            AssertLongRangeSplit(long.MinValue, long.MinValue, 6, true,
-                new long[] { 0x0000000000000000L, 0x0000000000000000L }.Cast<long>().GetEnumerator(),
-                new int[] { 0 }.AsEnumerable().GetEnumerator());
+            AssertLongRangeSplit(long.MinValue, long.MinValue, 6, true, new long[]
+                {
+                    0x0000000000000000L,
+                    0x0000000000000000L
+                }, new int[] {0});
 
-            AssertLongRangeSplit(long.MinValue, long.MinValue, 8, true,
-                new long[] { 0x0000000000000000L, 0x0000000000000000L }.Cast<long>().GetEnumerator(),
-                new int[] { 0 }.AsEnumerable().GetEnumerator());
+            AssertLongRangeSplit(long.MinValue, long.MinValue, 8, true, new long[]
+                {
+                    0x0000000000000000L,
+                    0x0000000000000000L
+                }, new int[] {0});
 
-            AssertLongRangeSplit(long.MinValue, long.MinValue, 64, true,
-                new long[] { 0x0000000000000000L, 0x0000000000000000L }.Cast<long>().GetEnumerator(),
-                new int[] { 0 }.AsEnumerable().GetEnumerator());
+            AssertLongRangeSplit(long.MinValue, long.MinValue, 64, true, new long[]
+                {
+                    0x0000000000000000L,
+                    0x0000000000000000L
+                }, new int[] {0});
 
+            AssertLongRangeSplit(long.MinValue, long.MinValue + 0xfL, 4, true, new long[]
+                {
+                    0x000000000000000L, 
+                    0x000000000000000L
+                }, new int[] {4});
 
-            AssertLongRangeSplit(long.MinValue, long.MinValue + 0xfL, 4, true,
-                new long[] { 0x000000000000000L, 0x000000000000000L }.Cast<long>().GetEnumerator(),
-                new int[] { 4 }.AsEnumerable().GetEnumerator());
-            AssertLongRangeSplit(long.MinValue, long.MinValue + 0x10L, 4, true,
-                new long[] { 0x0000000000000010L, 0x0000000000000010L, 0x000000000000000L, 0x000000000000000L }.Cast<long>().GetEnumerator(),
-                new int[] { 0, 4 }.AsEnumerable().GetEnumerator());
+            AssertLongRangeSplit(long.MinValue, long.MinValue + 0x10L, 4, true, new long[]
+                {
+                    0x0000000000000010L,
+                    0x0000000000000010L,
+                    0x000000000000000L,
+                    0x000000000000000L
+                }, new int[] {0, 4});
         }
 
         [Test]
-        public void TestRandomSplit()
+        public virtual void TestRandomSplit()
         {
-            Random random = new Random();
-            for (int i = 0; i < 100; i++)
+            var num = (long) AtLeast(10);
+            var random = new Random();
+            for (long i = 0; i < num; i++)
             {
                 ExecuteOneRandomSplit(random);
             }
         }
 
         private void ExecuteOneRandomSplit(Random random)
         {
-            long lower = RandomLong(random);
-            long len = (long)random.Next(16384 * 1024); // not too large bitsets, else OOME!
+            var lower = RandomLong(random);
+            long len = random.Next(16384*1024); // not too large bitsets, else OOME!
             while (lower + len < lower)
-            { // overflow
+            {
+                // overflow
                 lower >>= 1;
             }
             AssertLongRangeSplit(lower, lower + len, random.Next(64) + 1, true, null, null);
@@ -410,160 +465,177 @@ namespace Lucene.Net.Util
             switch (random.Next(4))
             {
                 case 0:
-                    val = 1L << (random.Next(63)); //  patterns like 0x000000100000 (-1 yields patterns like 0x0000fff)
+                    val = 1L << (random.Next(63)); // patterns like 0x000000100000 (-1 yields patterns like 0x0000fff)
                     break;
                 case 1:
                     val = -1L << (random.Next(63)); // patterns like 0xfffff00000
                     break;
                 default:
-                    val = random.Next();
-                    break;
+                    val = random.NextLong();
+                    break;
             }
 
             val += random.Next(5) - 2;
 
-            if (random.Next(2) == 1)
+            if (random.NextBool())
             {
-                if (random.Next(2) == 1) val += random.Next(100) - 50;
-                if (random.Next(2) == 1) val = ~val;
-                if (random.Next(2) == 1) val = val << 1;
-                if (random.Next(2) == 1) val = Number.URShift(val, 1);
+                if (random.NextBool()) val += random.Next(100) - 50;
+                if (random.NextBool()) val = ~val;
+                if (random.NextBool()) val = val << 1;
+                if (random.NextBool()) val = Number.URShift(val, 1);
             }
 
             return val;
         }
 
-
         [Test]
-		public void  TestSplitLongRange()
-		{
-			// a hard-coded "standard" range
-            AssertLongRangeSplit(- 5000L, 9500L, 4, true,
-                                 new System.Int64[]
-                                     {
-                                         0x7fffffffffffec78L, 0x7fffffffffffec7fL, unchecked((long) (0x8000000000002510L)),
-                                         unchecked((long) (0x800000000000251cL)), 0x7fffffffffffec8L, 0x7fffffffffffecfL,
-                                         0x800000000000250L, 0x800000000000250L, 0x7fffffffffffedL, 0x7fffffffffffefL,
-                                         0x80000000000020L, 0x80000000000024L, 0x7ffffffffffffL, 0x8000000000001L
-                                     }.Cast<long>().GetEnumerator(), new int[] {0, 0, 4, 4, 8, 8, 12}.Cast<int>().GetEnumerator());
-			
-			// the same with no range splitting
-            AssertLongRangeSplit(-5000L, 9500L, 64, true,
-                                 new System.Int64[] {0x7fffffffffffec78L, unchecked((long) (0x800000000000251cL))}.Cast
-                                     <long>().GetEnumerator(), new int[] { 0 }.Cast<int>().GetEnumerator());
-			
-			// this tests optimized range splitting, if one of the inner bounds
-			// is also the bound of the next lower precision, it should be used completely
-            AssertLongRangeSplit(0L, 1024L + 63L, 4, true,
-                                 new System.Int64[]
-                                     {0x800000000000040L, 0x800000000000043L, 0x80000000000000L, 0x80000000000003L}.Cast
-                                     <long>().GetEnumerator(), new int[] { 4, 8 }.Cast<int>().GetEnumerator());
-			
-			// the full long range should only consist of a lowest precision range; no bitset testing here, as too much memory needed :-)
-            AssertLongRangeSplit(System.Int64.MinValue, System.Int64.MaxValue, 8, false,
-                                 new System.Int64[] {0x00L, 0xffL}.Cast<long>().GetEnumerator(),
-                                 new int[] { 56 }.Cast<int>().GetEnumerator());
-			
-			// the same with precisionStep=4
-            AssertLongRangeSplit(System.Int64.MinValue, System.Int64.MaxValue, 4, false,
-                                 new System.Int64[] {0x0L, 0xfL}.Cast<long>().GetEnumerator(),
-                                 new int[] { 60 }.Cast<int>().GetEnumerator());
-			
-			// the same with precisionStep=2
-            AssertLongRangeSplit(System.Int64.MinValue, System.Int64.MaxValue, 2, false,
-                                 new System.Int64[] {0x0L, 0x3L}.Cast<long>().GetEnumerator(),
-                                 new int[] {62}.Cast<int>().GetEnumerator());
-			
-			// the same with precisionStep=1
-            AssertLongRangeSplit(System.Int64.MinValue, System.Int64.MaxValue, 1, false,
-                                 new System.Int64[] {0x0L, 0x1L}.ToList().GetEnumerator(),
-                                 new int[] {63}.Cast<int>().GetEnumerator());
-			
-			// a inverse range should produce no sub-ranges
-            AssertLongRangeSplit(9500L, -5000L, 4, false,
-                                 Enumerable.Empty<long>().GetEnumerator(),
-                                 new int[] {}.Cast<int>().GetEnumerator());
-			
-			// a 0-length range should reproduce the range itsself
-            AssertLongRangeSplit(9500L, 9500L, 4, false, new long[]
-                                                             {
-                                                                 unchecked((long) (0x800000000000251cL)),
-                                                                 unchecked((long) (0x800000000000251cL))
-                                                             }.Cast<long>().GetEnumerator(),
-                                 new int[] {0}.Cast<int>().GetEnumerator());
-		}
-		
-
-		/// <summary>Note: The neededBounds iterator must be unsigned (easier understanding what's happening) </summary>
-		protected internal virtual void  AssertIntRangeSplit(int lower, int upper, int precisionStep, bool useBitSet, IEnumerator<int> neededBounds, IEnumerator<int> neededShifts)
-		{
-		    OpenBitSet bits = useBitSet ? new OpenBitSet(upper - lower + 1) : null;
-
-            NumericUtils.SplitIntRange(new AnonymousClassIntRangeBuilder(lower, upper, useBitSet, bits, neededBounds, neededShifts,this), precisionStep, lower, upper);
-			
-			if (useBitSet)
-			{
-				// after flipping all bits in the range, the cardinality should be zero
-				bits.Flip(0, upper - lower + 1);
-				Assert.IsTrue(bits.IsEmpty(), "The sub-range concenated should match the whole range");
-			}
-		}
-		
+        public virtual void TestSplitLongRange()
+        {
+            // a hard-coded "standard" range
+            AssertLongRangeSplit(-5000L, 9500L, 4, true, new long[]
+                {
+                    0x7fffffffffffec78L,
+                    0x7fffffffffffec7fL,
+                    0x8000000000002510L,
+                    0x800000000000251cL,
+                    0x7fffffffffffec8L,
+                    0x7fffffffffffecfL,
+                    0x800000000000250L,
+                    0x800000000000250L,
+                    0x7fffffffffffedL,
+                    0x7fffffffffffefL,
+                    0x80000000000020L,
+                    0x80000000000024L,
+                    0x7ffffffffffffL,
+                    0x8000000000001L
+                },
+                new int[]
+                    {
+                        0,
+                        0,
+                        4,
+                        4,
+                        8,
+                        8,
+                        12
+                    });
+
+            // the same with no range splitting
+            AssertLongRangeSplit(-5000L, 9500L, 64, true, new long[] { 0x7fffffffffffec78L, 0x800000000000251cL}, new int[] {0});
+
+            // this tests optimized range splitting, if one of the inner bounds
+            // is also the bound of the next lower precision, it should be used completely
+            AssertLongRangeSplit(0L, 1024L + 63L, 4, true, new long[] {0x800000000000040L, 0x800000000000043L, 0x80000000000000L, 0x80000000000003L}, new int[] {4, 8});
+
+            // the full long range should only consist of a lowest precision range; no bitset testing here, as too much memory needed :-)
+            AssertLongRangeSplit(long.MinValue, long.MaxValue, 8, false, new long[] { 0x00L, 0xffL}, new int[] {56});
+
+            // the same with precisionStep=4
+            AssertLongRangeSplit(long.MinValue, long.MaxValue, 4, false, new long[] {0x0L, 0xfL}, new int[] {60});
+
+            // the same with precisionStep=2
+            AssertLongRangeSplit(long.MinValue, long.MaxValue, 2, false, new long[] {0x0L, 0x3L}, new int[] {62});
+
+            // the same with precisionStep=1
+            AssertLongRangeSplit(long.MinValue, long.MaxValue, 1, false, new long[] {0x0L, 0x1L}, new int[] {63});
+
+            // an inverse range should produce no sub-ranges
+            AssertLongRangeSplit(9500L, -5000L, 4, false, new long[0], new int[0]);
+
+            // a 0-length range should reproduce the range itself
+            AssertLongRangeSplit(9500L, 9500L, 4, false, new long[] { 0x800000000000251cL, 0x800000000000251cL }, new int[] {0});
+        }
+
+        private sealed class AnonymousIntRangeBuilder : NumericUtils.IntRangeBuilder
+        {
+            private readonly int lower, upper;
+            private readonly bool useBitSet;
+            private readonly FixedBitSet bits;
+            private readonly IEnumerator<int> neededBounds, neededShifts;
+
+            public AnonymousIntRangeBuilder(int lower, int upper, bool useBitSet,
+                FixedBitSet bits, IEnumerator<int> neededBounds, IEnumerator<int> neededShifts)
+            {
+                this.lower = lower;
+                this.upper = upper;
+                this.useBitSet = useBitSet;
+                this.bits = bits;
+                this.neededBounds = neededBounds;
+                this.neededShifts = neededShifts;
+            }
+
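+            // Callback from NumericUtils.SplitIntRange: marks each value of the
+            // current sub-range in the bit set (proving full, non-overlapping
+            // coverage) and checks the unsigned-shifted bounds and shift against
+            // the expected sequences.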
+            public override void AddRange(int min, int max, int shift)
+            {
+                Assert.IsTrue(min >= lower && min <= upper && max >= lower && max <= upper,
+                    "min, max should be inside bounds");
+                if (useBitSet)
+                    for (int i = min; i <= max; i++)
+                    {
+                        Assert.IsFalse(bits.GetAndSet(i - lower), "ranges should not overlap");
+                        // extra exit condition to prevent overflow on MaxValue
+                        if (i == max) break;
+                    }
+                if (neededBounds == null)
+                    return;
+                // make unsigned ints for easier display and understanding
+                min ^= 0x80000000;
+                max ^= 0x80000000;
+                //System.out.println("0x"+int.toHexString(min>>>shift)+",0x"+int.toHexString(max>>>shift)+")/*shift="+shift+"*/,");
+                Assert.IsTrue(neededShifts.MoveNext());
+                Assert.IsTrue(neededShifts.Current.Equals(shift), "shift");
+                Assert.IsTrue(neededBounds.MoveNext());
+                Assert.IsTrue(neededBounds.Current.Equals(Number.URShift(min, shift)), "inner min bound");
+                Assert.IsTrue(neededBounds.MoveNext());
+                Assert.IsTrue(neededBounds.Current.Equals(Number.URShift(max, shift)), "inner max bound");
+            }
+        }
+
+        /// <summary>Note: The neededBounds IEnumerable must be unsigned (easier understanding what's happening)</summary>
+        private void AssertIntRangeSplit(int lower, int upper, int precisionStep,
+                                         bool useBitSet, IEnumerable<int> expectedBounds, IEnumerable<int> expectedShifts)
+        {
+            var bits = useBitSet ? new FixedBitSet(upper - lower + 1) : null;
+            var neededBounds = (expectedBounds == null) ? null : expectedBounds.GetEnumerator();
+            var neededShifts = (expectedShifts == null) ? null : expectedShifts.GetEnumerator();
+
+            NumericUtils.SplitIntRange(new AnonymousIntRangeBuilder(lower, upper, useBitSet, bits, neededBounds, neededShifts), precisionStep, lower, upper);
+
+            if (useBitSet)
+            {
+                // after flipping all bits in the range, the cardinality should be zero
+                bits.Flip(0, upper - lower + 1);
+                Assert.IsTrue(bits.Cardinality().Equals(0), "The sub-range concatenated should match the whole range");
+            }
+        }
+
         [Test]
-		public virtual void  TestSplitIntRange()
-		{
-			// a hard-coded "standard" range
-            AssertIntRangeSplit(- 5000, 9500, 4, true,
-                                new System.Int32[]
-                                    {
-                                        0x7fffec78, 0x7fffec7f, unchecked((System.Int32) 0x80002510),
-                                        unchecked((System.Int32) 0x8000251c), 0x7fffec8, 0x7fffecf, 0x8000250, 0x8000250,
-                                        0x7fffed, 0x7fffef, 0x800020, 0x800024, 0x7ffff, 0x80001
-                                    }.Cast<int>().GetEnumerator
-                                    (), new int[] { 0, 0, 4, 4, 8, 8, 12 }.Cast<int>().GetEnumerator());
-			
-			// the same with no range splitting
-            AssertIntRangeSplit(-5000, 9500, 32, true,
-                                new System.Int32[] {0x7fffec78, unchecked((System.Int32) 0x8000251c)}.Cast<int>().
-                                    GetEnumerator(), new int[] { 0 }.Cast<int>().GetEnumerator());
-			
-			// this tests optimized range splitting, if one of the inner bounds
-			// is also the bound of the next lower precision, it should be used completely
-            AssertIntRangeSplit(0, 1024 + 63, 4, true,
-                                new System.Int32[] {0x8000040, 0x8000043, 0x800000, 0x800003}.Cast<int>().GetEnumerator(),
-                                new int[] { 4, 8 }.Cast<int>().GetEnumerator());
-			
-			// the full int range should only consist of a lowest precision range; no bitset testing here, as too much memory needed :-)
-            AssertIntRangeSplit(System.Int32.MinValue, System.Int32.MaxValue, 8, false,
-                                new System.Int32[] {0x00, 0xff}.Cast<int>().GetEnumerator(),
-                                new int[] { 24 }.Cast<int>().GetEnumerator());
-			
-			// the same with precisionStep=4
-            AssertIntRangeSplit(System.Int32.MinValue, System.Int32.MaxValue, 4, false,
-                                new System.Int32[] {0x0, 0xf}.Cast<int>().GetEnumerator(),
-                                new int[] {28}.Cast<int>().GetEnumerator());
-			
-			// the same with precisionStep=2
-            AssertIntRangeSplit(System.Int32.MinValue, System.Int32.MaxValue, 2, false,
-                                new System.Int32[] {0x0, 0x3}.Cast<int>().GetEnumerator(),
-                                new int[] {30}.Cast<int>().GetEnumerator());
-			
-			// the same with precisionStep=1
-            AssertIntRangeSplit(System.Int32.MinValue, System.Int32.MaxValue, 1, false,
-                                new System.Int32[] {0x0, 0x1}.Cast<int>().GetEnumerator(),
-                                new int[] {31}.Cast<int>().GetEnumerator());
-			
-			// a inverse range should produce no sub-ranges
-            AssertIntRangeSplit(9500, -5000, 4, false, Enumerable.Empty<int>().GetEnumerator(),
-                                new int[] {}.Cast<int>().GetEnumerator());
-			
-			// a 0-length range should reproduce the range itsself
-            AssertIntRangeSplit(9500, 9500, 4, false, new System.Int32[]
-                                                          {
-                                                              unchecked((System.Int32) 0x8000251c),
-                                                              unchecked((System.Int32) 0x8000251c)
-                                                          }.Cast<int>().GetEnumerator(),
-                                new int[] {0}.Cast<int>().GetEnumerator());
-		}
-	}
+        public virtual void TestSplitIntRange()
+        {
+            // a hard-coded "standard" range
+            AssertIntRangeSplit(-5000, 9500, 4, true, new int[] {
+                0x7fffec78, 0x7fffec7f,
+                0x80002510, 0x8000251c,
+                0x7fffec8, 0x7fffecf,
+                0x8000250, 0x8000250,
+                0x7fffed, 0x7fffef,
+                0x800020, 0x800024,
+                0x7ffff, 0x80001
+            }, new int[] {0, 0, 4, 4, 8, 8, 12});
+
+            // the same with no range splitting
+            AssertIntRangeSplit(-5000, 9500, 32, true, new int[] {0x7fffec78, 0x8000251c}, new int[] {0});
+
+            // this tests optimized range splitting, if one of the inner bounds
+            // is also the bound of the next lower precision, it should be used completely
+            AssertIntRangeSplit(0, 1024 + 63, 4, true, new int[] {0x8000040, 0x8000043, 0x800000, 0x800003}, new int[] {4, 8});
+
+            // the full int range should only consist of a lowest precision range; no bitset testing here, as too much memory needed :-)
+            AssertIntRangeSplit(int.MinValue, int.MaxValue, 8, false, new int[] {0x00, 0xff}, new int[] {24});
+
+            // the same with precisionStep=4
+            AssertIntRangeSplit(int.MinValue, int.MaxValue, 4, false, new int[] {0x0, 0xf}, new int[] {28});
+
+            // the same with precisionStep=2
+            AssertIntRangeSplit(int.MinValue, int.MaxValue, 2, false, new int[] {0x0, 0x3}, new int[] {30});
+
+            // the same with precisionStep=1
+            AssertIntRangeSplit(int.MinValue, int.MaxValue, 1, false, new int[] { 0x0, 0x1 }, new int[] { 31 });
+
+            // an inverse range should produce no sub-ranges
+            AssertIntRangeSplit(9500, -5000, 4, false, new int[0], new int[0]);
+
+            // a 0-length range should reproduce the range itself
+            AssertIntRangeSplit(9500, 9500, 4, false, new int[] { 0x8000251c, 0x8000251c }, new int[] { 0 });
+        }
+    }
 }
\ No newline at end of file
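
Several of the rewritten tests above lean on the extension methods added in test/core/Support/RandomExtensions.cs (NextLong, NextBool). The committed file is not reproduced in this message; a plausible sketch of those helpers, offered only as a reading aid:

    using System;

    namespace Lucene.Net.Test.Support
    {
        public static class RandomExtensions
        {
            // Composes a 64-bit value from two 31-bit draws; not perfectly
            // uniform, but adequate for randomized tests.
            public static long NextLong(this Random random)
            {
                return ((long)random.Next() << 32) | (uint)random.Next();
            }

            // Fair coin flip.
            public static bool NextBool(this Random random)
            {
                return random.Next(2) == 1;
            }
        }
    }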

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/06f5d4b8/test/core/Util/TestOpenBitSet.cs
----------------------------------------------------------------------
diff --git a/test/core/Util/TestOpenBitSet.cs b/test/core/Util/TestOpenBitSet.cs
index 70d0ea0..5d09cd8 100644
--- a/test/core/Util/TestOpenBitSet.cs
+++ b/test/core/Util/TestOpenBitSet.cs
@@ -16,7 +16,9 @@
  */
 
 using System;
+using System.Collections;
 using Lucene.Net.Support;
+using Lucene.Net.Test.Support;
 using NUnit.Framework;
 
 using DocIdSetIterator = Lucene.Net.Search.DocIdSetIterator;
@@ -29,31 +31,86 @@ namespace Lucene.Net.Util
 	[TestFixture]
 	public class TestOpenBitSet:LuceneTestCase
 	{
-		internal System.Random rand;
+		internal Random rand;
 		
-		internal virtual void  DoGet(System.Collections.BitArray a, OpenBitSet b)
+		internal virtual void  DoGet(BitArray a, OpenBitSet b)
 		{
-			int max = a.Count;
-			for (int i = 0; i < max; i++)
+			var max = a.Count;
+			for (var i = 0; i < max; i++)
 			{
-                Assert.AreEqual(a.Get(i) != b.Get(i), "mismatch: BitSet=[" + i + "]=" + a.Get(i));
+                Assert.AreEqual(a.Get(i), b.Get(i), "mismatch: BitSet=[" + i + "]=" + a.Get(i));
+                Assert.AreEqual(a.Get(i), b.Get((long)i), "mismatch: BitSet=[" + i + "]=" + a.Get(i));
 			}
 		}
 		
-		internal virtual void  DoNextSetBit(System.Collections.BitArray a, OpenBitSet b)
+        internal virtual void DoGetFast(BitArray a, OpenBitSet b, int max)
+        {
+            for (var i = 0; i < max; i++)
+            {
+                Assert.AreEqual(a.Get(i), b.FastGet(i), "mismatch: BitSet=[" + i + "]=" + a.Get(i));
+                Assert.AreEqual(a.Get(i), b.FastGet((long)i), "mismatch: BitSet=[" + i + "]=" + a.Get(i));
+            }
+        }
+
+		internal virtual void  DoNextSetBit(BitArray a, OpenBitSet b)
 		{
 			int aa = - 1, bb = - 1;
 			do 
 			{
-				aa = BitSetSupport.NextSetBit(a, aa + 1);
+				aa = a.NextSetBit(aa + 1);
 				bb = b.NextSetBit(bb + 1);
 				Assert.AreEqual(aa, bb);
 			}
 			while (aa >= 0);
 		}
+
+        internal virtual void DoNextSetBitLong(BitArray a, OpenBitSet b)
+        {
+            int aa = -1, bb = -1;
+            do
+            {
+                aa = a.NextSetBit(aa + 1);
+                bb = (int) b.NextSetBit((long) (bb + 1));
+                Assert.AreEqual(aa, bb);
+            } while (aa >= 0);
+        }
 		
+        internal virtual void DoPrevSetBit(BitArray a, OpenBitSet b)
+        {
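+            // BitArray has no PrevSetBit equivalent, so the expected position is
+            // found with a manual reverse scan and compared against OpenBitSet.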
+            var aa = a.Count + rand.Next(100);
+            var bb = aa;
+            do
+            {
+                // aa = a.prevSetBit(aa-1);
+                aa--;
+                while ((aa >= 0) && (!a[aa]))
+                {
+                    aa--;
+                }
+                bb = b.PrevSetBit(bb - 1);
+                Assert.AreEqual(aa, bb);
+            } while (aa >= 0);
+        }
+
+        internal virtual void DoPrevSetBitLong(BitArray a, OpenBitSet b)
+        {
+            var aa = a.Count + rand.Next(100);
+            var bb = aa;
+            do
+            {
+                // aa = a.prevSetBit(aa-1);
+                aa--;
+                while ((aa >= 0) && (!a[aa]))
+                {
+                    aa--;
+                }
+                bb = (int)b.PrevSetBit((long)(bb - 1));
+                Assert.AreEqual(aa, bb);
+            } while (aa >= 0);
+        }
+
 		// test interleaving different OpenBitSetIterator.next()/skipTo()
-		internal virtual void  DoIterate(System.Collections.BitArray a, OpenBitSet b, int mode)
+		internal virtual void  DoIterate(BitArray a, OpenBitSet b, int mode)
 		{
 			if (mode == 1)
 				DoIterate1(a, b);
@@ -61,27 +118,27 @@ namespace Lucene.Net.Util
 				DoIterate2(a, b);
 		}
 		
-		internal virtual void  DoIterate1(System.Collections.BitArray a, OpenBitSet b)
+		internal virtual void  DoIterate1(BitArray a, OpenBitSet b)
 		{
 			int aa = - 1, bb = - 1;
-			OpenBitSetIterator iterator = new OpenBitSetIterator(b);
+			var iterator = new OpenBitSetIterator(b);
 			do 
 			{
-				aa = BitSetSupport.NextSetBit(a, aa + 1);
-				bb = rand.NextDouble() > 0.5 ? iterator.NextDoc() : iterator.Advance(bb + 1);
-				Assert.AreEqual(aa == - 1?DocIdSetIterator.NO_MORE_DOCS:aa, bb);
+				aa = a.NextSetBit(aa + 1);
+				bb = rand.NextBool() ? iterator.NextDoc() : iterator.Advance(bb + 1);
+				Assert.AreEqual(aa == - 1 ? DocIdSetIterator.NO_MORE_DOCS : aa, bb);
 			}
 			while (aa >= 0);
 		}
 		
-		internal virtual void  DoIterate2(System.Collections.BitArray a, OpenBitSet b)
+		internal virtual void  DoIterate2(BitArray a, OpenBitSet b)
 		{
 			int aa = - 1, bb = - 1;
-			OpenBitSetIterator iterator = new OpenBitSetIterator(b);
+			var iterator = new OpenBitSetIterator(b);
 			do 
 			{
-				aa = BitSetSupport.NextSetBit(a, aa + 1);
-				bb = rand.NextDouble() > 0.5 ? iterator.NextDoc() : iterator.Advance(bb + 1);
+				aa = a.NextSetBit(aa + 1);
+				bb = rand.NextBool() ? iterator.NextDoc() : iterator.Advance(bb + 1);
 				Assert.AreEqual(aa == - 1?DocIdSetIterator.NO_MORE_DOCS:aa, bb);
 			}
 			while (aa >= 0);
@@ -89,54 +146,72 @@ namespace Lucene.Net.Util
 		
 		internal virtual void  DoRandomSets(int maxSize, int iter, int mode)
 		{
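+			// Differential test: apply the same random mutations to the BitArray 'a'
+			// and the OpenBitSet 'b', then verify that every read path (Get, the
+			// iterators, NextSetBit and the bulk logical ops) reports identical state.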
-			System.Collections.BitArray a0 = null;
+			BitArray a0 = null;
 			OpenBitSet b0 = null;
 			
-			for (int i = 0; i < iter; i++)
+			for (var i = 0; i < iter; i++)
 			{
-				int sz = rand.Next(maxSize);
-				System.Collections.BitArray a = new System.Collections.BitArray(sz);
-				OpenBitSet b = new OpenBitSet(sz);
+				var sz = rand.Next(maxSize);
+				var a = new BitArray(sz);
+				var b = new OpenBitSet(sz);
 				
 				// test the various ways of setting bits
-				if (sz > 0)
-				{
-					int nOper = rand.Next(sz);
-					for (int j = 0; j < nOper; j++)
-					{
-						int idx;
-						
-						idx = rand.Next(sz);
-						a.Set(idx, true);
-						b.FastSet(idx);
-						idx = rand.Next(sz);
-						a.Set(idx, false);
-						b.FastClear(idx);
-						idx = rand.Next(sz);
-						a.Set(idx, !a.Get(idx));
-						b.FastFlip(idx);
-						
-						bool val = b.FlipAndGet(idx);
-						bool val2 = b.FlipAndGet(idx);
-						Assert.IsTrue(val != val2);
-						
-						val = b.GetAndSet(idx);
-						Assert.IsTrue(val2 == val);
-						Assert.IsTrue(b.Get(idx));
-						
-						if (!val)
-							b.FastClear(idx);
-						Assert.IsTrue(b.Get(idx) == val);
-					}
-				}
+                if (sz > 0)
+                {
+                    int nOper = rand.Next(sz);
+                    for (int j = 0; j < nOper; j++)
+                    {
+                        int idx;
+
+                        idx = rand.Next(sz);
+                        a.Set(idx);
+                        b.FastSet(idx);
+
+                        idx = rand.Next(sz);
+                        a.Set(idx);
+                        b.FastSet((long)idx);
+
+                        idx = rand.Next(sz);
+                        a.Clear(idx);
+                        b.FastClear(idx);
+
+                        idx = rand.Next(sz);
+                        a.Clear(idx);
+                        b.FastClear((long)idx);
+
+                        idx = rand.Next(sz);
+                        a.Flip(idx);
+                        b.FastFlip(idx);
+
+                        var val = b.FlipAndGet(idx);
+                        var val2 = b.FlipAndGet(idx);
+                        Assert.IsTrue(val != val2);
+
+                        idx = rand.Next(sz);
+                        a.Flip(idx);
+                        b.FastFlip((long)idx);
+
+                        val = b.FlipAndGet((long)idx);
+                        val2 = b.FlipAndGet((long)idx);
+                        Assert.IsTrue(val != val2);
+
+                        val = b.GetAndSet(idx);
+                        Assert.IsTrue(val2 == val);
+                        Assert.IsTrue(b[idx]);
+
+                        if (!val) b.FastClear(idx);
+                        Assert.IsTrue(b[idx] == val);
+                    }
+                }
 				
 				// test that the various ways of accessing the bits are equivalent
 				DoGet(a, b);
+			    DoGetFast(a, b, sz);
 				
                 // {{dougsale-2.4.0}}
                 //
                 // Java's java.util.BitSet automatically grows as needed - i.e., when a bit is referenced beyond
                 // the size of the BitSet, an exception isn't thrown - rather, the set grows to the size of the
                 // referenced bit.
                 //
                 // System.Collections.BitArray does not have this feature, and thus I've faked it here by
@@ -148,6 +223,10 @@ namespace Lucene.Net.Util
                 int fromIndex, toIndex;
                 fromIndex = rand.Next(sz + 80);
                 toIndex = fromIndex + rand.Next((sz >> 1) + 1);
+                //var aa = (BitArray)a.Clone();
+                //aa.Flip(fromIndex, toIndex);
+                //var bb = b.Clone();
+                //bb.Flip(fromIndex, toIndex);
 
                 // {{dougsale-2.4.0}}:
                 // The following commented-out, compound statement's 'for loop' implicitly grows the Java BitSets 'a'
@@ -156,16 +235,16 @@ namespace Lucene.Net.Util
                 // So, if necessary, lets explicitly grow 'a' now; then 'a' and its clone, 'aa', will be of the required size.
                 if (a.Count < toIndex && fromIndex < toIndex)
                 {
-                    System.Collections.BitArray tmp = new System.Collections.BitArray(toIndex, false);
-                    for (int k = 0; k < a.Count; k++)
+                    var tmp = new BitArray(toIndex, false);
+                    for (var k = 0; k < a.Count; k++)
                         tmp.Set(k, a.Get(k));
                     a = tmp;
                 }
                 // {{dougsale-2.4.0}}: now we can invoke this statement without going 'out-of-bounds'
-                System.Collections.BitArray aa = (System.Collections.BitArray)a.Clone(); for (int j = fromIndex; j < toIndex; j++) aa.Set(j, !a.Get(j));
-                OpenBitSet bb = (OpenBitSet)b.Clone(); bb.Flip(fromIndex, toIndex);
+                var aa = (BitArray)a.Clone(); for (int j = fromIndex; j < toIndex; j++) aa.Set(j, !a.Get(j));
+                var bb = (OpenBitSet)b.Clone(); bb.Flip(fromIndex, toIndex);
 
-                DoIterate(aa, bb, mode); // a problem here is from flip or doIterate
+                DoIterate(aa, bb, mode); // a problem here is from Flip or DoIterate
 
                 fromIndex = rand.Next(sz + 80);
                 toIndex = fromIndex + rand.Next((sz >> 1) + 1);
@@ -176,18 +255,18 @@ namespace Lucene.Net.Util
                 // So, if necessary, lets explicitly grow 'aa' now
                 if (a.Count < toIndex && fromIndex < toIndex)
                 {
-                    aa = new System.Collections.BitArray(toIndex);
-                    for (int k = 0; k < a.Count; k++)
+                    aa = new BitArray(toIndex);
+                    for (var k = 0; k < a.Count; k++)
                         aa.Set(k, a.Get(k));
                 }
                 else
                 {
-                    aa = (System.Collections.BitArray)a.Clone();
+                    aa = (BitArray)a.Clone();
                 }
-                for (int j = fromIndex; j < toIndex; j++) aa.Set(j, false);
+                for (var j = fromIndex; j < toIndex; j++) aa.Set(j, false);
                 bb = (OpenBitSet)b.Clone(); bb.Clear(fromIndex, toIndex);
 
-                DoNextSetBit(aa, bb); // a problem here is from clear() or nextSetBit
+                DoNextSetBit(aa, bb); // a problem here is from Clear() or NextSetBit
 
                 fromIndex = rand.Next(sz + 80);
                 toIndex = fromIndex + rand.Next((sz >> 1) + 1);
@@ -198,18 +277,18 @@ namespace Lucene.Net.Util
                 // So, if necessary, lets explicitly grow 'aa' now
                 if (a.Count < toIndex && fromIndex < toIndex)
                 {
-                    aa = new System.Collections.BitArray(toIndex);
-                    for (int k = 0; k < a.Count; k++)
+                    aa = new BitArray(toIndex);
+                    for (var k = 0; k < a.Count; k++)
                         aa.Set(k, a.Get(k));
                 }
                 else
                 {
-                    aa = (System.Collections.BitArray)a.Clone();
+                    aa = (BitArray)a.Clone();
                 }
-                for (int j = fromIndex; j < toIndex; j++) aa.Set(j, true);
+                for (var j = fromIndex; j < toIndex; j++) aa.Set(j, true);
                 bb = (OpenBitSet)b.Clone(); bb.Set(fromIndex, toIndex);
 				
-				DoNextSetBit(aa, bb); // a problem here is from set() or nextSetBit     
+				DoNextSetBit(aa, bb); // a problem here is from Set() or NextSetBit
 				
 				
 				if (a0 != null)
@@ -221,8 +300,8 @@ namespace Lucene.Net.Util
                     // {{dougsale-2.4.0}}
                     //
                     // The Java code used java.util.BitSet, which grows as needed.
                     // When a bit outside the dimension of the set is referenced,
                     // the set automatically grows to the necessary size.  The
                     // new entries default to false.
                     //
                     // BitArray does not grow automatically and is not growable.
@@ -238,52 +317,52 @@ namespace Lucene.Net.Util
                     //BitArray a_xor = (BitArray)a.Clone(); a_xor.Xor(a0);
                     //BitArray a_andn = (BitArray)a.Clone(); for (int j = 0; j < a_andn.Count; j++) if (a0.Get(j)) a_andn.Set(j, false);
 
-                    System.Collections.BitArray a_and;
-                    System.Collections.BitArray a_or;
-                    System.Collections.BitArray a_xor;
-                    System.Collections.BitArray a_andn;
+                    BitArray a_and;
+                    BitArray a_or;
+                    BitArray a_xor;
+                    BitArray a_andn;
 
                     if (a.Count < a0.Count)
                     {
                         // the Java code would have implicitly resized 'a_and', 'a_or', 'a_xor', and 'a_andn'
                         // in this case, so we explicitly create a resized stand-in for 'a' here, allowing for
                         // a to keep its original size while 'a_and', 'a_or', 'a_xor', and 'a_andn' are resized
-                        System.Collections.BitArray tmp = new System.Collections.BitArray(a0.Count, false);
+                        var tmp = new BitArray(a0.Count, false);
                         for (int z = 0; z < a.Count; z++)
                             tmp.Set(z, a.Get(z));
 
-                        a_and = (System.Collections.BitArray)tmp.Clone(); a_and.And(a0);
-                        a_or = (System.Collections.BitArray)tmp.Clone(); a_or.Or(a0);
-                        a_xor = (System.Collections.BitArray)tmp.Clone(); a_xor.Xor(a0);
-                        a_andn = (System.Collections.BitArray)tmp.Clone(); for (int j = 0; j < a_andn.Count; j++) if (a0.Get(j)) a_andn.Set(j, false);
+                        a_and = (BitArray)tmp.Clone(); a_and.And(a0);
+                        a_or = (BitArray)tmp.Clone(); a_or.Or(a0);
+                        a_xor = (BitArray)tmp.Clone(); a_xor.Xor(a0);
+                        a_andn = (BitArray)tmp.Clone(); for (var j = 0; j < a_andn.Count; j++) if (a0.Get(j)) a_andn.Set(j, false);
                     }
                     else if (a.Count > a0.Count)
                     {
                         // the Java code would have implicitly resized 'a0' in this case, so
                         // we explicitly do so here:
-                        System.Collections.BitArray tmp = new System.Collections.BitArray(a.Count, false);
-                        for (int z = 0; z < a0.Count; z++)
+                        var tmp = new BitArray(a.Count, false);
+                        for (var z = 0; z < a0.Count; z++)
                             tmp.Set(z, a0.Get(z));
                         a0 = tmp;
 
-                        a_and = (System.Collections.BitArray)a.Clone(); a_and.And(a0);
-                        a_or = (System.Collections.BitArray)a.Clone(); a_or.Or(a0);
-                        a_xor = (System.Collections.BitArray)a.Clone(); a_xor.Xor(a0);
-                        a_andn = (System.Collections.BitArray)a.Clone(); for (int j = 0; j < a_andn.Count; j++) if (a0.Get(j)) a_andn.Set(j, false);
+                        a_and = (BitArray)a.Clone(); a_and.And(a0);
+                        a_or = (BitArray)a.Clone(); a_or.Or(a0);
+                        a_xor = (BitArray)a.Clone(); a_xor.Xor(a0);
+                        a_andn = (BitArray)a.Clone(); for (var j = 0; j < a_andn.Count; j++) if (a0.Get(j)) a_andn.Set(j, false);
                     }
                     else
                     {
                         // 'a' and 'a0' are the same size, no explicit growing necessary
-                        a_and = (System.Collections.BitArray)a.Clone(); a_and.And(a0);
-                        a_or = (System.Collections.BitArray)a.Clone(); a_or.Or(a0);
-                        a_xor = (System.Collections.BitArray)a.Clone(); a_xor.Xor(a0);
-                        a_andn = (System.Collections.BitArray)a.Clone(); for (int j = 0; j < a_andn.Count; j++) if (a0.Get(j)) a_andn.Set(j, false);
+                        a_and = (BitArray)a.Clone(); a_and.And(a0);
+                        a_or = (BitArray)a.Clone(); a_or.Or(a0);
+                        a_xor = (BitArray)a.Clone(); a_xor.Xor(a0);
+                        a_andn = (BitArray)a.Clone(); for (var j = 0; j < a_andn.Count; j++) if (a0.Get(j)) a_andn.Set(j, false);
                     }
 
-                    OpenBitSet b_and = (OpenBitSet)b.Clone(); Assert.AreEqual(b, b_and); b_and.And(b0);
-                    OpenBitSet b_or = (OpenBitSet)b.Clone(); b_or.Or(b0);
-                    OpenBitSet b_xor = (OpenBitSet)b.Clone(); b_xor.Xor(b0);
-                    OpenBitSet b_andn = (OpenBitSet)b.Clone(); b_andn.AndNot(b0);
+                    var b_and = (OpenBitSet)b.Clone(); Assert.AreEqual(b, b_and); b_and.And(b0);
+                    var b_or = (OpenBitSet)b.Clone(); b_or.Or(b0);
+                    var b_xor = (OpenBitSet)b.Clone(); b_xor.Xor(b0);
+                    var b_andn = (OpenBitSet)b.Clone(); b_andn.AndNot(b0);
 
                     DoIterate(a_and, b_and, mode);
                     DoIterate(a_or, b_or, mode);
@@ -334,8 +413,8 @@ namespace Lucene.Net.Util
 		public virtual void  TestEquals()
 		{
 			rand = NewRandom();
-			OpenBitSet b1 = new OpenBitSet(1111);
-			OpenBitSet b2 = new OpenBitSet(2222);
+			var b1 = new OpenBitSet(1111);
+			var b2 = new OpenBitSet(2222);
 			Assert.IsTrue(b1.Equals(b2));
 			Assert.IsTrue(b2.Equals(b1));
 			b1.Set(10);
@@ -369,7 +448,7 @@ namespace Lucene.Net.Util
 			Assert.AreEqual(1, BitUtil.Ntz2(num));
 			Assert.AreEqual(1, BitUtil.Ntz3(num));
 			
-			for (int i = 0; i < 64; i++)
+			for (var i = 0; i < 64; i++)
 			{
 				num = 1L << i;
 				Assert.AreEqual(i, BitUtil.Ntz(num));
@@ -381,8 +460,8 @@ namespace Lucene.Net.Util
         [Test]
         public void TestHashCodeEquals()
         {
-            OpenBitSet bs1 = new OpenBitSet(200);
-            OpenBitSet bs2 = new OpenBitSet(64);
+            var bs1 = new OpenBitSet(200);
+            var bs2 = new OpenBitSet(64);
             bs1.Set(3);
             bs2.Set(3);
             Assert.AreEqual(bs1, bs2);


[15/50] [abbrv] git commit: Merge remote-tracking branch 'jamesblair/lucene_4_3_0' into lucene_4_3_0

Posted by mh...@apache.org.
Merge remote-tracking branch 'jamesblair/lucene_4_3_0' into lucene_4_3_0


Project: http://git-wip-us.apache.org/repos/asf/lucenenet/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucenenet/commit/fc17ce33
Tree: http://git-wip-us.apache.org/repos/asf/lucenenet/tree/fc17ce33
Diff: http://git-wip-us.apache.org/repos/asf/lucenenet/diff/fc17ce33

Branch: refs/heads/branch_4x
Commit: fc17ce33d593e2677716b84b88d322803e808e6f
Parents: 3fb1334 e02cc69
Author: Paul Irwin <pa...@gmail.com>
Authored: Tue Jul 23 17:33:13 2013 -0400
Committer: Paul Irwin <pa...@gmail.com>
Committed: Tue Jul 23 17:33:13 2013 -0400

----------------------------------------------------------------------
 test/core/Lucene.Net.Test.csproj                |   50 +-
 test/core/Support/RandomExtensions.cs           |   40 +
 test/core/Util/ArrayUtilTest.cs                 |   95 --
 test/core/Util/Automaton/TestBasicOperations.cs |  160 +++
 .../Util/Automaton/TestCompiledAutomaton.cs     |  127 +++
 test/core/Util/Automaton/TestDeterminism.cs     |   69 ++
 .../Util/Automaton/TestDeterminizeLexicon.cs    |   51 +
 .../Util/Automaton/TestLevenshteinAutomata.cs   |  394 +++++++
 test/core/Util/Automaton/TestMinimize.cs        |   51 +
 .../Util/Automaton/TestSpecialOperations.cs     |   38 +
 test/core/Util/Automaton/TestUTF32ToUTF8.cs     |  271 +++++
 test/core/Util/Cache/TestSimpleLRUCache.cs      |   77 --
 test/core/Util/Fst/Test2BFST.cs                 |  335 ++++++
 test/core/Util/Fst/TestBytesStore.cs            |  408 +++++++
 test/core/Util/StressRamUsageEstimator.cs       |  153 +++
 test/core/Util/Test2BPagedBytes.cs              |   62 ++
 test/core/Util/TestArrayUtil.cs                 |  390 +++++++
 test/core/Util/TestAttributeSource.cs           |  120 +-
 test/core/Util/TestBitVector.cs                 |  311 ------
 test/core/Util/TestByteBlockPool.cs             |   59 +
 test/core/Util/TestBytesRef.cs                  |   68 ++
 test/core/Util/TestBytesRefHash.cs              |  187 ++++
 test/core/Util/TestCharsRef.cs                  |  147 +++
 test/core/Util/TestCloseableThreadLocal.cs      |   37 +-
 test/core/Util/TestCollectionUtil.cs            |  186 ++++
 test/core/Util/TestDoubleBarrelLRUCache.cs      |  212 ++++
 test/core/Util/TestFilterIterator.cs            |    5 +
 test/core/Util/TestFixedBitSet.cs               |  373 +++++++
 test/core/Util/TestIOUtils.cs                   |  104 ++
 test/core/Util/TestIdentityHashSet.cs           |   45 +
 test/core/Util/TestIntsRef.cs                   |   33 +
 test/core/Util/TestMaxFailureRule.cs            |   98 ++
 test/core/Util/TestNamedSPILoader.cs            |   32 +
 test/core/Util/TestNumericUtils.cs              | 1040 ++++++++++--------
 test/core/Util/TestOpenBitSet.cs                |  273 +++--
 test/core/Util/TestPagedBytes.cs                |  131 +++
 test/core/Util/TestPriorityQueue.cs             |   40 +-
 test/core/Util/TestRamUsageEstimator.cs         |  167 ++-
 .../Util/TestRamUsageEstimatorOnWildAnimals.cs  |   48 +
 .../Util/TestRecyclingByteBlockAllocator.cs     |  137 +++
 .../core/Util/TestRecyclingIntBlockAllocator.cs |  138 +++
 test/core/Util/TestRollingBuffer.cs             |   83 ++
 test/core/Util/TestSentineIntSet.cs             |   65 ++
 test/core/Util/TestSetOnce.cs                   |  103 ++
 test/core/Util/TestSmallFloat.cs                |  262 +++--
 test/core/Util/TestSortedVIntList.cs            |  244 ----
 test/core/Util/TestSorterTemplate.cs            |  198 ++++
 test/core/Util/TestStringHelper.cs              |   48 -
 test/core/Util/TestStringIntern.cs              |  137 ---
 test/core/Util/TestUnicodeUtil.cs               |  173 +++
 test/core/Util/TestVersion.cs                   |   45 +-
 test/core/Util/TestVersionComparator.cs         |   38 +
 test/core/Util/TestVirtualMethod.cs             |   88 ++
 test/core/Util/TestWeakIdentityMap.cs           |  276 +++++
 54 files changed, 6740 insertions(+), 1782 deletions(-)
----------------------------------------------------------------------



[30/50] [abbrv] git commit: Not really a fix, but allows it to work

Posted by mh...@apache.org.
Not really a fix, but allows it to work


Project: http://git-wip-us.apache.org/repos/asf/lucenenet/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucenenet/commit/e9c6e038
Tree: http://git-wip-us.apache.org/repos/asf/lucenenet/tree/e9c6e038
Diff: http://git-wip-us.apache.org/repos/asf/lucenenet/diff/e9c6e038

Branch: refs/heads/branch_4x
Commit: e9c6e038787952ebdafc16c7c0581629c9412f7d
Parents: 12606ff
Author: Paul Irwin <pa...@gmail.com>
Authored: Tue Aug 6 22:45:26 2013 -0400
Committer: Paul Irwin <pa...@gmail.com>
Committed: Tue Aug 6 22:45:26 2013 -0400

----------------------------------------------------------------------
 src/core/Codecs/Lucene42/Lucene42DocValuesProducer.cs | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucenenet/blob/e9c6e038/src/core/Codecs/Lucene42/Lucene42DocValuesProducer.cs
----------------------------------------------------------------------
diff --git a/src/core/Codecs/Lucene42/Lucene42DocValuesProducer.cs b/src/core/Codecs/Lucene42/Lucene42DocValuesProducer.cs
index c412bad..8718010 100644
--- a/src/core/Codecs/Lucene42/Lucene42DocValuesProducer.cs
+++ b/src/core/Codecs/Lucene42/Lucene42DocValuesProducer.cs
@@ -69,7 +69,9 @@ namespace Lucene.Net.Codecs.Lucene42
         private void ReadFields(IndexInput meta, FieldInfos infos)
         {
             int fieldNumber = meta.ReadVInt();
-            while (fieldNumber != -1)
+            // TODO: .NET Port: I had to add the != 255 case here for it to work in testing, but that means 
+            // you can't have more than 255 fields, which seems wrong to me.
+            while (fieldNumber != -1 && fieldNumber != 255)
             {
                 int fieldType = meta.ReadByte();
                 if (fieldType == Lucene42DocValuesConsumer.NUMBER)
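
A note on the workaround above: the 255 seen in place of Java's -1 end marker is what you get if the marker is consumed as a single unsigned byte instead of being decoded as a VInt (writeVInt(-1) emits the five bytes 0xFF 0xFF 0xFF 0xFF 0x0F). A self-contained sketch of the distinction; the helper below is illustrative only, not the port's API:

    using System;
    using System.Collections.Generic;

    static class VIntDemo
    {
        // Standard Lucene-style VInt decode: 7 data bits per byte,
        // high bit set means another byte follows.
        static int ReadVInt(Func<byte> readByte)
        {
            byte b = readByte();
            int i = b & 0x7F;
            for (int shift = 7; (b & 0x80) != 0; shift += 7)
            {
                b = readByte();
                i |= (b & 0x7F) << shift;
            }
            return i;
        }

        static void Main()
        {
            // -1 written as a VInt is five bytes; a full decode restores -1,
            // while reading a single unsigned byte of the same stream yields 255.
            var bytes = new Queue<byte>(new byte[] { 0xFF, 0xFF, 0xFF, 0xFF, 0x0F });
            Console.WriteLine(ReadVInt(bytes.Dequeue)); // -1
            Console.WriteLine(new byte[] { 0xFF }[0]);  // 255
        }
    }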


[41/50] [abbrv] git commit: Implement Standard and Classic Analyzers

Posted by mh...@apache.org.
Implement Standard and Classic Analyzers


Project: http://git-wip-us.apache.org/repos/asf/lucenenet/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucenenet/commit/7a4b442f
Tree: http://git-wip-us.apache.org/repos/asf/lucenenet/tree/7a4b442f
Diff: http://git-wip-us.apache.org/repos/asf/lucenenet/diff/7a4b442f

Branch: refs/heads/branch_4x
Commit: 7a4b442f13ad71b5094c6058e1746c04d97ac34c
Parents: 98e877d
Author: Paul Irwin <pa...@gmail.com>
Authored: Thu Aug 8 14:58:40 2013 -0400
Committer: Paul Irwin <pa...@gmail.com>
Committed: Thu Aug 8 14:58:40 2013 -0400

----------------------------------------------------------------------
 src/contrib/Analyzers/Contrib.Analyzers.csproj  |   15 +
 .../Analyzers/Standard/ClassicAnalyzer.cs       |   70 +
 src/contrib/Analyzers/Standard/ClassicFilter.cs |   59 +
 .../Analyzers/Standard/ClassicFilterFactory.cs  |   25 +
 .../Analyzers/Standard/ClassicTokenizer.cs      |  131 ++
 .../Standard/ClassicTokenizerFactory.cs         |   31 +
 .../Analyzers/Standard/ClassicTokenizerImpl.cs  |  657 ++++++++++
 .../Standard/IStandardTokenizerInterface.cs     |   27 +
 .../Analyzers/Standard/StandardAnalyzer.cs      |   70 +
 .../Analyzers/Standard/StandardFilter.cs        |   73 ++
 .../Analyzers/Standard/StandardFilterFactory.cs |   26 +
 .../Analyzers/Standard/StandardTokenizer.cs     |  167 +++
 .../Standard/StandardTokenizerFactory.cs        |   31 +
 .../Analyzers/Standard/StandardTokenizerImpl.cs | 1241 ++++++++++++++++++
 .../Standard/Std31/StandardTokenizerImpl31.cs   | 1116 ++++++++++++++++
 .../Standard/Std34/StandardTokenizerImpl34.cs   | 1134 ++++++++++++++++
 16 files changed, 4873 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7a4b442f/src/contrib/Analyzers/Contrib.Analyzers.csproj
----------------------------------------------------------------------
diff --git a/src/contrib/Analyzers/Contrib.Analyzers.csproj b/src/contrib/Analyzers/Contrib.Analyzers.csproj
index 74b0f63..e13f118 100644
--- a/src/contrib/Analyzers/Contrib.Analyzers.csproj
+++ b/src/contrib/Analyzers/Contrib.Analyzers.csproj
@@ -122,6 +122,21 @@
     <Compile Include="Core\WhitespaceTokenizer.cs" />
     <Compile Include="Core\WhitespaceTokenizerFactory.cs" />
     <Compile Include="Properties\AssemblyInfo.cs" />
+    <Compile Include="Standard\ClassicAnalyzer.cs" />
+    <Compile Include="Standard\ClassicFilter.cs" />
+    <Compile Include="Standard\ClassicFilterFactory.cs" />
+    <Compile Include="Standard\ClassicTokenizer.cs" />
+    <Compile Include="Standard\ClassicTokenizerFactory.cs" />
+    <Compile Include="Standard\ClassicTokenizerImpl.cs" />
+    <Compile Include="Standard\IStandardTokenizerInterface.cs" />
+    <Compile Include="Standard\StandardAnalyzer.cs" />
+    <Compile Include="Standard\StandardFilter.cs" />
+    <Compile Include="Standard\StandardFilterFactory.cs" />
+    <Compile Include="Standard\StandardTokenizer.cs" />
+    <Compile Include="Standard\StandardTokenizerFactory.cs" />
+    <Compile Include="Standard\StandardTokenizerImpl.cs" />
+    <Compile Include="Standard\Std31\StandardTokenizerImpl31.cs" />
+    <Compile Include="Standard\Std34\StandardTokenizerImpl34.cs" />
     <Compile Include="Support\AbstractSet.cs" />
     <Compile Include="Support\StringExtensions.cs" />
     <Compile Include="Util\AbstractAnalysisFactory.cs" />

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7a4b442f/src/contrib/Analyzers/Standard/ClassicAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Analyzers/Standard/ClassicAnalyzer.cs b/src/contrib/Analyzers/Standard/ClassicAnalyzer.cs
new file mode 100644
index 0000000..193f111
--- /dev/null
+++ b/src/contrib/Analyzers/Standard/ClassicAnalyzer.cs
@@ -0,0 +1,70 @@
+using Lucene.Net.Analysis.Core;
+using Lucene.Net.Analysis.Util;
+using System;
+using System.Collections.Generic;
+using System.IO;
+using System.Linq;
+using System.Text;
+using Version = Lucene.Net.Util.Version;
+
+namespace Lucene.Net.Analysis.Standard
+{
+    public sealed class ClassicAnalyzer : StopwordAnalyzerBase
+    {
+        public const int DEFAULT_MAX_TOKEN_LENGTH = 255;
+
+        private int maxTokenLength = DEFAULT_MAX_TOKEN_LENGTH;
+
+        public static readonly CharArraySet STOP_WORDS_SET = StopAnalyzer.ENGLISH_STOP_WORDS_SET;
+
+        public ClassicAnalyzer(Version? matchVersion, CharArraySet stopWords)
+            : base(matchVersion, stopWords)
+        {
+        }
+
+        public ClassicAnalyzer(Version? matchVersion)
+            : this(matchVersion, STOP_WORDS_SET)
+        {
+        }
+
+        public ClassicAnalyzer(Version? matchVersion, TextReader stopwords)
+            : this(matchVersion, LoadStopwordSet(stopwords, matchVersion))
+        {
+        }
+
+        public int MaxTokenLength
+        {
+            get { return maxTokenLength; }
+            set { maxTokenLength = value; }
+        }
+
+        public override TokenStreamComponents CreateComponents(string fieldName, TextReader reader)
+        {
+            ClassicTokenizer src = new ClassicTokenizer(matchVersion, reader);
+            src.MaxTokenLength = maxTokenLength;
+            TokenStream tok = new ClassicFilter(src);
+            tok = new LowerCaseFilter(matchVersion, tok);
+            tok = new StopFilter(matchVersion, tok, stopwords);
+            return new AnonymousTokenStreamComponents(this, src, tok);
+        }
+
+        private sealed class AnonymousTokenStreamComponents : TokenStreamComponents
+        {
+            private readonly ClassicAnalyzer parent;
+            private readonly ClassicTokenizer src;
+
+            public AnonymousTokenStreamComponents(ClassicAnalyzer parent, ClassicTokenizer src, TokenStream tok)
+                : base(src, tok)
+            {
+                this.parent = parent;
+                this.src = src;
+            }
+
+            public override void SetReader(TextReader reader)
+            {
+                src.MaxTokenLength = parent.maxTokenLength;
+                base.SetReader(reader);
+            }
+        }
+    }
+}
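
For orientation, a sketch of how the analyzer above might be driven; the Version.LUCENE_43 member and the Analyzer.TokenStream(field, TextReader) entry point are assumptions about the surrounding 4.x port, not part of this commit:

    using System;
    using System.IO;
    using Lucene.Net.Analysis.Standard;
    using Lucene.Net.Analysis.Tokenattributes;
    using Version = Lucene.Net.Util.Version;

    class ClassicAnalyzerDemo
    {
        static void Main()
        {
            var analyzer = new ClassicAnalyzer(Version.LUCENE_43);
            var ts = analyzer.TokenStream("body", new StringReader("The Quick Brown Fox's Dinner"));
            var term = ts.AddAttribute<ICharTermAttribute>();
            ts.Reset();
            while (ts.IncrementToken())
                Console.WriteLine(term.ToString()); // roughly: quick, brown, fox, dinner
            ts.End();
        }
    }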

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7a4b442f/src/contrib/Analyzers/Standard/ClassicFilter.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Analyzers/Standard/ClassicFilter.cs b/src/contrib/Analyzers/Standard/ClassicFilter.cs
new file mode 100644
index 0000000..eac2d3e
--- /dev/null
+++ b/src/contrib/Analyzers/Standard/ClassicFilter.cs
@@ -0,0 +1,59 @@
+using Lucene.Net.Analysis.Tokenattributes;
+using System;
+using System.Collections.Generic;
+using System.Linq;
+using System.Text;
+
+namespace Lucene.Net.Analysis.Standard
+{
+    public class ClassicFilter : TokenFilter
+    {
+        public ClassicFilter(TokenStream input)
+            : base(input)
+        {
+            typeAtt = AddAttribute<ITypeAttribute>();
+            termAtt = AddAttribute<ICharTermAttribute>();
+        }
+
+        private static readonly String APOSTROPHE_TYPE = ClassicTokenizer.TOKEN_TYPES[ClassicTokenizer.APOSTROPHE];
+        private static readonly String ACRONYM_TYPE = ClassicTokenizer.TOKEN_TYPES[ClassicTokenizer.ACRONYM];
+
+        // this filters uses attribute type
+        private readonly ITypeAttribute typeAtt; // = addAttribute(TypeAttribute.class);
+        private readonly ICharTermAttribute termAtt; // = addAttribute(CharTermAttribute.class);
+
+        public override bool IncrementToken()
+        {
+            if (!input.IncrementToken())
+            {
+                return false;
+            }
+
+            char[] buffer = termAtt.Buffer;
+            int bufferLength = termAtt.Length;
+            String type = typeAtt.Type;
+
+            if (type == APOSTROPHE_TYPE &&      // remove 's
+                bufferLength >= 2 &&
+                buffer[bufferLength - 2] == '\'' &&
+                (buffer[bufferLength - 1] == 's' || buffer[bufferLength - 1] == 'S'))
+            {
+                // Strip last 2 characters off
+                termAtt.SetLength(bufferLength - 2);
+            }
+            else if (type == ACRONYM_TYPE)
+            {      // remove dots
+                int upto = 0;
+                for (int i = 0; i < bufferLength; i++)
+                {
+                    char c = buffer[i];
+                    if (c != '.')
+                        buffer[upto++] = c;
+                }
+                termAtt.SetLength(upto);
+            }
+
+            return true;
+        }
+    }
+}

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7a4b442f/src/contrib/Analyzers/Standard/ClassicFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Analyzers/Standard/ClassicFilterFactory.cs b/src/contrib/Analyzers/Standard/ClassicFilterFactory.cs
new file mode 100644
index 0000000..378004b
--- /dev/null
+++ b/src/contrib/Analyzers/Standard/ClassicFilterFactory.cs
@@ -0,0 +1,25 @@
+using Lucene.Net.Analysis.Util;
+using System;
+using System.Collections.Generic;
+using System.Linq;
+using System.Text;
+
+namespace Lucene.Net.Analysis.Standard
+{
+    public class ClassicFilterFactory : TokenFilterFactory
+    {
+        public ClassicFilterFactory(IDictionary<String, String> args)
+            : base(args)
+        {
+            if (args.Count > 0)
+            {
+                throw new ArgumentException("Unknown parameters: " + args);
+            }
+        }
+
+        public override TokenStream Create(TokenStream input)
+        {
+            return new ClassicFilter(input);
+        }
+    }
+}

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7a4b442f/src/contrib/Analyzers/Standard/ClassicTokenizer.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Analyzers/Standard/ClassicTokenizer.cs b/src/contrib/Analyzers/Standard/ClassicTokenizer.cs
new file mode 100644
index 0000000..bad1c9e
--- /dev/null
+++ b/src/contrib/Analyzers/Standard/ClassicTokenizer.cs
@@ -0,0 +1,131 @@
+using Lucene.Net.Analysis.Tokenattributes;
+using System;
+using System.Collections.Generic;
+using System.IO;
+using System.Linq;
+using System.Text;
+using Version = Lucene.Net.Util.Version;
+
+namespace Lucene.Net.Analysis.Standard
+{
+    public sealed class ClassicTokenizer : Tokenizer
+    {
+        private IStandardTokenizerInterface scanner;
+
+        public const int ALPHANUM = 0;
+        public const int APOSTROPHE = 1;
+        public const int ACRONYM = 2;
+        public const int COMPANY = 3;
+        public const int EMAIL = 4;
+        public const int HOST = 5;
+        public const int NUM = 6;
+        public const int CJ = 7;
+
+        public const int ACRONYM_DEP = 8;
+
+        public static readonly string[] TOKEN_TYPES = new string[] {
+            "<ALPHANUM>",
+            "<APOSTROPHE>",
+            "<ACRONYM>",
+            "<COMPANY>",
+            "<EMAIL>",
+            "<HOST>",
+            "<NUM>",
+            "<CJ>",
+            "<ACRONYM_DEP>"
+          };
+
+        private int maxTokenLength = StandardAnalyzer.DEFAULT_MAX_TOKEN_LENGTH;
+
+        public int MaxTokenLength
+        {
+            get { return maxTokenLength; }
+            set { maxTokenLength = value; }
+        }
+
+        public ClassicTokenizer(Version? matchVersion, TextReader input)
+            : base(input)
+        {
+            termAtt = AddAttribute<ICharTermAttribute>();
+            offsetAtt = AddAttribute<IOffsetAttribute>();
+            posIncrAtt = AddAttribute<IPositionIncrementAttribute>();
+            typeAtt = AddAttribute<ITypeAttribute>();
+
+            Init(matchVersion);
+        }
+
+        public ClassicTokenizer(Version? matchVersion, AttributeFactory factory, TextReader input)
+            : base(factory, input)
+        {
+            termAtt = AddAttribute<ICharTermAttribute>();
+            offsetAtt = AddAttribute<IOffsetAttribute>();
+            posIncrAtt = AddAttribute<IPositionIncrementAttribute>();
+            typeAtt = AddAttribute<ITypeAttribute>();
+
+            Init(matchVersion);
+        }
+
+        private void Init(Version? matchVersion)
+        {
+            this.scanner = new ClassicTokenizerImpl(null); // best effort NPE if you dont call reset
+        }
+
+        // this tokenizer generates three attributes:
+        // term offset, positionIncrement and type
+        private readonly ICharTermAttribute termAtt; // = addAttribute(CharTermAttribute.class);
+        private readonly IOffsetAttribute offsetAtt; // = addAttribute(OffsetAttribute.class);
+        private readonly IPositionIncrementAttribute posIncrAtt; // = addAttribute(PositionIncrementAttribute.class);
+        private readonly ITypeAttribute typeAtt; // = addAttribute(TypeAttribute.class);
+
+        public override bool IncrementToken()
+        {
+            ClearAttributes();
+            int posIncr = 1;
+
+            while (true)
+            {
+                int tokenType = scanner.GetNextToken();
+
+                if (tokenType == StandardTokenizerInterface.YYEOF)
+                {
+                    return false;
+                }
+
+                if (scanner.YYLength <= maxTokenLength)
+                {
+                    posIncrAtt.PositionIncrement = posIncr;
+                    scanner.GetText(termAtt);
+                    int start = scanner.YYChar;
+                    offsetAtt.SetOffset(CorrectOffset(start), CorrectOffset(start + termAtt.Length));
+
+                    if (tokenType == ClassicTokenizer.ACRONYM_DEP)
+                    {
+                        typeAtt.Type = ClassicTokenizer.TOKEN_TYPES[ClassicTokenizer.HOST];
+                        termAtt.SetLength(termAtt.Length - 1); // remove extra '.'
+                    }
+                    else
+                    {
+                        typeAtt.Type = ClassicTokenizer.TOKEN_TYPES[tokenType];
+                    }
+                    return true;
+                }
+                else
+                    // When we skip a too-long term, we still increment the
+                    // position increment
+                    posIncr++;
+            }
+        }
+
+        public override void End()
+        {
+            // set final offset
+            int finalOffset = CorrectOffset(scanner.YYChar + scanner.YYLength);
+            offsetAtt.SetOffset(finalOffset, finalOffset);
+        }
+
+        public override void Reset()
+        {
+            scanner.YYReset(input);
+        }
+    }
+}
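
A short consumption sketch for ClassicTokenizer (input text and version
constant are illustrative; assumes the usual using directives for System.IO
and Lucene.Net.Analysis.Tokenattributes):

    var tokenizer = new ClassicTokenizer(Version.LUCENE_43,
        new StringReader("wsmith@example.com visited IBM"));
    var term = tokenizer.GetAttribute<ICharTermAttribute>();
    var type = tokenizer.GetAttribute<ITypeAttribute>();
    tokenizer.Reset(); // required: wires the reader into the scanner
    while (tokenizer.IncrementToken())
        Console.WriteLine("{0} [{1}]", term.ToString(), type.Type); // e.g. "wsmith@example.com [<EMAIL>]"
    tokenizer.End();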

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7a4b442f/src/contrib/Analyzers/Standard/ClassicTokenizerFactory.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Analyzers/Standard/ClassicTokenizerFactory.cs b/src/contrib/Analyzers/Standard/ClassicTokenizerFactory.cs
new file mode 100644
index 0000000..2bcd775
--- /dev/null
+++ b/src/contrib/Analyzers/Standard/ClassicTokenizerFactory.cs
@@ -0,0 +1,31 @@
+using Lucene.Net.Analysis.Util;
+using System;
+using System.Collections.Generic;
+using System.Linq;
+using System.Text;
+
+namespace Lucene.Net.Analysis.Standard
+{
+    public class ClassicTokenizerFactory : TokenizerFactory
+    {
+        private readonly int maxTokenLength;
+
+        public ClassicTokenizerFactory(IDictionary<String, String> args)
+            : base(args)
+        {
+            AssureMatchVersion();
+            maxTokenLength = GetInt(args, "maxTokenLength", StandardAnalyzer.DEFAULT_MAX_TOKEN_LENGTH);
+            if (args.Count > 0)
+            {
+                throw new ArgumentException("Unknown parameters: " + string.Join(", ", args.Keys));
+            }
+        }
+
+        public override Tokenizer Create(Net.Util.AttributeSource.AttributeFactory factory, System.IO.TextReader input)
+        {
+            ClassicTokenizer tokenizer = new ClassicTokenizer(luceneMatchVersion, factory, input);
+            tokenizer.MaxTokenLength = maxTokenLength;
+            return tokenizer;
+        }
+    }
+}
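
A sketch of the parameter handling above: the GetInt lookup presumably removes
the key it reads (that is what makes the final Count check meaningful), so
only unrecognized keys survive to the exception path. Key names below are
illustrative:

    var args = new Dictionary<string, string>
    {
        { "luceneMatchVersion", "LUCENE_43" },
        { "maxTokenLength", "100" }
    };
    var factory = new ClassicTokenizerFactory(args); // fine: both keys consumed
    // A leftover key such as { "bogus", "1" } would instead trip the
    // ArgumentException("Unknown parameters: ...") path.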

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7a4b442f/src/contrib/Analyzers/Standard/ClassicTokenizerImpl.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Analyzers/Standard/ClassicTokenizerImpl.cs b/src/contrib/Analyzers/Standard/ClassicTokenizerImpl.cs
new file mode 100644
index 0000000..9a096ac
--- /dev/null
+++ b/src/contrib/Analyzers/Standard/ClassicTokenizerImpl.cs
@@ -0,0 +1,657 @@
+using System;
+using System.Collections.Generic;
+using System.IO;
+using System.Linq;
+using System.Text;
+
+namespace Lucene.Net.Analysis.Standard
+{
+    internal class ClassicTokenizerImpl : IStandardTokenizerInterface
+    {
+        /** This character denotes the end of file */
+        public const int YYEOF = -1;
+
+        /** initial size of the lookahead buffer */
+        private const int ZZ_BUFFERSIZE = 4096;
+
+        /** lexical states */
+        public const int YYINITIAL = 0;
+
+        /**
+        * ZZ_LEXSTATE[l] is the state in the DFA for the lexical state l
+        * ZZ_LEXSTATE[l+1] is the state in the DFA for the lexical state l
+        *                  at the beginning of a line
+        * l is of the form l = 2*k, k a non negative integer
+        */
+        private static readonly int[] ZZ_LEXSTATE = {
+            0, 0
+        };
+
+        /** 
+        * Translates characters to character classes
+        */
+        private const string ZZ_CMAP_PACKED =
+          "\u0009\0\u0001\0\u0001\u000d\u0001\0\u0001\0\u0001\u000c\u0012\0\u0001\0\u0005\0\u0001\u0005" +
+          "\u0001\u0003\u0004\0\u0001\u0009\u0001\u0007\u0001\u0004\u0001\u0009\u000a\u0002\u0006\0\u0001\u0006\u001a\u000a" +
+          "\u0004\0\u0001\u0008\u0001\0\u001a\u000a\u002f\0\u0001\u000a\u000a\0\u0001\u000a\u0004\0\u0001\u000a" +
+          "\u0005\0\u0017\u000a\u0001\0\u001f\u000a\u0001\0\u0128\u000a\u0002\0\u0012\u000a\u001c\0\u005e\u000a" +
+          "\u0002\0\u0009\u000a\u0002\0\u0007\u000a\u000e\0\u0002\u000a\u000e\0\u0005\u000a\u0009\0\u0001\u000a" +
+          "\u008b\0\u0001\u000a\u000b\0\u0001\u000a\u0001\0\u0003\u000a\u0001\0\u0001\u000a\u0001\0\u0014\u000a" +
+          "\u0001\0\u002c\u000a\u0001\0\u0008\u000a\u0002\0\u001a\u000a\u000c\0\u0082\u000a\u000a\0\u0039\u000a" +
+          "\u0002\0\u0002\u000a\u0002\0\u0002\u000a\u0003\0\u0026\u000a\u0002\0\u0002\u000a\u0037\0\u0026\u000a" +
+          "\u0002\0\u0001\u000a\u0007\0\u0027\u000a\u0048\0\u001b\u000a\u0005\0\u0003\u000a\u002e\0\u001a\u000a" +
+          "\u0005\0\u000b\u000a\u0015\0\u000a\u0002\u0007\0\u0063\u000a\u0001\0\u0001\u000a\u000f\0\u0002\u000a" +
+          "\u0009\0\u000a\u0002\u0003\u000a\u0013\0\u0001\u000a\u0001\0\u001b\u000a\u0053\0\u0026\u000a\u015f\0" +
+          "\u0035\u000a\u0003\0\u0001\u000a\u0012\0\u0001\u000a\u0007\0\u000a\u000a\u0004\0\u000a\u0002\u0015\0" +
+          "\u0008\u000a\u0002\0\u0002\u000a\u0002\0\u0016\u000a\u0001\0\u0007\u000a\u0001\0\u0001\u000a\u0003\0" +
+          "\u0004\u000a\u0022\0\u0002\u000a\u0001\0\u0003\u000a\u0004\0\u000a\u0002\u0002\u000a\u0013\0\u0006\u000a" +
+          "\u0004\0\u0002\u000a\u0002\0\u0016\u000a\u0001\0\u0007\u000a\u0001\0\u0002\u000a\u0001\0\u0002\u000a" +
+          "\u0001\0\u0002\u000a\u001f\0\u0004\u000a\u0001\0\u0001\u000a\u0007\0\u000a\u0002\u0002\0\u0003\u000a" +
+          "\u0010\0\u0007\u000a\u0001\0\u0001\u000a\u0001\0\u0003\u000a\u0001\0\u0016\u000a\u0001\0\u0007\u000a" +
+          "\u0001\0\u0002\u000a\u0001\0\u0005\u000a\u0003\0\u0001\u000a\u0012\0\u0001\u000a\u000f\0\u0001\u000a" +
+          "\u0005\0\u000a\u0002\u0015\0\u0008\u000a\u0002\0\u0002\u000a\u0002\0\u0016\u000a\u0001\0\u0007\u000a" +
+          "\u0001\0\u0002\u000a\u0002\0\u0004\u000a\u0003\0\u0001\u000a\u001e\0\u0002\u000a\u0001\0\u0003\u000a" +
+          "\u0004\0\u000a\u0002\u0015\0\u0006\u000a\u0003\0\u0003\u000a\u0001\0\u0004\u000a\u0003\0\u0002\u000a" +
+          "\u0001\0\u0001\u000a\u0001\0\u0002\u000a\u0003\0\u0002\u000a\u0003\0\u0003\u000a\u0003\0\u0008\u000a" +
+          "\u0001\0\u0003\u000a\u002d\0\u0009\u0002\u0015\0\u0008\u000a\u0001\0\u0003\u000a\u0001\0\u0017\u000a" +
+          "\u0001\0\u000a\u000a\u0001\0\u0005\u000a\u0026\0\u0002\u000a\u0004\0\u000a\u0002\u0015\0\u0008\u000a" +
+          "\u0001\0\u0003\u000a\u0001\0\u0017\u000a\u0001\0\u000a\u000a\u0001\0\u0005\u000a\u0024\0\u0001\u000a" +
+          "\u0001\0\u0002\u000a\u0004\0\u000a\u0002\u0015\0\u0008\u000a\u0001\0\u0003\u000a\u0001\0\u0017\u000a" +
+          "\u0001\0\u0010\u000a\u0026\0\u0002\u000a\u0004\0\u000a\u0002\u0015\0\u0012\u000a\u0003\0\u0018\u000a" +
+          "\u0001\0\u0009\u000a\u0001\0\u0001\u000a\u0002\0\u0007\u000a\u0039\0\u0001\u0001\u0030\u000a\u0001\u0001" +
+          "\u0002\u000a\u000c\u0001\u0007\u000a\u0009\u0001\u000a\u0002\u0027\0\u0002\u000a\u0001\0\u0001\u000a\u0002\0" +
+          "\u0002\u000a\u0001\0\u0001\u000a\u0002\0\u0001\u000a\u0006\0\u0004\u000a\u0001\0\u0007\u000a\u0001\0" +
+          "\u0003\u000a\u0001\0\u0001\u000a\u0001\0\u0001\u000a\u0002\0\u0002\u000a\u0001\0\u0004\u000a\u0001\0" +
+          "\u0002\u000a\u0009\0\u0001\u000a\u0002\0\u0005\u000a\u0001\0\u0001\u000a\u0009\0\u000a\u0002\u0002\0" +
+          "\u0002\u000a\u0022\0\u0001\u000a\u001f\0\u000a\u0002\u0016\0\u0008\u000a\u0001\0\u0022\u000a\u001d\0" +
+          "\u0004\u000a\u0074\0\u0022\u000a\u0001\0\u0005\u000a\u0001\0\u0002\u000a\u0015\0\u000a\u0002\u0006\0" +
+          "\u0006\u000a\u004a\0\u0026\u000a\u000a\0\u0027\u000a\u0009\0\u005a\u000a\u0005\0\u0044\u000a\u0005\0" +
+          "\u0052\u000a\u0006\0\u0007\u000a\u0001\0\u003f\u000a\u0001\0\u0001\u000a\u0001\0\u0004\u000a\u0002\0" +
+          "\u0007\u000a\u0001\0\u0001\u000a\u0001\0\u0004\u000a\u0002\0\u0027\u000a\u0001\0\u0001\u000a\u0001\0" +
+          "\u0004\u000a\u0002\0\u001f\u000a\u0001\0\u0001\u000a\u0001\0\u0004\u000a\u0002\0\u0007\u000a\u0001\0" +
+          "\u0001\u000a\u0001\0\u0004\u000a\u0002\0\u0007\u000a\u0001\0\u0007\u000a\u0001\0\u0017\u000a\u0001\0" +
+          "\u001f\u000a\u0001\0\u0001\u000a\u0001\0\u0004\u000a\u0002\0\u0007\u000a\u0001\0\u0027\u000a\u0001\0" +
+          "\u0013\u000a\u000e\0\u0009\u0002\u002e\0\u0055\u000a\u000c\0\u026c\u000a\u0002\0\u0008\u000a\u000a\0" +
+          "\u001a\u000a\u0005\0\u004b\u000a\u0095\0\u0034\u000a\u002c\0\u000a\u0002\u0026\0\u000a\u0002\u0006\0" +
+          "\u0058\u000a\u0008\0\u0029\u000a\u0557\0\u009c\u000a\u0004\0\u005a\u000a\u0006\0\u0016\u000a\u0002\0" +
+          "\u0006\u000a\u0002\0\u0026\u000a\u0002\0\u0006\u000a\u0002\0\u0008\u000a\u0001\0\u0001\u000a\u0001\0" +
+          "\u0001\u000a\u0001\0\u0001\u000a\u0001\0\u001f\u000a\u0002\0\u0035\u000a\u0001\0\u0007\u000a\u0001\0" +
+          "\u0001\u000a\u0003\0\u0003\u000a\u0001\0\u0007\u000a\u0003\0\u0004\u000a\u0002\0\u0006\u000a\u0004\0" +
+          "\u000d\u000a\u0005\0\u0003\u000a\u0001\0\u0007\u000a\u0082\0\u0001\u000a\u0082\0\u0001\u000a\u0004\0" +
+          "\u0001\u000a\u0002\0\u000a\u000a\u0001\0\u0001\u000a\u0003\0\u0005\u000a\u0006\0\u0001\u000a\u0001\0" +
+          "\u0001\u000a\u0001\0\u0001\u000a\u0001\0\u0004\u000a\u0001\0\u0003\u000a\u0001\0\u0007\u000a\u0ecb\0" +
+          "\u0002\u000a\u002a\0\u0005\u000a\u000a\0\u0001\u000b\u0054\u000b\u0008\u000b\u0002\u000b\u0002\u000b\u005a\u000b" +
+          "\u0001\u000b\u0003\u000b\u0006\u000b\u0028\u000b\u0003\u000b\u0001\0\u005e\u000a\u0011\0\u0018\u000a\u0038\0" +
+          "\u0010\u000b\u0100\0\u0080\u000b\u0080\0\u19b6\u000b\u000a\u000b\u0040\0\u51a6\u000b\u005a\u000b\u048d\u000a" +
+          "\u0773\0\u2ba4\u000a\u215c\0\u012e\u000b\u00d2\u000b\u0007\u000a\u000c\0\u0005\u000a\u0005\0\u0001\u000a" +
+          "\u0001\0\u000a\u000a\u0001\0\u000d\u000a\u0001\0\u0005\u000a\u0001\0\u0001\u000a\u0001\0\u0002\u000a" +
+          "\u0001\0\u0002\u000a\u0001\0\u006c\u000a\u0021\0\u016b\u000a\u0012\0\u0040\u000a\u0002\0\u0036\u000a" +
+          "\u0028\0\u000c\u000a\u0074\0\u0003\u000a\u0001\0\u0001\u000a\u0001\0\u0087\u000a\u0013\0\u000a\u0002" +
+          "\u0007\0\u001a\u000a\u0006\0\u001a\u000a\u000a\0\u0001\u000b\u003a\u000b\u001f\u000a\u0003\0\u0006\u000a" +
+          "\u0002\0\u0006\u000a\u0002\0\u0006\u000a\u0002\0\u0003\u000a\u0023\0";
+
+        /** 
+        * Translates characters to character classes
+        */
+        private static readonly char[] ZZ_CMAP = zzUnpackCMap(ZZ_CMAP_PACKED);
+
+        /** 
+         * Translates DFA states to action switch labels.
+         */
+        private static readonly int[] ZZ_ACTION = zzUnpackAction();
+
+        private const String ZZ_ACTION_PACKED_0 =
+        "\u0001\0\u0001\u0001\u0003\u0002\u0001\u0003\u0001\u0001\u000b\0\u0001\u0002\u0003\u0004" +
+        "\u0002\0\u0001\u0005\u0001\0\u0001\u0005\u0003\u0004\u0006\u0005\u0001\u0006\u0001\u0004" +
+        "\u0002\u0007\u0001\u0008\u0001\0\u0001\u0008\u0003\0\u0002\u0008\u0001\u0009\u0001\u000a" +
+        "\u0001\u0004";
+
+        private static int[] zzUnpackAction()
+        {
+            int[] result = new int[51];
+            int offset = 0;
+            offset = zzUnpackAction(ZZ_ACTION_PACKED_0, offset, result);
+            return result;
+        }
+
+        private static int zzUnpackAction(String packed, int offset, int[] result)
+        {
+            int i = 0;       /* index in packed string  */
+            int j = offset;  /* index in unpacked array */
+            int l = packed.Length;
+            while (i < l)
+            {
+                int count = packed[i++];
+                int value = packed[i++];
+                do result[j++] = value; while (--count > 0);
+            }
+            return j;
+        }
+
+        /** 
+        * Translates a state to a row index in the transition table
+        */
+        private static readonly int[] ZZ_ROWMAP = zzUnpackRowMap();
+
+        private const String ZZ_ROWMAP_PACKED_0 =
+        "\0\0\0\u000e\0\u001c\0\u002a\0\u0038\0\u000e\0\u0046\0\u0054" +
+        "\0\u0062\0\u0070\0\u007e\0\u008c\0\u009a\0\u00a8\0\u00b6\0\u00c4" +
+        "\0\u00d2\0\u00e0\0\u00ee\0\u00fc\0\u010a\0\u0118\0\u0126\0\u0134" +
+        "\0\u0142\0\u0150\0\u015e\0\u016c\0\u017a\0\u0188\0\u0196\0\u01a4" +
+        "\0\u01b2\0\u01c0\0\u01ce\0\u01dc\0\u01ea\0\u01f8\0\u00d2\0\u0206" +
+        "\0\u0214\0\u0222\0\u0230\0\u023e\0\u024c\0\u025a\0\u0054\0\u008c" +
+        "\0\u0268\0\u0276\0\u0284";
+
+        private static int[] zzUnpackRowMap()
+        {
+            int[] result = new int[51];
+            int offset = 0;
+            offset = zzUnpackRowMap(ZZ_ROWMAP_PACKED_0, offset, result);
+            return result;
+        }
+
+        private static int zzUnpackRowMap(String packed, int offset, int[] result)
+        {
+            int i = 0;  /* index in packed string  */
+            int j = offset;  /* index in unpacked array */
+            int l = packed.Length;
+            while (i < l)
+            {
+                int high = packed[i++] << 16;
+                result[j++] = high | packed[i++];
+            }
+            return j;
+        }
+
+        /** 
+        * The transition table of the DFA
+        */
+        private static readonly int[] ZZ_TRANS = zzUnpackTrans();
+
+        private const String ZZ_TRANS_PACKED_0 =
+        "\u0001\u0002\u0001\u0003\u0001\u0004\u0007\u0002\u0001\u0005\u0001\u0006\u0001\u0007\u0001\u0002" +
+        "\u000f\0\u0002\u0003\u0001\0\u0001\u0008\u0001\0\u0001\u0009\u0002\u000a\u0001\u000b" +
+        "\u0001\u0003\u0004\0\u0001\u0003\u0001\u0004\u0001\0\u0001\u000c\u0001\0\u0001\u0009" +
+        "\u0002\u000d\u0001\u000e\u0001\u0004\u0004\0\u0001\u0003\u0001\u0004\u0001\u000f\u0001\u0010" +
+        "\u0001\u0011\u0001\u0012\u0002\u000a\u0001\u000b\u0001\u0013\u0010\0\u0001\u0002\u0001\0" +
+        "\u0001\u0014\u0001\u0015\u0007\0\u0001\u0016\u0004\0\u0002\u0017\u0007\0\u0001\u0017" +
+        "\u0004\0\u0001\u0018\u0001\u0019\u0007\0\u0001\u001a\u0005\0\u0001\u001b\u0007\0" +
+        "\u0001\u000b\u0004\0\u0001\u001c\u0001\u001d\u0007\0\u0001\u001e\u0004\0\u0001\u001f" +
+        "\u0001\u0020\u0007\0\u0001\u0021\u0004\0\u0001\u0022\u0001\u0023\u0007\0\u0001\u0024" +
+        "\u000d\0\u0001\u0025\u0004\0\u0001\u0014\u0001\u0015\u0007\0\u0001\u0026\u000d\0" +
+        "\u0001\u0027\u0004\0\u0002\u0017\u0007\0\u0001\u0028\u0004\0\u0001\u0003\u0001\u0004" +
+        "\u0001\u000f\u0001\u0008\u0001\u0011\u0001\u0012\u0002\u000a\u0001\u000b\u0001\u0013\u0004\0" +
+        "\u0002\u0014\u0001\0\u0001\u0029\u0001\0\u0001\u0009\u0002\u002a\u0001\0\u0001\u0014" +
+        "\u0004\0\u0001\u0014\u0001\u0015\u0001\0\u0001\u002b\u0001\0\u0001\u0009\u0002\u002c" +
+        "\u0001\u002d\u0001\u0015\u0004\0\u0001\u0014\u0001\u0015\u0001\0\u0001\u0029\u0001\0" +
+        "\u0001\u0009\u0002\u002a\u0001\0\u0001\u0016\u0004\0\u0002\u0017\u0001\0\u0001\u002e" +
+        "\u0002\0\u0001\u002e\u0002\0\u0001\u0017\u0004\0\u0002\u0018\u0001\0\u0001\u002a" +
+        "\u0001\0\u0001\u0009\u0002\u002a\u0001\0\u0001\u0018\u0004\0\u0001\u0018\u0001\u0019" +
+        "\u0001\0\u0001\u002c\u0001\0\u0001\u0009\u0002\u002c\u0001\u002d\u0001\u0019\u0004\0" +
+        "\u0001\u0018\u0001\u0019\u0001\0\u0001\u002a\u0001\0\u0001\u0009\u0002\u002a\u0001\0" +
+        "\u0001\u001a\u0005\0\u0001\u001b\u0001\0\u0001\u002d\u0002\0\u0003\u002d\u0001\u001b" +
+        "\u0004\0\u0002\u001c\u0001\0\u0001\u002f\u0001\0\u0001\u0009\u0002\u000a\u0001\u000b" +
+        "\u0001\u001c\u0004\0\u0001\u001c\u0001\u001d\u0001\0\u0001\u0030\u0001\0\u0001\u0009" +
+        "\u0002\u000d\u0001\u000e\u0001\u001d\u0004\0\u0001\u001c\u0001\u001d\u0001\0\u0001\u002f" +
+        "\u0001\0\u0001\u0009\u0002\u000a\u0001\u000b\u0001\u001e\u0004\0\u0002\u001f\u0001\0" +
+        "\u0001\u000a\u0001\0\u0001\u0009\u0002\u000a\u0001\u000b\u0001\u001f\u0004\0\u0001\u001f" +
+        "\u0001\u0020\u0001\0\u0001\u000d\u0001\0\u0001\u0009\u0002\u000d\u0001\u000e\u0001\u0020" +
+        "\u0004\0\u0001\u001f\u0001\u0020\u0001\0\u0001\u000a\u0001\0\u0001\u0009\u0002\u000a" +
+        "\u0001\u000b\u0001\u0021\u0004\0\u0002\u0022\u0001\0\u0001\u000b\u0002\0\u0003\u000b" +
+        "\u0001\u0022\u0004\0\u0001\u0022\u0001\u0023\u0001\0\u0001\u000e\u0002\0\u0003\u000e" +
+        "\u0001\u0023\u0004\0\u0001\u0022\u0001\u0023\u0001\0\u0001\u000b\u0002\0\u0003\u000b" +
+        "\u0001\u0024\u0006\0\u0001\u000f\u0006\0\u0001\u0025\u0004\0\u0001\u0014\u0001\u0015" +
+        "\u0001\0\u0001\u0031\u0001\0\u0001\u0009\u0002\u002a\u0001\0\u0001\u0016\u0004\0" +
+        "\u0002\u0017\u0001\0\u0001\u002e\u0002\0\u0001\u002e\u0002\0\u0001\u0028\u0004\0" +
+        "\u0002\u0014\u0007\0\u0001\u0014\u0004\0\u0002\u0018\u0007\0\u0001\u0018\u0004\0" +
+        "\u0002\u001c\u0007\0\u0001\u001c\u0004\0\u0002\u001f\u0007\0\u0001\u001f\u0004\0" +
+        "\u0002\u0022\u0007\0\u0001\u0022\u0004\0\u0002\u0032\u0007\0\u0001\u0032\u0004\0" +
+        "\u0002\u0014\u0007\0\u0001\u0033\u0004\0\u0002\u0032\u0001\0\u0001\u002e\u0002\0" +
+        "\u0001\u002e\u0002\0\u0001\u0032\u0004\0\u0002\u0014\u0001\0\u0001\u0031\u0001\0" +
+        "\u0001\u0009\u0002\u002a\u0001\0\u0001\u0014\u0003\0";
+
+        private static int[] zzUnpackTrans()
+        {
+            int[] result = new int[658];
+            int offset = 0;
+            offset = zzUnpackTrans(ZZ_TRANS_PACKED_0, offset, result);
+            return result;
+        }
+
+        private static int zzUnpackTrans(String packed, int offset, int[] result)
+        {
+            int i = 0;       /* index in packed string  */
+            int j = offset;  /* index in unpacked array */
+            int l = packed.Length;
+            while (i < l)
+            {
+                int count = packed[i++];
+                int value = packed[i++];
+                value--;
+                do result[j++] = value; while (--count > 0);
+            }
+            return j;
+        }
+
+        /* error codes */
+        private const int ZZ_UNKNOWN_ERROR = 0;
+        private const int ZZ_NO_MATCH = 1;
+        private const int ZZ_PUSHBACK_2BIG = 2;
+
+        /* error messages for the codes above */
+        private static readonly String[] ZZ_ERROR_MSG = {
+        "Unkown internal scanner error",
+        "Error: could not match input",
+        "Error: pushback value was too large"
+        };
+
+        /**
+        * ZZ_ATTRIBUTE[aState] contains the attributes of state <code>aState</code>
+        */
+        private static readonly int[] ZZ_ATTRIBUTE = zzUnpackAttribute();
+
+        private const String ZZ_ATTRIBUTE_PACKED_0 =
+        "\u0001\0\u0001\u0009\u0003\u0001\u0001\u0009\u0001\u0001\u000b\0\u0004\u0001\u0002\0" +
+        "\u0001\u0001\u0001\0\u000f\u0001\u0001\0\u0001\u0001\u0003\0\u0005\u0001";
+
+        private static int[] zzUnpackAttribute()
+        {
+            int[] result = new int[51];
+            int offset = 0;
+            offset = zzUnpackAttribute(ZZ_ATTRIBUTE_PACKED_0, offset, result);
+            return result;
+        }
+
+        private static int zzUnpackAttribute(String packed, int offset, int[] result)
+        {
+            int i = 0;       /* index in packed string  */
+            int j = offset;  /* index in unpacked array */
+            int l = packed.Length;
+            while (i < l)
+            {
+                int count = packed[i++];
+                int value = packed[i++];
+                do result[j++] = value; while (--count > 0);
+            }
+            return j;
+        }
+
+        /** the input device */
+        private TextReader zzReader;
+
+        /** the current state of the DFA */
+        private int zzState;
+
+        /** the current lexical state */
+        private int zzLexicalState = YYINITIAL;
+
+        /** this buffer contains the current text to be matched and is
+        the source of the yytext() string */
+        private char[] zzBuffer = new char[ZZ_BUFFERSIZE];
+
+        /** the textposition at the last accepting state */
+        private int zzMarkedPos;
+
+        /** the current text position in the buffer */
+        private int zzCurrentPos;
+
+        /** startRead marks the beginning of the yytext() string in the buffer */
+        private int zzStartRead;
+
+        /** endRead marks the last character in the buffer that has been read
+        from input */
+        private int zzEndRead;
+
+        /** number of newlines encountered up to the start of the matched text */
+        private int yyline;
+
+        /** the number of characters up to the start of the matched text */
+        private int yychar;
+
+        /**
+        * the number of characters from the last newline up to the start of the 
+        * matched text
+        */
+        private int yycolumn;
+
+        /** 
+        * zzAtBOL == true <=> the scanner is currently at the beginning of a line
+        */
+        private bool zzAtBOL = true;
+
+        /** zzAtEOF == true <=> the scanner is at the EOF */
+        private bool zzAtEOF;
+
+        /** denotes if the user-EOF-code has already been executed */
+        private bool zzEOFDone;
+
+
+        /* user code: */
+
+        public const int ALPHANUM = StandardTokenizer.ALPHANUM;
+        public const int APOSTROPHE = StandardTokenizer.APOSTROPHE;
+        public const int ACRONYM = StandardTokenizer.ACRONYM;
+        public const int COMPANY = StandardTokenizer.COMPANY;
+        public const int EMAIL = StandardTokenizer.EMAIL;
+        public const int HOST = StandardTokenizer.HOST;
+        public const int NUM = StandardTokenizer.NUM;
+        public const int CJ = StandardTokenizer.CJ;
+        public const int ACRONYM_DEP = StandardTokenizer.ACRONYM_DEP;
+
+        public static readonly String[] TOKEN_TYPES = StandardTokenizer.TOKEN_TYPES;
+
+        public int YYChar
+        {
+            get { return yychar; }
+        }
+
+        public void GetText(Tokenattributes.ICharTermAttribute t)
+        {
+            t.CopyBuffer(zzBuffer, zzStartRead, zzMarkedPos - zzStartRead);
+        }
+
+        /**
+        * Creates a new scanner.
+        * (This port reads from a TextReader rather than a java.io.Reader.)
+        *
+        * @param   input  the TextReader to read input from.
+        */
+        internal ClassicTokenizerImpl(TextReader input)
+        {
+            this.zzReader = input;
+        }
+
+        private static char[] zzUnpackCMap(String packed)
+        {
+            char[] map = new char[0x10000];
+            int i = 0;  /* index in packed string  */
+            int j = 0;  /* index in unpacked array */
+            while (i < 1154)
+            {
+                int count = packed[i++];
+                char value = packed[i++];
+                do map[j++] = value; while (--count > 0);
+            }
+            return map;
+        }
+
+        private bool zzRefill()
+        {
+
+            /* first: make room (if you can) */
+            if (zzStartRead > 0)
+            {
+                Array.Copy(zzBuffer, zzStartRead,
+                                 zzBuffer, 0,
+                                 zzEndRead - zzStartRead);
+
+                /* translate stored positions */
+                zzEndRead -= zzStartRead;
+                zzCurrentPos -= zzStartRead;
+                zzMarkedPos -= zzStartRead;
+                zzStartRead = 0;
+            }
+
+            /* is the buffer big enough? */
+            if (zzCurrentPos >= zzBuffer.Length)
+            {
+                /* if not: blow it up */
+                char[] newBuffer = new char[zzCurrentPos * 2];
+                Array.Copy(zzBuffer, 0, newBuffer, 0, zzBuffer.Length);
+                zzBuffer = newBuffer;
+            }
+
+            /* finally: fill the buffer with new input */
+            int numRead = zzReader.Read(zzBuffer, zzEndRead,
+                                                    zzBuffer.Length - zzEndRead);
+
+            if (numRead > 0)
+            {
+                zzEndRead += numRead;
+                return false;
+            }
+            // unlikely but not impossible: read 0 characters, but not at end of stream    
+            if (numRead == 0)
+            {
+                int c = zzReader.Read();
+                if (c <= 0)
+                {
+                    return true;
+                }
+                else
+                {
+                    zzBuffer[zzEndRead++] = (char)c;
+                    return false;
+                }
+            }
+
+            // numRead < 0
+            return true;
+        }
+
+        public void yyclose()
+        {
+            zzAtEOF = true;            /* indicate end of file */
+            zzEndRead = zzStartRead;  /* invalidate buffer    */
+
+            if (zzReader != null)
+                zzReader.Close();
+        }
+
+        public void YYReset(TextReader reader)
+        {
+            zzReader = reader;
+            zzAtBOL = true;
+            zzAtEOF = false;
+            zzEOFDone = false;
+            zzEndRead = zzStartRead = 0;
+            zzCurrentPos = zzMarkedPos = 0;
+            yyline = yychar = yycolumn = 0;
+            zzLexicalState = YYINITIAL;
+            if (zzBuffer.Length > ZZ_BUFFERSIZE)
+                zzBuffer = new char[ZZ_BUFFERSIZE];
+        }
+
+        public int yystate()
+        {
+            return zzLexicalState;
+        }
+
+        public void yybegin(int newState)
+        {
+            zzLexicalState = newState;
+        }
+
+        public String yytext()
+        {
+            return new String(zzBuffer, zzStartRead, zzMarkedPos - zzStartRead);
+        }
+
+        public char yycharat(int pos)
+        {
+            return zzBuffer[zzStartRead + pos];
+        }
+
+        public int YYLength
+        {
+            get { return zzMarkedPos - zzStartRead; }
+        }
+
+        private void zzScanError(int errorCode)
+        {
+            String message;
+            try
+            {
+                message = ZZ_ERROR_MSG[errorCode];
+            }
+            catch (IndexOutOfRangeException)
+            {
+                message = ZZ_ERROR_MSG[ZZ_UNKNOWN_ERROR];
+            }
+
+            throw new Exception(message);
+        }
+
+        public void yypushback(int number)
+        {
+            if (number > YYLength)
+                zzScanError(ZZ_PUSHBACK_2BIG);
+
+            zzMarkedPos -= number;
+        }
+
+        public int GetNextToken()
+        {
+            int zzInput;
+            int zzAction;
+
+            // cached fields:
+            int zzCurrentPosL;
+            int zzMarkedPosL;
+            int zzEndReadL = zzEndRead;
+            char[] zzBufferL = zzBuffer;
+            char[] zzCMapL = ZZ_CMAP;
+
+            int[] zzTransL = ZZ_TRANS;
+            int[] zzRowMapL = ZZ_ROWMAP;
+            int[] zzAttrL = ZZ_ATTRIBUTE;
+
+            while (true)
+            {
+                zzMarkedPosL = zzMarkedPos;
+
+                yychar += zzMarkedPosL - zzStartRead;
+
+                zzAction = -1;
+
+                zzCurrentPosL = zzCurrentPos = zzStartRead = zzMarkedPosL;
+
+                zzState = ZZ_LEXSTATE[zzLexicalState];
+
+                // set up zzAction for empty match case:
+                int zzAttributes = zzAttrL[zzState];
+                if ((zzAttributes & 1) == 1)
+                {
+                    zzAction = zzState;
+                }
+
+
+            //zzForAction:
+                {
+                    while (true)
+                    {
+
+                        if (zzCurrentPosL < zzEndReadL)
+                            zzInput = zzBufferL[zzCurrentPosL++];
+                        else if (zzAtEOF)
+                        {
+                            zzInput = YYEOF;
+                            break;
+                        }
+                        else
+                        {
+                            // store back cached positions
+                            zzCurrentPos = zzCurrentPosL;
+                            zzMarkedPos = zzMarkedPosL;
+                            bool eof = zzRefill();
+                            // get translated positions and possibly new buffer
+                            zzCurrentPosL = zzCurrentPos;
+                            zzMarkedPosL = zzMarkedPos;
+                            zzBufferL = zzBuffer;
+                            zzEndReadL = zzEndRead;
+                            if (eof)
+                            {
+                                zzInput = YYEOF;
+                                break;
+                            }
+                            else
+                            {
+                                zzInput = zzBufferL[zzCurrentPosL++];
+                            }
+                        }
+                        int zzNext = zzTransL[zzRowMapL[zzState] + zzCMapL[zzInput]];
+                        if (zzNext == -1) break;
+                        zzState = zzNext;
+
+                        zzAttributes = zzAttrL[zzState];
+                        if ((zzAttributes & 1) == 1)
+                        {
+                            zzAction = zzState;
+                            zzMarkedPosL = zzCurrentPosL;
+                            if ((zzAttributes & 8) == 8) break;
+                        }
+
+                    }
+                }
+
+                // store back cached position
+                zzMarkedPos = zzMarkedPosL;
+
+                switch (zzAction < 0 ? zzAction : ZZ_ACTION[zzAction])
+                {
+                    case 1:
+                        { /* Break so we don't hit fall-through warning: */
+                            break; /* ignore */
+                        }
+                    case 11: break;
+                    case 2:
+                        {
+                            return ALPHANUM;
+                        }
+                    case 12: break;
+                    case 3:
+                        {
+                            return CJ;
+                        }
+                    case 13: break;
+                    case 4:
+                        {
+                            return HOST;
+                        }
+                    case 14: break;
+                    case 5:
+                        {
+                            return NUM;
+                        }
+                    case 15: break;
+                    case 6:
+                        {
+                            return APOSTROPHE;
+                        }
+                    case 16: break;
+                    case 7:
+                        {
+                            return COMPANY;
+                        }
+                    case 17: break;
+                    case 8:
+                        {
+                            return ACRONYM_DEP;
+                        }
+                    case 18: break;
+                    case 9:
+                        {
+                            return ACRONYM;
+                        }
+                    case 19: break;
+                    case 10:
+                        {
+                            return EMAIL;
+                        }
+                    case 20: break;
+                    default:
+                        if (zzInput == YYEOF && zzStartRead == zzCurrentPos)
+                        {
+                            zzAtEOF = true;
+                            return YYEOF;
+                        }
+                        else
+                        {
+                            zzScanError(ZZ_NO_MATCH);
+                        }
+                        break;
+                }
+            }
+        }
+    }
+}
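
The zzUnpack* helpers above all decode the same run-length scheme: each packed
string is a sequence of (count, value) char pairs (zzUnpackTrans additionally
shifts every value down by one). A standalone illustration, not part of the
generated scanner:

    // "\u0002\u0005\u0003\u0001" means "two 5s, then three 1s".
    static int[] Unpack(string packed, int unpackedLength)
    {
        int[] result = new int[unpackedLength];
        int i = 0, j = 0;
        while (i < packed.Length)
        {
            int count = packed[i++];
            int value = packed[i++];
            do result[j++] = value; while (--count > 0);
        }
        return result;
    }
    // Unpack("\u0002\u0005\u0003\u0001", 5) => { 5, 5, 1, 1, 1 }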

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7a4b442f/src/contrib/Analyzers/Standard/IStandardTokenizerInterface.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Analyzers/Standard/IStandardTokenizerInterface.cs b/src/contrib/Analyzers/Standard/IStandardTokenizerInterface.cs
new file mode 100644
index 0000000..883e7a0
--- /dev/null
+++ b/src/contrib/Analyzers/Standard/IStandardTokenizerInterface.cs
@@ -0,0 +1,27 @@
+using Lucene.Net.Analysis.Tokenattributes;
+using System;
+using System.Collections.Generic;
+using System.IO;
+using System.Linq;
+using System.Text;
+
+namespace Lucene.Net.Analysis.Standard
+{
+    public interface IStandardTokenizerInterface
+    {
+        void GetText(ICharTermAttribute t);
+
+        int YYChar { get; }
+
+        void YYReset(TextReader reader);
+
+        int YYLength { get; }
+
+        int GetNextToken();
+    }
+
+    public static class StandardTokenizerInterface
+    {
+        public const int YYEOF = -1;
+    }
+}
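
This interface is what lets StandardTokenizer swap generated scanners per
version. A toy implementation, purely illustrative and not part of this
commit, only has to honor the reset/next/text contract:

    // Whitespace "scanner" sketch.
    // (assumes: using System.IO; using Lucene.Net.Analysis.Tokenattributes;)
    internal sealed class WhitespaceScannerSketch : IStandardTokenizerInterface
    {
        private char[] text = new char[0];
        private int pos, start, len;

        public void YYReset(TextReader reader) { text = reader.ReadToEnd().ToCharArray(); pos = 0; }
        public int YYChar { get { return start; } }
        public int YYLength { get { return len; } }
        public void GetText(ICharTermAttribute t) { t.CopyBuffer(text, start, len); }

        public int GetNextToken()
        {
            while (pos < text.Length && char.IsWhiteSpace(text[pos])) pos++;
            if (pos >= text.Length) return StandardTokenizerInterface.YYEOF;
            start = pos;
            while (pos < text.Length && !char.IsWhiteSpace(text[pos])) pos++;
            len = pos - start;
            return StandardTokenizer.ALPHANUM; // one token type is enough here
        }
    }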

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7a4b442f/src/contrib/Analyzers/Standard/StandardAnalyzer.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Analyzers/Standard/StandardAnalyzer.cs b/src/contrib/Analyzers/Standard/StandardAnalyzer.cs
new file mode 100644
index 0000000..dead459
--- /dev/null
+++ b/src/contrib/Analyzers/Standard/StandardAnalyzer.cs
@@ -0,0 +1,70 @@
+using Lucene.Net.Analysis.Core;
+using Lucene.Net.Analysis.Util;
+using System;
+using System.Collections.Generic;
+using System.IO;
+using System.Linq;
+using System.Text;
+using Version = Lucene.Net.Util.Version;
+
+namespace Lucene.Net.Analysis.Standard
+{
+    public sealed class StandardAnalyzer : StopwordAnalyzerBase
+    {
+        public const int DEFAULT_MAX_TOKEN_LENGTH = 255;
+
+        private int maxTokenLength = DEFAULT_MAX_TOKEN_LENGTH;
+
+        public static readonly CharArraySet STOP_WORDS_SET = StopAnalyzer.ENGLISH_STOP_WORDS_SET;
+
+        public StandardAnalyzer(Version? matchVersion, CharArraySet stopWords)
+            : base(matchVersion, stopWords)
+        {
+        }
+
+        public StandardAnalyzer(Version? matchVersion)
+            : this(matchVersion, STOP_WORDS_SET)
+        {
+        }
+
+        public StandardAnalyzer(Version? matchVersion, TextReader stopwords)
+            : this(matchVersion, LoadStopwordSet(stopwords, matchVersion))
+        {
+        }
+
+        public int MaxTokenLength
+        {
+            get { return maxTokenLength; }
+            set { maxTokenLength = value; }
+        }
+
+        public override TokenStreamComponents CreateComponents(string fieldName, TextReader reader)
+        {
+            StandardTokenizer src = new StandardTokenizer(matchVersion, reader);
+            src.MaxTokenLength = maxTokenLength;
+            TokenStream tok = new StandardFilter(matchVersion, src);
+            tok = new LowerCaseFilter(matchVersion, tok);
+            tok = new StopFilter(matchVersion, tok, stopwords);
+            return new AnonymousTokenStreamComponents(this, src, tok);
+        }
+
+        private sealed class AnonymousTokenStreamComponents : TokenStreamComponents
+        {
+            private readonly StandardTokenizer src;
+            private readonly StandardAnalyzer parent;
+
+            public AnonymousTokenStreamComponents(StandardAnalyzer parent, StandardTokenizer src, TokenStream tok)
+                : base(src, tok)
+            {
+                this.parent = parent;
+                this.src = src;
+            }
+
+            public override void SetReader(TextReader reader)
+            {
+                src.MaxTokenLength = parent.maxTokenLength;
+                base.SetReader(reader);
+            }
+        }
+    }
+}
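
A minimal end-to-end sketch for the analyzer above, assuming the usual
Analyzer.TokenStream entry point (field name, input text and version constant
are illustrative):

    var analyzer = new StandardAnalyzer(Version.LUCENE_43);
    TokenStream ts = analyzer.TokenStream("body", new StringReader("The Quick Brown Fox"));
    var term = ts.GetAttribute<ICharTermAttribute>();
    ts.Reset();
    while (ts.IncrementToken())
        Console.WriteLine(term.ToString()); // quick, brown, fox -- "the" is a stop word
    ts.End();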

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7a4b442f/src/contrib/Analyzers/Standard/StandardFilter.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Analyzers/Standard/StandardFilter.cs b/src/contrib/Analyzers/Standard/StandardFilter.cs
new file mode 100644
index 0000000..9381883
--- /dev/null
+++ b/src/contrib/Analyzers/Standard/StandardFilter.cs
@@ -0,0 +1,73 @@
+using Lucene.Net.Analysis.Tokenattributes;
+using Lucene.Net.Util;
+using System;
+using System.Collections.Generic;
+using System.Linq;
+using System.Text;
+using Version = Lucene.Net.Util.Version;
+
+namespace Lucene.Net.Analysis.Standard
+{
+    public class StandardFilter : TokenFilter
+    {
+        private readonly Version? matchVersion;
+
+        public StandardFilter(Version? matchVersion, TokenStream input)
+            : base(input)
+        {
+            this.matchVersion = matchVersion;
+
+            typeAtt = AddAttribute<ITypeAttribute>();
+            termAtt = AddAttribute<ICharTermAttribute>();
+        }
+
+        private static readonly String APOSTROPHE_TYPE = ClassicTokenizer.TOKEN_TYPES[ClassicTokenizer.APOSTROPHE];
+        private static readonly String ACRONYM_TYPE = ClassicTokenizer.TOKEN_TYPES[ClassicTokenizer.ACRONYM];
+
+        // this filter uses the type and term attributes
+        private readonly ITypeAttribute typeAtt; // = addAttribute(TypeAttribute.class);
+        private readonly ICharTermAttribute termAtt; // = addAttribute(CharTermAttribute.class);
+
+        public override bool IncrementToken()
+        {
+            if (matchVersion.GetValueOrDefault().OnOrAfter(Version.LUCENE_31))
+                return input.IncrementToken(); // TODO: add some niceties for the new grammar
+            else
+                return IncrementTokenClassic();
+        }
+
+        public bool IncrementTokenClassic()
+        {
+            if (!input.IncrementToken())
+            {
+                return false;
+            }
+
+            char[] buffer = termAtt.Buffer;
+            int bufferLength = termAtt.Length;
+            String type = typeAtt.Type;
+
+            if (type == APOSTROPHE_TYPE &&      // remove 's
+                bufferLength >= 2 &&
+                buffer[bufferLength - 2] == '\'' &&
+                (buffer[bufferLength - 1] == 's' || buffer[bufferLength - 1] == 'S'))
+            {
+                // Strip last 2 characters off
+                termAtt.SetLength(bufferLength - 2);
+            }
+            else if (type == ACRONYM_TYPE)
+            {      // remove dots
+                int upto = 0;
+                for (int i = 0; i < bufferLength; i++)
+                {
+                    char c = buffer[i];
+                    if (c != '.')
+                        buffer[upto++] = c;
+                }
+                termAtt.SetLength(upto);
+            }
+
+            return true;
+        }
+    }
+}
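
The classic (pre-3.1) path above performs two rewrites, keyed off the token
type set by ClassicTokenizer. A behavior sketch (inputs and version constant
illustrative):

    //   "O'Neill's"  typed <APOSTROPHE>  ->  "O'Neill"  (trailing 's stripped)
    //   "I.B.M."     typed <ACRONYM>     ->  "IBM"      (interior dots removed)
    var tok = new ClassicTokenizer(Version.LUCENE_30, new StringReader("O'Neill's I.B.M."));
    TokenStream ts = new StandardFilter(Version.LUCENE_30, tok);
    var term = ts.GetAttribute<ICharTermAttribute>();
    ts.Reset();
    while (ts.IncrementToken())
        Console.WriteLine(term.ToString());
    ts.End();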

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7a4b442f/src/contrib/Analyzers/Standard/StandardFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Analyzers/Standard/StandardFilterFactory.cs b/src/contrib/Analyzers/Standard/StandardFilterFactory.cs
new file mode 100644
index 0000000..447b5e3
--- /dev/null
+++ b/src/contrib/Analyzers/Standard/StandardFilterFactory.cs
@@ -0,0 +1,26 @@
+using Lucene.Net.Analysis.Util;
+using System;
+using System.Collections.Generic;
+using System.Linq;
+using System.Text;
+
+namespace Lucene.Net.Analysis.Standard
+{
+    public class StandardFilterFactory : TokenFilterFactory
+    {
+        public StandardFilterFactory(IDictionary<String, String> args)
+            : base(args)
+        {            
+            AssureMatchVersion();
+            if (args.Count > 0)
+            {
+                throw new ArgumentException("Unknown parameters: " + string.Join(", ", args.Keys));
+            }
+        }
+
+        public override TokenStream Create(TokenStream input)
+        {
+            return new StandardFilter(luceneMatchVersion, input);
+        }
+    }
+}

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7a4b442f/src/contrib/Analyzers/Standard/StandardTokenizer.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Analyzers/Standard/StandardTokenizer.cs b/src/contrib/Analyzers/Standard/StandardTokenizer.cs
new file mode 100644
index 0000000..4c3d375
--- /dev/null
+++ b/src/contrib/Analyzers/Standard/StandardTokenizer.cs
@@ -0,0 +1,167 @@
+using System;
+using System.Collections.Generic;
+using System.IO;
+using System.Linq;
+using System.Text;
+using Lucene.Net.Util;
+using Version = Lucene.Net.Util.Version;
+using Lucene.Net.Analysis.Tokenattributes;
+using Lucene.Net.Analysis.Standard.Std31;
+using Lucene.Net.Analysis.Standard.Std34;
+
+namespace Lucene.Net.Analysis.Standard
+{
+    public sealed class StandardTokenizer : Tokenizer
+    {
+        private IStandardTokenizerInterface scanner;
+
+        public const int ALPHANUM = 0;
+        [Obsolete]
+        public const int APOSTROPHE = 1;
+        [Obsolete]
+        public const int ACRONYM = 2;
+        [Obsolete]
+        public const int COMPANY = 3;
+        public const int EMAIL = 4;
+        [Obsolete]
+        public const int HOST = 5;
+        public const int NUM = 6;
+        [Obsolete]
+        public const int CJ = 7;
+        [Obsolete]
+        public const int ACRONYM_DEP = 8;
+        public const int SOUTHEAST_ASIAN = 9;
+        public const int IDEOGRAPHIC = 10;
+        public const int HIRAGANA = 11;
+        public const int KATAKANA = 12;
+        public const int HANGUL = 13;
+
+        public static readonly string[] TOKEN_TYPES = new string[] {
+            "<ALPHANUM>",
+            "<APOSTROPHE>",
+            "<ACRONYM>",
+            "<COMPANY>",
+            "<EMAIL>",
+            "<HOST>",
+            "<NUM>",
+            "<CJ>",
+            "<ACRONYM_DEP>",
+            "<SOUTHEAST_ASIAN>",
+            "<IDEOGRAPHIC>",
+            "<HIRAGANA>",
+            "<KATAKANA>",
+            "<HANGUL>"
+          };
+
+        private int maxTokenLength = StandardAnalyzer.DEFAULT_MAX_TOKEN_LENGTH;
+
+        public int MaxTokenLength
+        {
+            get { return maxTokenLength; }
+            set { maxTokenLength = value; }
+        }
+
+        public StandardTokenizer(Version? matchVersion, TextReader input)
+            : base(input)
+        {
+            termAtt = AddAttribute<ICharTermAttribute>();
+            offsetAtt = AddAttribute<IOffsetAttribute>();
+            posIncrAtt = AddAttribute<IPositionIncrementAttribute>();
+            typeAtt = AddAttribute<ITypeAttribute>();
+
+            Init(matchVersion.GetValueOrDefault());
+        }
+
+        public StandardTokenizer(Version? matchVersion, AttributeFactory factory, TextReader input)
+            : base(factory, input)
+        {
+            termAtt = AddAttribute<ICharTermAttribute>();
+            offsetAtt = AddAttribute<IOffsetAttribute>();
+            posIncrAtt = AddAttribute<IPositionIncrementAttribute>();
+            typeAtt = AddAttribute<ITypeAttribute>();
+
+            Init(matchVersion.GetValueOrDefault());
+        }
+
+        private void Init(Version matchVersion)
+        {
+            // null reader: best-effort NullReferenceException if Reset isn't called first
+            if (matchVersion.OnOrAfter(Version.LUCENE_40))
+            {
+                this.scanner = new StandardTokenizerImpl(null);
+            }
+            else if (matchVersion.OnOrAfter(Version.LUCENE_34))
+            {
+                this.scanner = new StandardTokenizerImpl34(null);
+            }
+            else if (matchVersion.OnOrAfter(Version.LUCENE_31))
+            {
+                this.scanner = new StandardTokenizerImpl31(null);
+            }
+            else
+            {
+                this.scanner = new ClassicTokenizerImpl(null);
+            }
+        }
+
+        // this tokenizer generates four attributes:
+        // term, offset, positionIncrement and type
+        private readonly ICharTermAttribute termAtt; // = addAttribute(CharTermAttribute.class);
+        private readonly IOffsetAttribute offsetAtt; // = addAttribute(OffsetAttribute.class);
+        private readonly IPositionIncrementAttribute posIncrAtt; // = addAttribute(PositionIncrementAttribute.class);
+        private readonly ITypeAttribute typeAtt; // = addAttribute(TypeAttribute.class);
+
+        public override bool IncrementToken()
+        {
+            ClearAttributes();
+            int posIncr = 1;
+
+            while (true)
+            {
+                int tokenType = scanner.GetNextToken();
+
+                if (tokenType == StandardTokenizerInterface.YYEOF)
+                {
+                    return false;
+                }
+
+                if (scanner.YYLength <= maxTokenLength)
+                {
+                    posIncrAtt.PositionIncrement = posIncr;
+                    scanner.GetText(termAtt);
+                    int start = scanner.YYChar;
+                    offsetAtt.SetOffset(CorrectOffset(start), CorrectOffset(start + termAtt.Length));
+                    // This 'if' should be removed in the next release. For now, it converts
+                    // invalid acronyms to HOST. When removed, only the 'else' part should
+                    // remain.
+                    if (tokenType == StandardTokenizer.ACRONYM_DEP)
+                    {
+                        typeAtt.Type = StandardTokenizer.TOKEN_TYPES[StandardTokenizer.HOST];
+                        termAtt.SetLength(termAtt.Length - 1); // remove extra '.'
+                    }
+                    else
+                    {
+                        typeAtt.Type = StandardTokenizer.TOKEN_TYPES[tokenType];
+                    }
+                    return true;
+                }
+                else
+                    // When we skip a too-long term, we still increment the
+                    // position increment
+                    posIncr++;
+            }
+        }
+
+        public override void End()
+        {
+            // set final offset
+            int finalOffset = CorrectOffset(scanner.YYChar + scanner.YYLength);
+            offsetAtt.SetOffset(finalOffset, finalOffset);
+        }
+
+        public override void Reset()
+        {
+            scanner.YYReset(input);
+        }
+    }
+}
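
Init above is the compatibility switch: the same tokenizer class picks a
different generated grammar per matchVersion. A sketch of what that means for
callers (version constants and input are illustrative):

    //   LUCENE_40+ -> StandardTokenizerImpl   (current Unicode word-break rules)
    //   LUCENE_34  -> StandardTokenizerImpl34
    //   LUCENE_31  -> StandardTokenizerImpl31
    //   older      -> ClassicTokenizerImpl    (the pre-3.1 "classic" grammar)
    var modern  = new StandardTokenizer(Version.LUCENE_43, new StringReader("wsmith@example.com"));
    var classic = new StandardTokenizer(Version.LUCENE_30, new StringReader("wsmith@example.com"));
    // classic keeps the address as one <EMAIL> token; the newer grammar has no
    // EMAIL rule and splits it into several tokens instead.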

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7a4b442f/src/contrib/Analyzers/Standard/StandardTokenizerFactory.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Analyzers/Standard/StandardTokenizerFactory.cs b/src/contrib/Analyzers/Standard/StandardTokenizerFactory.cs
new file mode 100644
index 0000000..bfc64ca
--- /dev/null
+++ b/src/contrib/Analyzers/Standard/StandardTokenizerFactory.cs
@@ -0,0 +1,31 @@
+using Lucene.Net.Analysis.Util;
+using System;
+using System.Collections.Generic;
+using System.Linq;
+using System.Text;
+
+namespace Lucene.Net.Analysis.Standard
+{
+    public class StandardTokenizerFactory : TokenizerFactory
+    {
+        private readonly int maxTokenLength;
+
+        public StandardTokenizerFactory(IDictionary<String, String> args)
+            : base(args)
+        {
+            AssureMatchVersion();
+            maxTokenLength = GetInt(args, "maxTokenLength", StandardAnalyzer.DEFAULT_MAX_TOKEN_LENGTH);
+            if (args.Count > 0)
+            {
+                throw new ArgumentException("Unknown parameters: " + string.Join(", ", args.Keys));
+            }
+        }
+
+        public override Tokenizer Create(Net.Util.AttributeSource.AttributeFactory factory, System.IO.TextReader input)
+        {
+            StandardTokenizer tokenizer = new StandardTokenizer(luceneMatchVersion, factory, input);
+            tokenizer.MaxTokenLength = maxTokenLength;
+            return tokenizer;
+        }
+    }
+}


http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7a4b442f/src/contrib/Analyzers/Standard/StandardTokenizerImpl.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Analyzers/Standard/StandardTokenizerImpl.cs b/src/contrib/Analyzers/Standard/StandardTokenizerImpl.cs
new file mode 100644
index 0000000..f91bd9b
--- /dev/null
+++ b/src/contrib/Analyzers/Standard/StandardTokenizerImpl.cs
@@ -0,0 +1,1241 @@
+using Lucene.Net.Analysis.Tokenattributes;
+using System;
+using System.Collections.Generic;
+using System.IO;
+using System.Linq;
+using System.Text;
+
+namespace Lucene.Net.Analysis.Standard
+{
+    public sealed class StandardTokenizerImpl : IStandardTokenizerInterface
+    {
+
+        /** This character denotes the end of file */
+        public const int YYEOF = -1;
+
+        /** initial size of the lookahead buffer */
+        private const int ZZ_BUFFERSIZE = 4096;
+
+        /** lexical states */
+        public const int YYINITIAL = 0;
+
+        /**
+         * ZZ_LEXSTATE[l] is the state in the DFA for the lexical state l
+         * ZZ_LEXSTATE[l+1] is the state in the DFA for the lexical state l
+         *                  at the beginning of a line
+         * l is of the form l = 2*k, k a non negative integer
+         */
+        private static readonly int[] ZZ_LEXSTATE = {
+            0, 0
+        };
+
+        /** 
+         * Translates characters to character classes
+         */
+        private const String ZZ_CMAP_PACKED =
+          "\u0027\0\u0001\u0082\u0004\0\u0001\u0081\u0001\0\u0001\u0082\u0001\0\u000a\u007e\u0001\u0080\u0001\u0081" +
+          "\u0005\0\u001a\u007c\u0004\0\u0001\u0083\u0001\0\u001a\u007c\u002f\0\u0001\u007c\u0002\0\u0001\u007d" +
+          "\u0007\0\u0001\u007c\u0001\0\u0001\u0080\u0002\0\u0001\u007c\u0005\0\u0017\u007c\u0001\0\u001f\u007c" +
+          "\u0001\0\u01ca\u007c\u0004\0\u000c\u007c\u000e\0\u0005\u007c\u0007\0\u0001\u007c\u0001\0\u0001\u007c" +
+          "\u0011\0\u0070\u007d\u0005\u007c\u0001\0\u0002\u007c\u0002\0\u0004\u007c\u0001\u0081\u0007\0\u0001\u007c" +
+          "\u0001\u0080\u0003\u007c\u0001\0\u0001\u007c\u0001\0\u0014\u007c\u0001\0\u0053\u007c\u0001\0\u008b\u007c" +
+          "\u0001\0\u0007\u007d\u009e\u007c\u0009\0\u0026\u007c\u0002\0\u0001\u007c\u0007\0\u0027\u007c\u0001\0" +
+          "\u0001\u0081\u0007\0\u002d\u007d\u0001\0\u0001\u007d\u0001\0\u0002\u007d\u0001\0\u0002\u007d\u0001\0" +
+          "\u0001\u007d\u0008\0\u001b\u007c\u0005\0\u0004\u007c\u0001\u0080\u000b\0\u0005\u007d\u0007\0\u0002\u0081" +
+          "\u0002\0\u000b\u007d\u0005\0\u002b\u007c\u0015\u007d\u000a\u007e\u0001\0\u0001\u007e\u0001\u0081\u0001\0" +
+          "\u0002\u007c\u0001\u007d\u0063\u007c\u0001\0\u0001\u007c\u0007\u007d\u0001\u007d\u0001\0\u0006\u007d\u0002\u007c" +
+          "\u0002\u007d\u0001\0\u0004\u007d\u0002\u007c\u000a\u007e\u0003\u007c\u0002\0\u0001\u007c\u000f\0\u0001\u007d" +
+          "\u0001\u007c\u0001\u007d\u001e\u007c\u001b\u007d\u0002\0\u0059\u007c\u000b\u007d\u0001\u007c\u000e\0\u000a\u007e" +
+          "\u0021\u007c\u0009\u007d\u0002\u007c\u0002\0\u0001\u0081\u0001\0\u0001\u007c\u0005\0\u0016\u007c\u0004\u007d" +
+          "\u0001\u007c\u0009\u007d\u0001\u007c\u0003\u007d\u0001\u007c\u0005\u007d\u0012\0\u0019\u007c\u0003\u007d\u0044\0" +
+          "\u0001\u007c\u0001\0\u000b\u007c\u0037\0\u001b\u007d\u0001\0\u0004\u007d\u0036\u007c\u0003\u007d\u0001\u007c" +
+          "\u0012\u007d\u0001\u007c\u0007\u007d\u000a\u007c\u0002\u007d\u0002\0\u000a\u007e\u0001\0\u0007\u007c\u0001\0" +
+          "\u0007\u007c\u0001\0\u0003\u007d\u0001\0\u0008\u007c\u0002\0\u0002\u007c\u0002\0\u0016\u007c\u0001\0" +
+          "\u0007\u007c\u0001\0\u0001\u007c\u0003\0\u0004\u007c\u0002\0\u0001\u007d\u0001\u007c\u0007\u007d\u0002\0" +
+          "\u0002\u007d\u0002\0\u0003\u007d\u0001\u007c\u0008\0\u0001\u007d\u0004\0\u0002\u007c\u0001\0\u0003\u007c" +
+          "\u0002\u007d\u0002\0\u000a\u007e\u0002\u007c\u000f\0\u0003\u007d\u0001\0\u0006\u007c\u0004\0\u0002\u007c" +
+          "\u0002\0\u0016\u007c\u0001\0\u0007\u007c\u0001\0\u0002\u007c\u0001\0\u0002\u007c\u0001\0\u0002\u007c" +
+          "\u0002\0\u0001\u007d\u0001\0\u0005\u007d\u0004\0\u0002\u007d\u0002\0\u0003\u007d\u0003\0\u0001\u007d" +
+          "\u0007\0\u0004\u007c\u0001\0\u0001\u007c\u0007\0\u000a\u007e\u0002\u007d\u0003\u007c\u0001\u007d\u000b\0" +
+          "\u0003\u007d\u0001\0\u0009\u007c\u0001\0\u0003\u007c\u0001\0\u0016\u007c\u0001\0\u0007\u007c\u0001\0" +
+          "\u0002\u007c\u0001\0\u0005\u007c\u0002\0\u0001\u007d\u0001\u007c\u0008\u007d\u0001\0\u0003\u007d\u0001\0" +
+          "\u0003\u007d\u0002\0\u0001\u007c\u000f\0\u0002\u007c\u0002\u007d\u0002\0\u000a\u007e\u0011\0\u0003\u007d" +
+          "\u0001\0\u0008\u007c\u0002\0\u0002\u007c\u0002\0\u0016\u007c\u0001\0\u0007\u007c\u0001\0\u0002\u007c" +
+          "\u0001\0\u0005\u007c\u0002\0\u0001\u007d\u0001\u007c\u0007\u007d\u0002\0\u0002\u007d\u0002\0\u0003\u007d" +
+          "\u0008\0\u0002\u007d\u0004\0\u0002\u007c\u0001\0\u0003\u007c\u0002\u007d\u0002\0\u000a\u007e\u0001\0" +
+          "\u0001\u007c\u0010\0\u0001\u007d\u0001\u007c\u0001\0\u0006\u007c\u0003\0\u0003\u007c\u0001\0\u0004\u007c" +
+          "\u0003\0\u0002\u007c\u0001\0\u0001\u007c\u0001\0\u0002\u007c\u0003\0\u0002\u007c\u0003\0\u0003\u007c" +
+          "\u0003\0\u000c\u007c\u0004\0\u0005\u007d\u0003\0\u0003\u007d\u0001\0\u0004\u007d\u0002\0\u0001\u007c" +
+          "\u0006\0\u0001\u007d\u000e\0\u000a\u007e\u0011\0\u0003\u007d\u0001\0\u0008\u007c\u0001\0\u0003\u007c" +
+          "\u0001\0\u0017\u007c\u0001\0\u000a\u007c\u0001\0\u0005\u007c\u0003\0\u0001\u007c\u0007\u007d\u0001\0" +
+          "\u0003\u007d\u0001\0\u0004\u007d\u0007\0\u0002\u007d\u0001\0\u0002\u007c\u0006\0\u0002\u007c\u0002\u007d" +
+          "\u0002\0\u000a\u007e\u0012\0\u0002\u007d\u0001\0\u0008\u007c\u0001\0\u0003\u007c\u0001\0\u0017\u007c" +
+          "\u0001\0\u000a\u007c\u0001\0\u0005\u007c\u0002\0\u0001\u007d\u0001\u007c\u0007\u007d\u0001\0\u0003\u007d" +
+          "\u0001\0\u0004\u007d\u0007\0\u0002\u007d\u0007\0\u0001\u007c\u0001\0\u0002\u007c\u0002\u007d\u0002\0" +
+          "\u000a\u007e\u0001\0\u0002\u007c\u000f\0\u0002\u007d\u0001\0\u0008\u007c\u0001\0\u0003\u007c\u0001\0" +
+          "\u0029\u007c\u0002\0\u0001\u007c\u0007\u007d\u0001\0\u0003\u007d\u0001\0\u0004\u007d\u0001\u007c\u0008\0" +
+          "\u0001\u007d\u0008\0\u0002\u007c\u0002\u007d\u0002\0\u000a\u007e\u000a\0\u0006\u007c\u0002\0\u0002\u007d" +
+          "\u0001\0\u0012\u007c\u0003\0\u0018\u007c\u0001\0\u0009\u007c\u0001\0\u0001\u007c\u0002\0\u0007\u007c" +
+          "\u0003\0\u0001\u007d\u0004\0\u0006\u007d\u0001\0\u0001\u007d\u0001\0\u0008\u007d\u0012\0\u0002\u007d" +
+          "\u000d\0\u0030\u0084\u0001\u0085\u0002\u0084\u0007\u0085\u0005\0\u0007\u0084\u0008\u0085\u0001\0\u000a\u007e" +
+          "\u0027\0\u0002\u0084\u0001\0\u0001\u0084\u0002\0\u0002\u0084\u0001\0\u0001\u0084\u0002\0\u0001\u0084" +
+          "\u0006\0\u0004\u0084\u0001\0\u0007\u0084\u0001\0\u0003\u0084\u0001\0\u0001\u0084\u0001\0\u0001\u0084" +
+          "\u0002\0\u0002\u0084\u0001\0\u0004\u0084\u0001\u0085\u0002\u0084\u0006\u0085\u0001\0\u0002\u0085\u0001\u0084" +
+          "\u0002\0\u0005\u0084\u0001\0\u0001\u0084\u0001\0\u0006\u0085\u0002\0\u000a\u007e\u0002\0\u0004\u0084" +
+          "\u0020\0\u0001\u007c\u0017\0\u0002\u007d\u0006\0\u000a\u007e\u000b\0\u0001\u007d\u0001\0\u0001\u007d" +
+          "\u0001\0\u0001\u007d\u0004\0\u0002\u007d\u0008\u007c\u0001\0\u0024\u007c\u0004\0\u0014\u007d\u0001\0" +
+          "\u0002\u007d\u0005\u007c\u000b\u007d\u0001\0\u0024\u007d\u0009\0\u0001\u007d\u0039\0\u002b\u0084\u0014\u0085" +
+          "\u0001\u0084\u000a\u007e\u0006\0\u0006\u0084\u0004\u0085\u0004\u0084\u0003\u0085\u0001\u0084\u0003\u0085\u0002\u0084" +
+          "\u0007\u0085\u0003\u0084\u0004\u0085\u000d\u0084\u000c\u0085\u0001\u0084\u0001\u0085\u000a\u007e\u0004\u0085\u0002\u0084" +
+          "\u0026\u007c\u0001\0\u0001\u007c\u0005\0\u0001\u007c\u0002\0\u002b\u007c\u0001\0\u0004\u007c\u0100\u0088" +
+          "\u0049\u007c\u0001\0\u0004\u007c\u0002\0\u0007\u007c\u0001\0\u0001\u007c\u0001\0\u0004\u007c\u0002\0" +
+          "\u0029\u007c\u0001\0\u0004\u007c\u0002\0\u0021\u007c\u0001\0\u0004\u007c\u0002\0\u0007\u007c\u0001\0" +
+          "\u0001\u007c\u0001\0\u0004\u007c\u0002\0\u000f\u007c\u0001\0\u0039\u007c\u0001\0\u0004\u007c\u0002\0" +
+          "\u0043\u007c\u0002\0\u0003\u007d\u0020\0\u0010\u007c\u0010\0\u0055\u007c\u000c\0\u026c\u007c\u0002\0" +
+          "\u0011\u007c\u0001\0\u001a\u007c\u0005\0\u004b\u007c\u0003\0\u0003\u007c\u000f\0\u000d\u007c\u0001\0" +
+          "\u0004\u007c\u0003\u007d\u000b\0\u0012\u007c\u0003\u007d\u000b\0\u0012\u007c\u0002\u007d\u000c\0\u000d\u007c" +
+          "\u0001\0\u0003\u007c\u0001\0\u0002\u007d\u000c\0\u0034\u0084\u0020\u0085\u0003\0\u0001\u0084\u0004\0" +
+          "\u0001\u0084\u0001\u0085\u0002\0\u000a\u007e\u0021\0\u0003\u007d\u0002\0\u000a\u007e\u0006\0\u0058\u007c" +
+          "\u0008\0\u0029\u007c\u0001\u007d\u0001\u007c\u0005\0\u0046\u007c\u000a\0\u001d\u007c\u0003\0\u000c\u007d" +
+          "\u0004\0\u000c\u007d\u000a\0\u000a\u007e\u001e\u0084\u0002\0\u0005\u0084\u000b\0\u002c\u0084\u0004\0" +
+          "\u0011\u0085\u0007\u0084\u0002\u0085\u0006\0\u000a\u007e\u0001\u0084\u0003\0\u0002\u0084\u0020\0\u0017\u007c" +
+          "\u0005\u007d\u0004\0\u0035\u0084\u000a\u0085\u0001\0\u001d\u0085\u0002\0\u0001\u007d\u000a\u007e\u0006\0" +
+          "\u000a\u007e\u0006\0\u000e\u0084\u0052\0\u0005\u007d\u002f\u007c\u0011\u007d\u0007\u007c\u0004\0\u000a\u007e" +
+          "\u0011\0\u0009\u007d\u000c\0\u0003\u007d\u001e\u007c\u000d\u007d\u0002\u007c\u000a\u007e\u002c\u007c\u000e\u007d" +
+          "\u000c\0\u0024\u007c\u0014\u007d\u0008\0\u000a\u007e\u0003\0\u0003\u007c\u000a\u007e\u0024\u007c\u0052\0" +
+          "\u0003\u007d\u0001\0\u0015\u007d\u0004\u007c\u0001\u007d\u0004\u007c\u0003\u007d\u0002\u007c\u0009\0\u00c0\u007c" +
+          "\u0027\u007d\u0015\0\u0004\u007d\u0116\u007c\u0002\0\u0006\u007c\u0002\0\u0026\u007c\u0002\0\u0006\u007c" +
+          "\u0002\0\u0008\u007c\u0001\0\u0001\u007c\u0001\0\u0001\u007c\u0001\0\u0001\u007c\u0001\0\u001f\u007c" +
+          "\u0002\0\u0035\u007c\u0001\0\u0007\u007c\u0001\0\u0001\u007c\u0003\0\u0003\u007c\u0001\0\u0007\u007c" +
+          "\u0003\0\u0004\u007c\u0002\0\u0006\u007c\u0004\0\u000d\u007c\u0005\0\u0003\u007c\u0001\0\u0007\u007c" +
+          "\u000f\0\u0002\u007d\u0002\u007d\u0008\0\u0002\u0082\u000a\0\u0001\u0082\u0002\0\u0001\u0080\u0002\0" +
+          "\u0005\u007d\u0010\0\u0002\u0083\u0003\0\u0001\u0081\u000f\0\u0001\u0083\u000b\0\u0005\u007d\u0005\0" +
+          "\u0006\u007d\u0001\0\u0001\u007c\u000d\0\u0001\u007c\u0010\0\u000d\u007c\u0033\0\u0021\u007d\u0011\0" +
+          "\u0001\u007c\u0004\0\u0001\u007c\u0002\0\u000a\u007c\u0001\0\u0001\u007c\u0003\0\u0005\u007c\u0006\0" +
+          "\u0001\u007c\u0001\0\u0001\u007c\u0001\0\u0001\u007c\u0001\0\u0004\u007c\u0001\0\u000b\u007c\u0002\0" +
+          "\u0004\u007c\u0005\0\u0005\u007c\u0004\0\u0001\u007c\u0011\0\u0029\u007c\u032d\0\u0034\u007c\u0716\0" +
+          "\u002f\u007c\u0001\0\u002f\u007c\u0001\0\u0085\u007c\u0006\0\u0004\u007c\u0003\u007d\u0002\u007c\u000c\0" +
+          "\u0026\u007c\u0001\0\u0001\u007c\u0005\0\u0001\u007c\u0002\0\u0038\u007c\u0007\0\u0001\u007c\u000f\0" +
+          "\u0001\u007d\u0017\u007c\u0009\0\u0007\u007c\u0001\0\u0007\u007c\u0001\0\u0007\u007c\u0001\0\u0007\u007c" +
+          "\u0001\0\u0007\u007c\u0001\0\u0007\u007c\u0001\0\u0007\u007c\u0001\0\u0007\u007c\u0001\0\u0020\u007d" +
+          "\u002f\0\u0001\u007c\u0050\0\u001a\u0086\u0001\0\u0059\u0086\u000c\0\u00d6\u0086\u002f\0\u0001\u007c" +
+          "\u0001\0\u0001\u0086\u0019\0\u0009\u0086\u0004\u007d\u0002\u007d\u0001\0\u0005\u007f\u0002\0\u0003\u0086" +
+          "\u0001\u007c\u0001\u007c\u0004\0\u0056\u0087\u0002\0\u0002\u007d\u0002\u007f\u0003\u0087\u005b\u007f\u0001\0" +
+          "\u0004\u007f\u0005\0\u0029\u007c\u0003\0\u005e\u0088\u0011\0\u001b\u007c\u0035\0\u0010\u007f\u001f\0" +
+          "\u0041\0\u001f\0\u0051\0\u002f\u007f\u0001\0\u0058\u007f\u00a8\0\u19b6\u0086\u004a\0\u51cd\u0086" +
+          "\u0033\0\u048d\u007c\u0043\0\u002e\u007c\u0002\0\u010d\u007c\u0003\0\u0010\u007c\u000a\u007e\u0002\u007c" +
+          "\u0014\0\u002f\u007c\u0004\u007d\u0001\0\u000a\u007d\u0001\0\u0019\u007c\u0007\0\u0001\u007d\u0050\u007c" +
+          "\u0002\u007d\u0025\0\u0009\u007c\u0002\0\u0067\u007c\u0002\0\u0004\u007c\u0001\0\u0004\u007c\u000c\0" +
+          "\u000b\u007c\u004d\0\u000a\u007c\u0001\u007d\u0003\u007c\u0001\u007d\u0004\u007c\u0001\u007d\u0017\u007c\u0005\u007d" +
+          "\u0018\0\u0034\u007c\u000c\0\u0002\u007d\u0032\u007c\u0011\u007d\u000b\0\u000a\u007e\u0006\0\u0012\u007d" +
+          "\u0006\u007c\u0003\0\u0001\u007c\u0004\0\u000a\u007e\u001c\u007c\u0008\u007d\u0002\0\u0017\u007c\u000d\u007d" +
+          "\u000c\0\u001d\u0088\u0003\0\u0004\u007d\u002f\u007c\u000e\u007d\u000e\0\u0001\u007c\u000a\u007e\u0026\0" +
+          "\u0029\u007c\u000e\u007d\u0009\0\u0003\u007c\u0001\u007d\u0008\u007c\u0002\u007d\u0002\0\u000a\u007e\u0006\0" +
+          "\u001b\u0084\u0001\u0085\u0004\0\u0030\u0084\u0001\u0085\u0001\u0084\u0003\u0085\u0002\u0084\u0002\u0085\u0005\u0084" +
+          "\u0002\u0085\u0001\u0084\u0001\u0085\u0001\u0084\u0018\0\u0005\u0084\u000b\u007c\u0005\u007d\u0002\0\u0003\u007c" +
+          "\u0002\u007d\u000a\0\u0006\u007c\u0002\0\u0006\u007c\u0002\0\u0006\u007c\u0009\0\u0007\u007c\u0001\0" +
+          "\u0007\u007c\u0091\0\u0023\u007c\u0008\u007d\u0001\0\u0002\u007d\u0002\0\u000a\u007e\u0006\0\u2ba4\u0088" +
+          "\u000c\0\u0017\u0088\u0004\0\u0031\u0088\u0004\0\u0001\u0024\u0001\u0020\u0001\u0037\u0001\u0034\u0001\u001b" +
+          "\u0001\u0018\u0002\0\u0001\u0014\u0001\u0011\u0002\0\u0001\u000f\u0001\u000d\u000c\0\u0001\u0003\u0001\u0006" +
+          "\u0010\0\u0001\u006e\u0007\0\u0001\u0049\u0001\u0008\u0005\0\u0001\u0001\u0001\u007a\u0003\0\u0001\u0073" +
+          "\u0001\u0073\u0001\u0073\u0001\u0073\u0001\u0073\u0001\u0073\u0001\u0073\u0001\u0073\u0001\u0073\u0001\u0073\u0001\u0073" +
+          "\u0001\u0073\u0001\u0073\u0001\u0073\u0001\u0073\u0001\u0073\u0001\u0073\u0001\u0073\u0001\u0073\u0001\u0073\u0001\u0073" +
+          "\u0001\u0073\u0001\u0073\u0001\u0073\u0001\u0073\u0001\u0073\u0001\u0073\u0001\u0073\u0001\u0073\u0001\u0073\u0001\u0073" +
+          "\u0001\u0073\u0001\u0073\u0001\u0073\u0001\u0073\u0001\u0073\u0001\u0073\u0001\u0073\u0001\u0073\u0001\u0073\u0001\u0073" +
+          "\u0001\u0074\u0001\u0073\u0001\u0073\u0001\u0073\u0001\u0078\u0001\u0076\u000f\0\u0001\u0070\u02c1\0\u0001\u004c" +
+          "\u00bf\0\u0001\u006f\u0001\u004d\u0001\u000e\u0003\u0077\u0002\u0032\u0001\u0077\u0001\u0032\u0002\u0077\u0001\u001e" +
+          "\u0011\u0077\u0002\u0046\u0007\u004f\u0001\u004e\u0007\u004f\u0007\u0042\u0001\u001f\u0001\u0042\u0001\u0058\u0002\u0036" +
+          "\u0001\u0035\u0001\u0058\u0001\u0036\u0001\u0035\u0008\u0058\u0002\u0047\u0005\u0043\u0002\u003d\u0005\u0043\u0001\u0012" +
+          "\u0008\u002b\u0005\u0013\u0003\u0021\u000a\u0067\u0010\u0021\u0003\u0033\u001a\u0023\u0001\u0022\u0002\u0031\u0002\u006c" +
+          "\u0001\u006d\u0002\u006c\u0002\u006d\u0002\u006c\u0001\u006d\u0003\u0031\u0001\u0030\u0002\u0031\u000a\u0048\u0001\u0056" +
+          "\u0001\u0028\u0001\u0025\u0001\u0048\u0006\u0028\u0001\u0025\u000b\u0028\u0019\u0031\u0007\u0028\u000a\u0068\u0001\u0028" +
+          "\u0005\u000b\u0003\u0057\u0003\u0041\u0001\u0040\u0004\u0041\u0002\u0040\u0008\u0041\u0001\u0040\u0007\u001d\u0001\u001c" +
+          "\u0002\u001d\u0007\u0041\u000e\u0057\u0001\u0061\u0004\u006a\u0001\u0004\u0004\u0069\u0001\u0004\u0005\u0060\u0001\u005f" +
+          "\u0001\u0060\u0003\u005f\u0007\u0060\u0001\u005f\u0013\u0060\u0005\u004b\u0003\u0060\u0006\u004b\u0002\u004b\u0006\u004a" +
+          "\u0005\u004a\u0003\u005c\u0002\u0041\u0007\u005b\u001e\u0041\u0004\u005b\u0005\u0041\u0005\u0057\u0006\u0055\u0002\u0057" +
+          "\u0001\u0055\u0004\u001d\u000b\u005e\u000a\u0069\u0016\u005e\u000d\u000b\u0001\u005d\u0002\u000b\u0001\u007b\u0003\u0062" +
+          "\u0001\u000b\u0002\u0062\u0005\u0071\u0004\u0062\u0004\u0072\u0001\u0071\u0003\u0072\u0001\u0071\u0005\u0072\u0002\u0038" +
+          "\u0001\u003b\u0002\u0038\u0001\u003b\u0001\u0038\u0002\u003b\u0001\u0038\u0001\u003b\u000a\u0038\u0001\u003b\u0004\u0005" +
+          "\u0001\u0064\u0001\u0063\u0001\u0065\u0001\u000a\u0003\u0075\u0001\u0065\u0002\u0075\u0001\u0059\u0002\u005a\u0002\u0075" +
+          "\u0001\u000a\u0001\u0075\u0001\u000a\u0001\u0075\u0001\u000a\u0001\u0075\u0003\u000a\u0001\u0075\u0002\u000a\u0001\u0075" +
+          "\u0001\u000a\u0002\u0075\u0001\u000a\u0001\u0075\u0001\u000a\u0001\u0075\u0001\u000a\u0001\u0075\u0001\u000a\u0001\u0075" +
+          "\u0001\u000a\u0001\u003e\u0002\u003a\u0001\u003e\u0001\u003a\u0002\u003e\u0004\u003a\u0001\u003e\u0007\u003a\u0001\u003e" +
+          "\u0004\u003a\u0001\u003e\u0004\u003a\u0001\u0075\u0001\u000a\u0001\u0075\u000a\u0019\u0001\u002f\u0011\u0019\u0001\u002f" +
+          "\u0003\u001a\u0001\u002f\u0003\u0019\u0001\u002f\u0001\u0019\u0002\u0002\u0002\u0019\u0001\u002f\u000d\u0054\u0004\u0027" +
+          "\u0004\u002c\u0001\u0066\u0001\u002e\u0008\u0066\u0007\u002c\u0006\u0075\u0004\u0015\u0001\u0017\u001f\u0015\u0001\u0017" +
+          "\u0004\u0015\u0015\u0045\u0001\u0079\u0009\u0045\u0011\u0016\u0005\u0045\u0001\u0007\u000a\u002d\u0005\u0045\u0006\u0044" +
+          "\u0004\u003e\u0001\u003f\u0001\u0016\u0005\u0053\u000a\u0051\u000f\u0053\u0001\u003c\u0003\u0039\u000c\u0050\u0001\u0009" +
+          "\u0009\u0026\u0001\u002a\u0005\u0026\u0004\u0052\u000b\u0029\u0002\u000c\u0009\u0026\u0001\u002a\u0019\u0026\u0001\u002a" +
+          "\u0004\u0009\u0004\u0026\u0002\u002a\u0002\u006b\u0001\u0010\u0005\u006b\u002a\u0010\u1900\0\u016e\u0086\u0002\0" +
+          "\u006a\u0086\u0026\0\u0007\u007c\u000c\0\u0005\u007c\u0005\0\u0001\u007c\u0001\u007d\u000a\u007c\u0001\0" +
+          "\u000d\u007c\u0001\0\u0005\u007c\u0001\0\u0001\u007c\u0001\0\u0002\u007c\u0001\0\u0002\u007c\u0001\0" +
+          "\u006c\u007c\u0021\0\u016b\u007c\u0012\0\u0040\u007c\u0002\0\u0036\u007c\u0028\0\u000c\u007c\u0004\0" +
+          "\u0010\u007d\u0001\u0081\u0002\0\u0001\u0080\u0001\u0081\u000b\0\u0007\u007d\u000c\0\u0002\u0083\u0018\0" +
+          "\u0003\u0083\u0001\u0081\u0001\0\u0001\u0082\u0001\0\u0001\u0081\u0001\u0080\u001a\0\u0005\u007c\u0001\0" +
+          "\u0087\u007c\u0002\0\u0001\u007d\u0007\0\u0001\u0082\u0004\0\u0001\u0081\u0001\0\u0001\u0082\u0001\0" +
+          "\u000a\u007e\u0001\u0080\u0001\u0081\u0005\0\u001a\u007c\u0004\0\u0001\u0083\u0001\0\u001a\u007c\u000b\0" +
+          "\u0038\u007f\u0002\u007d\u001f\u0088\u0003\0\u0006\u0088\u0002\0\u0006\u0088\u0002\0\u0006\u0088\u0002\0" +
+          "\u0003\u0088\u001c\0\u0003\u007d\u0004\0";
+
+        /** 
+         * Translates characters to character classes
+         */
+        private static readonly char[] ZZ_CMAP = zzUnpackCMap(ZZ_CMAP_PACKED);
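+
+        // Illustrative note (editorial sketch, not generated code): ZZ_CMAP maps
+        // a raw input char to its character class, which the scan loop then uses
+        // as a column offset into the transition table, schematically:
+        //
+        //   int charClass = ZZ_CMAP[zzBuffer[zzCurrentPos]];
+        //   zzState = ZZ_TRANS[ZZ_ROWMAP[zzState] + charClass];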
+
+        /** 
+         * Translates DFA states to action switch labels.
+         */
+        private static readonly int[] ZZ_ACTION = zzUnpackAction();
+
+        private const String ZZ_ACTION_PACKED_0 =
+          "\u0001\0\u0016\u0001\u0001\u0002\u0001\u0003\u0001\u0004\u0001\u0001\u0001\u0005\u0001\u0006" +
+          "\u0001\u0007\u0001\u0008\u0010\0\u0001\u0002\u0001\0\u0001\u0002\u000a\0\u0001\u0003" +
+          "\u0011\0\u0001\u0002\u004d\0";
+
+        private static int[] zzUnpackAction()
+        {
+            int[] result = new int[156];
+            int offset = 0;
+            offset = zzUnpackAction(ZZ_ACTION_PACKED_0, offset, result);
+            return result;
+        }
+
+        private static int zzUnpackAction(String packed, int offset, int[] result)
+        {
+            int i = 0;       /* index in packed string  */
+            int j = offset;  /* index in unpacked array */
+            int l = packed.Length;
+            while (i < l)
+            {
+                int count = packed[i++];   /* run length */
+                int value = packed[i++];   /* value to repeat */
+                do result[j++] = value; while (--count > 0);
+            }
+            return j;
+        }
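+
+        // Illustrative note (editorial sketch, not generated code): the packed
+        // action and attribute tables are run-length encoded as (count, value)
+        // char pairs. For example, the prefix "\u0001\0\u0016\u0001" of
+        // ZZ_ACTION_PACKED_0 expands to a single 0 followed by 0x16 (22) copies
+        // of 1. A minimal standalone decoder for this format:
+        //
+        //   private static int[] Unpack(string packed, int size)
+        //   {
+        //       var result = new int[size];
+        //       int i = 0, j = 0;
+        //       while (i < packed.Length)
+        //       {
+        //           int count = packed[i++];                 // run length
+        //           int value = packed[i++];                 // value to repeat
+        //           while (count-- > 0) result[j++] = value; // emit the run
+        //       }
+        //       return result;
+        //   }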
+
+
+        /** 
+         * Translates a state to a row index in the transition table
+         */
+        private static readonly int[] ZZ_ROWMAP = zzUnpackRowMap();
+
+        private const String ZZ_ROWMAP_PACKED_0 =
+          "\0\0\0\u0089\0\u0112\0\u019b\0\u0224\0\u02ad\0\u0336\0\u03bf" +
+          "\0\u0448\0\u04d1\0\u055a\0\u05e3\0\u066c\0\u06f5\0\u077e\0\u0807" +
+          "\0\u0890\0\u0919\0\u09a2\0\u0a2b\0\u0ab4\0\u0b3d\0\u0bc6\0\u0c4f" +
+          "\0\u0cd8\0\u0d61\0\u0dea\0\u0e73\0\u0efc\0\u0f85\0\u100e\0\u0112" +
+          "\0\u019b\0\u1097\0\u1120\0\u0336\0\u03bf\0\u0448\0\u04d1\0\u11a9" +
+          "\0\u1232\0\u12bb\0\u1344\0\u077e\0\u13cd\0\u1456\0\u14df\0\u1568" +
+          "\0\u15f1\0\u167a\0\u1703\0\u02ad\0\u178c\0\u1815\0\u066c\0\u189e" +
+          "\0\u1927\0\u19b0\0\u1a39\0\u1ac2\0\u1b4b\0\u1bd4\0\u1c5d\0\u1ce6" +
+          "\0\u1d6f\0\u1df8\0\u1e81\0\u1f0a\0\u1f93\0\u201c\0\u20a5\0\u212e" +
+          "\0\u21b7\0\u2240\0\u22c9\0\u2352\0\u23db\0\u0dea\0\u2464\0\u24ed" +
+          "\0\u2576\0\u25ff\0\u2688\0\u2711\0\u279a\0\u2823\0\u28ac\0\u2935" +
+          "\0\u29be\0\u2a47\0\u2ad0\0\u2b59\0\u2be2\0\u2c6b\0\u2cf4\0\u2d7d" +
+          "\0\u2e06\0\u2e8f\0\u2f18\0\u2fa1\0\u302a\0\u30b3\0\u313c\0\u31c5" +
+          "\0\u324e\0\u32d7\0\u3360\0\u33e9\0\u3472\0\u34fb\0\u3584\0\u360d" +
+          "\0\u3696\0\u371f\0\u37a8\0\u3831\0\u38ba\0\u3943\0\u39cc\0\u3a55" +
+          "\0\u3ade\0\u3b67\0\u3bf0\0\u3c79\0\u3d02\0\u3d8b\0\u3e14\0\u3e9d" +
+          "\0\u3f26\0\u3faf\0\u4038\0\u40c1\0\u414a\0\u41d3\0\u425c\0\u42e5" +
+          "\0\u436e\0\u43f7\0\u4480\0\u4509\0\u4592\0\u461b\0\u46a4\0\u472d" +
+          "\0\u47b6\0\u483f\0\u48c8\0\u4951\0\u49da\0\u4a63\0\u4aec\0\u4b75" +
+          "\0\u4bfe\0\u4c87\0\u4d10\0\u4d99";
+
+        private static int[] zzUnpackRowMap()
+        {
+            int[] result = new int[156];
+            int offset = 0;
+            offset = zzUnpackRowMap(ZZ_ROWMAP_PACKED_0, offset, result);
+            return result;
+        }
+
+        private static int zzUnpackRowMap(String packed, int offset, int[] result)
+        {
+            int i = 0;  /* index in packed string  */
+            int j = offset;  /* index in unpacked array */
+            int l = packed.Length;
+            while (i < l)
+            {
+                int high = packed[i++] << 16;
+                result[j++] = high | packed[i++];
+            }
+            return j;
+        }
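+
+        // Illustrative note (editorial sketch, not generated code): unlike the
+        // run-length tables, the row map packs each 32-bit row offset as two
+        // consecutive 16-bit chars, high word first. For example, the pair
+        // '\u0000' '\u0089' in ZZ_ROWMAP_PACKED_0 decodes to
+        // (0x0000 << 16) | 0x0089 == 137, the start of state 1's row:
+        //
+        //   int high = packed[i++] << 16;      // upper 16 bits
+        //   result[j++] = high | packed[i++];  // combined 32-bit offset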
+
+        /** 
+         * The transition table of the DFA
+         */
+        private static readonly int[] ZZ_TRANS = zzUnpackTrans();
+
+        private const String ZZ_TRANS_PACKED_0 =
+          "\u0001\u0002\u0001\u0003\u0001\u0002\u0001\u0004\u0002\u0002\u0001\u0005\u0001\u0002\u0001\u0006" +
+          "\u0004\u0002\u0001\u0007\u0001\u0002\u0001\u0008\u0001\u0002\u0001\u0009\u0002\u0002\u0001\u000a" +
+          "\u0003\u0002\u0001\u000b\u0002\u0002\u0001\u000c\u0004\u0002\u0001\u000d\u0003\u0002\u0001\u000e" +
+          "\u000f\u0002\u0001\u000f\u0002\u0002\u0001\u0010\u0036\u0002\u0001\u0011\u0001\u0002\u0001\u0012" +
+          "\u0002\u0002\u0001\u0013\u0001\u0014\u0001\u0002\u0001\u0015\u0001\u0002\u0001\u0016\u0001\u0002" +
+          "\u0001\u0017\u0001\u0002\u0001\u0018\u0001\u0002\u0001\u0019\u0001\u001a\u0003\u0002\u0001\u001b" +
+          "\u0002\u001c\u0001\u001d\u0001\u001e\u0001\u001f\u008b\0\u0001\u0018\u0002\0\u0001\u0018" +
+          "\u0004\0\u0001\u0018\u000e\0\u0001\u0018\u000d\0\u0001\u0018\u0010\0\u0001\u0018" +
+          "\u0001\0\u0001\u0018\u0019\0\u0001\u0018\u0004\0\u0001\u0018\u0008\0\u0002\u0018" +
+          "\u000d\0\u0002\u0018\u0008\0\u0001\u0018\u0011\0\u0002\u0018\u0005\0\u0001\u0018" +
+          "\u0002\0\u0001\u0018\u0003\0\u0002\u0018\u0008\0\u0004\u0018\u0001\0\u0003\u0018" +
+          "\u0001\0\u0001\u0018\u0002\0\u0001\u0018\u0002\0\u0001\u0018\u0004\0\u0004\u0018" +
+          "\u0001\0\u0002\u0018\u0001\0\u0001\u0018\u0002\0\u0001\u0018\u0001\0\u0001\u0018" +
+          "\u0002\0\u0004\u0018\u0002\0\u0003\u0018\u0001\0\u0002\u0018\u0001\0\u0003\u0018" +
+          "\u0005\0\u0004\u0018\u0002\0\u0008\u0018\u0001\0\u0001\u0018\u0002\0\u0004\u0018" +
+          "\u0001\0\u0002\u0018\u0001\0\u0001\u0018\u0001\0\u0002\u0018\u0004\0\u0001\u0018" +
+          "\u0003\0\u0001\u0018\u0014\0\u0001\u0018\u0004\0\u0001\u0018\u0009\0\u0001\u0018" +
+          "\u0012\0\u0001\u0018\u0003\0\u0001\u0018\u0017\0\u0001\u0018\u0033\0\u0001\u0018" +
+          "\u0014\0\u0001\u0018\u0003\0\u0004\u0018\u0001\0\u0001\u0018\u0001\0\u0001\u0019" +
+          "\u0002\0\u0001\u0018\u0001\0\u0002\u0018\u0002\0\u0002\u0018\u0002\0\u0003\u0018" +
+          "\u0001\0\u0001\u0018\u0001\0\u0001\u0018\u0002\0\u0004\u0018\u0001\0\u0003\u0018" +
+          "\u0001\0\u0001\u0018\u0001\0\u0003\u0018\u0001\0\u0002\u0018\u0001\0\u0004\u0018" +
+          "\u0001\0\u0002\u0018\u0002\0\u0008\u0018\u0001\0\u0002\u0018\u0001\0\u0009\u0018" +
+          "\u0001\0\u0008\u0018\u0001\0\u000b\u0018\u0001\u0019\u0001\0\u0001\u0018\u0001\0" +
+          "\u0001\u0018\u0001\0\u0002\u0018\u0002\0\u0001\u0018\u0001\0\u0001\u0018\u0003\0" +
+          "\u0001\u0018\u001b\0\u0001\u0018\u000f\0\u0001\u0018\u0013\0\u0001\u0018\u0013\0" +
+          "\u0001\u0018\u0006\0\u0003\u0018\u001f\0\u0001\u0018\u0007\0\u0001\u0018\u0013\0" +
+          "\u0001\u0018\u0001\0\u0002\u0018\u0001\0\u0001\u0018\u0001\0\u0004\u0018\u0001\0" +
+          "\u0001\u0018\u0001\0\u0001\u0018\u0001\0\u0002\u0018\u0001\0\u0003\u0018\u0001\0" +
+          "\u0002\u0018\u0001\0\u0004\u0018\u0001\0\u0003\u0018\u0001\0\u000f\u0018\u0001\0" +
+          "\u0002\u0018\u0001\0\u0011\u0018\u0001\0\u0002\u0018\u0001\0\u0021\u0018\u0001\0" +
+          "\u0001\u0018\u0001\0\u0002\u0018\u0002\0\u0001\u0018\u0001\0\u0001\u0018\u0001\0" +
+          "\u0001\u0018\u0001\0\u0001\u0018\u001b\0\u0001\u0018\u0003\0\u0002\u0018\u000a\0" +
+          "\u0002\u0018\u000b\0\u0001\u0018\u0006\0\u0001\u0018\u0002\0\u0002\u0018\u0006\0" +
+          "\u0001\u0018\u0004\0\u0002\u0018\u0002\0\u0002\u0018\u0005\0\u0003\u0018\u0008\0" +
+          "\u0001\u0018\u0016\0\u0001\u0018\u0007\0\u0001\u0018\u0013\0\u0001\u0018\u0001\0" +
+          "\u0002\u0018\u0001\0\u0001\u0018\u0002\0\u0002\u0018\u0002\0\u0001\u0018\u0003\0" +
+          "\u0002\u0018\u0001\0\u0003\u0018\u0001\0\u0002\u0018\u0001\0\u0004\u0018\u0001\0" +
+          "\u0003\u0018\u0001\0\u0001\u0018\u0001\0\u0002\u0018\u0002\0\u0009\u0018\u0001\0" +
+          "\u0002\u0018\u0001\0\u0001\u0018\u0001\0\u0002\u0018\u0001\0\u000c\u0018\u0001\0" +
+          "\u0002\u0018\u0001\0\u0003\u0018\u0001\0\u0001\u0018\u0001\0\u0018\u0018\u0001\0" +
+          "\u0002\u0018\u0001\0\u0001\u0018\u0001\0\u0002\u0018\u0002\0\u0001\u0018\u0001\0" +
+          "\u0001\u0018\u0001\0\u0001\u0018\u0001\0\u0001\u0018\u000f\0\u0001\u0018\u0016\0" +
+          "\u0002\u0018\u0013\0\u0001\u0019\u0001\u0018\u0036\0\u0001\u0019\u0026\0\u0001\u0019" +
+          "\u0017\0\u0004\u0018\u0002\0\u0002\u0018\u000c\0\u0003\u0018\u000d\0\u0003\u0018" +
+          "\u0003\0\u0001\u0018\u0007\0\u0002\u0018\u000b\0\u0001\u0018\u000b\0\u0004\u0019" +
+          "\u0001\0\u0002\u0018\u0009\0\u0001\u0018\u001f\0\u0001\u0018\u0003\0\u0002\u0018" +
+          "\u000a\0\u0002\u0018\u0001\0\u0003\u0018\u0007\0\u0001\u0018\u0006\0\u0002\u0018" +
+          "\u0001\0\u0002\u0018\u0006\0\u0001\u0018\u0004\0\u0002\u0018\u0002\0\u0002\u0018" +
+          "\u0005\0\u0003\u0018\u0008\0\u0001\u0018\u000e\0\u0001\u0018\u0004\0\u0002\u0019" +
+          "\u0001\0\u0001\u0018\u0007\0\u0001\u0018\u0013\0\u0001\u0018\u0004\0\u0001\u0018" +
+          "\u0006\0\u0001\u0018\u0003\0\u0001\u0018\u0006\0\u0001\u0018\u0005\0\u0001\u0018" +
+          "\u0002\0\u0002\u0018\u0001\0\u000f\u0018\u0002\0\u0001\u0018\u000b\0\u0007\u0018" +
+          "\u0002\0\u0001\u0018\u0001\0\u0001\u0018\u0001\0\u0001\u0018\u0002\0\u0001\u0018" +
+          "\u0001\0\u0001\u0018\u0001\0\u0001\u0018\u0001\0\u0001\u0018\u0006\0\u0002\u0018" +
+          "\u0005\0\u0001\u0018\u0001\0\u0001\u0018\u0002\0\u0003\u0018\u0001\0\u0001\u0018" +
+          "\u0007\0\u0001\u0018\u0001\0\u0001\u0018\u001d\0\u0001\u0018\u000f\0\u0002\u0018" +
+          "\u0012\0\u0001\u0018\u0002\0\u0002\u0018\u000b\0\u0001\u0018\u0003\0\u0002\u0018" +
+          "\u0005\0\u0003\u0018\u0008\0\u0001\u0018\u0016\0\u0001\u0018\u0007\0\u0001\u0018" +
+          "\u0018\0\u0001\u0018\u0006\0\u0001\u0018\u0003\0\u0001\u0018\u0003\0\u0001\u0018" +
+          "\u0007\0\u0001\u0018\u0019\0\u0010\u0018\u0005\0\u0003\u0018\u0003\0\u0001\u0018" +
+          "\u0003\0\u0002\u0018\u0002\0\u0002\u0018\u0004\0\u0001\u0018\u0008\0\u0001\u0018" +
+          "\u0004\0\u0001\u0018\u0002\0\u0001\u0018\u0004\0\u0001\u0018\u0001\0\u0001\u0018" +
+          "\u0001\0\u0001\u0018\u005a\0\u0001\u001e\u0021\0\u0001\u001a\u001d\0\u0001\u001d" +
+          "\u0006\0\u0001\u001d\u0002\0\u0001\u001d\u0003\0\u0002\u001d\u0008\0\u0004\u001d" +
+          "\u0001\0\u0003\u001d\u0001\0\u0001\u001d\u0002\0\u0001\u001d\u0002\0\u0001\u001d" +
+          "\u0004\0\u0004\u001d\u0001\0\u0002\u001d\u0006\0\u0001\u001d\u0002\0\u0004\u001d" +
+          "\u0002\0\u0003\u001d\u0001\0\u0002\u001d\u0001\0\u0003\u001d\u0005\0\u0004\u001d" +
+          "\u0002\0\u0008\u001d\u0004\0\u0004\u001d\u0001\0\u0002\u001d\u0001\0\u0001\u001d" +
+          "\u0001\0\u0002\u001d\u0004\0\u0001\u001d\u0003\0\u0001\u001d\u000f\0\u0001\u001d" +
+          "\u0001\0\u0002\u001d\u0001\0\u0001\u001d\u0001\0\u0004\u001d\u0001\0\u0001\u001d" +
+          "\u0001\0\u0001\u001d\u0001\0\u0002\u001d\u0001\0\u0003\u001d\u0001\0\u0002\u001d" +
+          "\u0001\0\u0004\u001d\u0001\0\u0003\u001d\u0001\0\u000f\u001d\u0001\0\u0002\u001d" +
+          "\u0001\0\u0011\u001d\u0001\0\u0002\u001d\u0001\0\u0021\u001d\u0001\0\u0001\u001d" +
+          "\u0001\0\u0002\u001d\u0002\0\u0001\u001d\u0001\0\u0001\u001d\u0001\0\u0001\u001d" +
+          "\u0001\0\u0001\u001d\u000f\0\u0001\u001d\u0001\0\u0002\u001d\u0001\0\u0001\u001d" +
+          "\u0001\0\u0004\u001d\u0001\0\u0001\u001d\u0001\0\u0001\u001d\u0001\0\u0002\u001d" +
+          "\u0002\0\u0001\u001d\u0002\0\u0002\u001d\u0001\0\u0004\u001d\u0001\0\u0003\u001d" +
+          "\u0001\0\u000f\u001d\u0001\0\u0002\u001d\u0001\0\u0011\u001d\u0001\0\u0002\u001d" +
+          "\u0001\0\u0021\u001d\u0001\0\u0001\u001d\u0001\0\u0002\u001d\u0002\0\u0001\u001d" +
+          "\u0001\0\u0001\u001d\u0001\0\u0001\u001d\u0001\0\u0001\u001d\u001b\0\u0001\u001d" +
+          "\u000f\0\u0001\u001d\u0013\0\u0001\u001d\u001a\0\u0001\u001d\u0021\0\u0001\u001d" +
+          "\u0007\0\u0001\u001d\u0013\0\u0001\u001d\u0001\0\u0002\u001d\u0003\0\u0004\u001d" +
+          "\u0001\0\u0001\u001d\u0001\0\u0001\u001d\u0001\0\u0002\u001d\u0001\0\u0003\u001d" +
+          "\u0001\0\u0002\u001d\u0001\0\u0004\u001d\u0001\0\u0003\u001d\u0001\0\u0008\u001d" +
+          "\u0001\0\u0006\u001d\u0001\0\u0002\u001d\u0001\0\u0011\u001d\u0001\0\u0002\u001d" +
+          "\u0001\0\u0021\u001d\u0001\0\u0001\u001d\u0001\0\u0002\u001d\u0002\0\u0001\u001d" +
+          "\u0001\0\u0001\u001d\u0001\0\u0001\u001d\u0001\0\u0001\u001d\u0088\0\u0001\u001e" +
+          "\u000e\0\u0001\u0020\u0001\0\u0001\u0021\u0002\0\u0001\u0022\u0001\0\u0001\u0023" +
+          "\u0004\0\u0001\u0024\u0001\0\u0001\u0025\u0001\0\u0001\u0026\u0002\0\u0001\u0027" +
+          "\u0003\0\u0001\u0028\u0002\0\u0001\u0029\u0004\0\u0001\u002a\u0003\0\u0001\u002b" +
+          "\u000f\0\u0001\u002c\u0002\0\u0001\u002d\u0011\0\u0001\u002e\u0002\0\u0001\u002f" +
+          "\u002f\0\u0002\u0018\u0001\u0030\u0001\0\u0001\u0031\u0001\0\u0001\u0031\u0001\u0032" +
+          "\u0001\0\u0001\u0018\u0002\0\u0001\u0018\u0001\0\u0001\u0020\u0001\0\u0001\u0021" +
+          "\u0002\0\u0001\u0033\u0001\0\u0001\u0034\u0004\0\u0001\u0024\u0001\0\u0001\u0025" +
+          "\u0001\0\u0001\u0026\u0002\0\u0001\u0027\u0003\0\u0001\u0035\u0002\0\u0001\u0036" +
+          "\u0004\0\u0001\u0037\u0003\0\u0001\u0038\u000f\0\u0001\u002c\u0002\0\u0001\u0039" +
+          "\u0011\0\u0001\u003a\u0002\0\u0001\u003b\u002f\0\u0001\u0018\u0002\u0019\u0002\0" +
+          "\u0002\u003c\u0001\u003d\u0001\0\u0001\u0019\u0002\0\u0001\u0018\u0006\0\u0001\u003e" +
+          "\u0011\0\u0001\u003f\u0002\0\u0001\u0040\u0008\0\u0001\u0041\u0012\0\u0001\u0042" +
+          "\u0011\0\u0001\u0043\u0002\0\u0001\u0044\u0021\0\u0001\u0045\u000e\0\u0001\u001a" +
+          "\u0001\0\u0001\u001a\u0003\0\u0001\u0032\u0001\0\u0001\u001a\u0004\0\u0001\u0020" +
+          "\u0001\0\u0001\u0021\u0002\0\u0001\u0046\u0001\0\u0001\u0034\u0004\0\u0001\u0024" +
+          "\u0001\0\u0001\u0025\u0001\0\u0001\u0026\u0002\0\u0001\u0027\u0003\0\u0001\u0047" +
+          "\u0002\0\u0001\u0048\u0004\0\u0001\u0037\u0003\0\u0001\u0049\u000f\0\u0001\u002c" +
+          "\u0002\0\u0001\u004a\u0011\0\u0001\u004b\u0002\0\u0001\u004c\u0021\0\u0001\u004d" +
+          "\u000d\0\u0001\u0018\u0001\u004e\u0001\u0019\u0001\u004f\u0003\0\u0001\u004e\u0001\0" +
+          "\u0001\u004e\u0002\0\u0001\u0018\u0084\0\u0002\u001c\u0009\0\u0001\u0050\u0011\0" +
+          "\u0001\u0051\u0002\0\u0001\u0052\u0008\0\u0001\u0053\u0012\0\u0001\u0054\u0011\0" +
+          "\u0001\u0055\u0002\0\u0001\u0056\u0030\0\u0001\u001d\u0007\0\u0001\u001d\u0009\0" +
+          "\u0001\u0057\u0011\0\u0001\u0058\u0002\0\u0001\u0059\u0008\0\u0001\u005a\u0012\0" +
+          "\u0001\u005b\u0011\0\u0001\u005c\u0002\0\u0001\u005d\u0030\0\u0001\u001e\u0007\0" +
+          "\u0001\u001e\u0004\0\u0001\u0020\u0001\0\u0001\u0021\u0002\0\u0001\u005e\u0001\0" +
+          "\u0001\u0023\u0004\0\u0001\u0024\u0001\0\u0001\u0025\u0001\0\u0001\u0026\u0002\0" +
+          "\u0001\u0027\u0003\0\u0001\u005f\u0002\0\u0001\u0060\u0004\0\u0001\u002a\u0003\0" +
+          "\u0001\u0061\u000f\0\u0001\u002c\u0002\0\u0001\u0062\u0011\0\u0001\u0063\u0002\0" +
+          "\u0001\u0064\u002f\0\u0001\u0018\u0001\u001f\u0001\u0030\u0001\0\u0001\u0031\u0001\0" +
+          "\u0001\u0031\u0001\u0032\u0001\0\u0001\u001f\u0002\0\u0001\u001f\u0007\0\u0001\u0018" +
+          "\u0004\0\u0001\u0018\u0009\0\u0001\u0018\u0012\0\u0001\u0018\u0003\0\u0001\u0018" +
+          "\u000b\0\u0001\u0018\u0002\0\u0001\u0018\u0008\0\u0001\u0018\u000a\0\u0004\u0018" +
+          "\u0025\0\u0001\u0018\u0014\0\u0001\u0018\u0003\0\u0004\u0018\u0001\0\u0001\u0018" +
+          "\u0001\0\u0001\u0030\u0002\0\u0001\u0018\u0001\0\u0002\u0018\u0002\0\u0002\u0018" +
+          "\u0002\0\u0003\u0018\u0001\0\u0001\u0018\u0001\0\u0001\u0018\u0002\0\u0004\u0018" +
+          "\u0001\0\u0003\u0018\u0001\0\u0001\u0018\u0001\0\u0003\u0018\u0001\0\u0002\u0018" +
+          "\u0001\0\u0004\u0018\u0001\0\u0002\u0018\u0002\0\u0008\u0018\u0001\0\u0002\u0018" +
+          "\u0001\0\u0009\u0018\u0001\0\u0008\u0018\u0001\0\u000b\u0018\u0001\u0030\u0001\0" +
+          "\u0001\u0018\u0001\0\u0001\u0018\u0001\0\u0002\u0018\u0002\0\u0001\u0018\u0001\0" +
+          "\u0001\u0018\u0003\0\u0001\u0018\u000f\0\u0001\u0018\u0016\0\u0002\u0018\u0013\0" +
+          "\u0001\u0030\u0001\u0018\u0024\0\u0001\u0018\u0011\0\u0001\u0030\u0026\0\u0001\u0030" +
+          "\u0009\0\u0001\u0018\u000d\0\u0004\u0018\u0002\0\u0002\u0018\u000c\0\u0004\u0018" +
+          "\u0001\0\u0002\u0018\u0009\0\u0003\u0018\u0003\0\u0001\u0018\u0001\0\u0001\u0018" +
+          "\u0004\0\u0003\u0018\u0005\0\u0004\u0018\u0002\0\u0002\u0018\u000a\0\u0004\u0030" +
+          "\u0001\0\u0002\u0018\u0001\0\u0001\u0018\u0007\0\u0001\u0018\u001f\0\u0001\u0018" +
+          "\u0003\0\u0002\u0018\u000a\0\u0002\u0018\u0001\0\u0003\u0018\u0007\0\u0001\u0018" +
+          "\u0006\0\u0002\u0018\u0001\0\u0002\u0018\u0006\0\u0001\u0018\u0004\0\u0002\u0018" +
+          "\u0002\0\u0002\u0018\u0005\0\u0003\u0018\u0008\0\u0001\u0018\u000e\0\u0001\u0018" +
+          "\u0004\0\u0002\u0030\u0001\0\u0001\u0018\u0007\0\u0001\u0018\u0013\0\u0001\u0018" +
+          "\u0004\0\u0001\u0018\u0006\0\u0001\u0018\u0003\0\u0001\u0018\u0006\0\u0001\u0018" +
+          "\u0005\0\u0001\u0018\u0002\0\u0002\u0018\u0001\0\u000f\u0018\u0002\0\u0001\u0018" +
+          "\u000b\0\u0007\u0018\u0002\0\u0001\u0018\u0001\0\u0001\u0018\u0001\0\u0001\u0018" +
+          "\u0002\0\u0001\u0018\u0001\0\u0001\u0018\u0001\0\u0001\u0018\u0001\0\u0001\u0018" +
+          "\u0004\0\u0001\u0018\u0001\0\u0002\u0018\u0005\0\u0001\u0018\u0001\0\u0001\u0018" +
+          "\u0002\0\u0003\u0018\u0001\0\u0001\u0018\u0007\0\u0001\u0018\u0001\0\u0001\u0018" +
+          "\u0016\0\u0001\u0018\u0006\0\u0001\u0018\u0003\0\u0001\u0018\u0003\0\u0001\u0018" +
+          "\u0007\0\u0001\u0018\u0019\0\u0010\u0018\u0005\0\u0003\u0018\u0003\0\u0001\u0018" +
+          "\u0003\0\u0002\u0018\u0002\0\u0002\u0018\u0004\0\u0005\u0018\u0004\0\u0001\u0018" +
+          "\u0004\0\u0001\u0018\u0002\0\u0001\u0018\u0004\0\u0001\u0018\u0001\0\u0001\u0018" +
+          "\u0001\0\u0001\u0018\u0057\0\u0002\u0018\u000d\0\u0004\u0018\u0030\0\u0001\u0018" +
+          "\u000d\0\u0002\u0018\u0008\0\u0002\u0018\u0001\0\u0001\u0018\u0001\0\u0001\u0018" +
+          "\u0009\0\u0001\u0018\u0009\0\u0002\u0018\u0006\0\u0001\u0018\u0002\0\u0004\u0018" +
+          "\u0003\0\u0001\u0018\u0002\0\u0002\u0018\u0001\0\u0003\u0018\u0005\0\u0001\u0018" +
+          "\u0001\0\u0002\u0018\u0002\0\u0002\u0018\u0001\0\u0004\u0018\u0005\0\u0001\u0018" +
+          "\u0001\0\u0002\u0018\u001f\0\u0001\u0020\u0001\0\u0001\u0021\u0002\0\u0001\u0065" +
+          "\u0001\0\u0001\u0023\u0004\0\u0001\u0024\u0001\0\u0001\u0025\u0001\0\u0001\u0026" +
+          "\u0002\0\u0001\u0027\u0003\0\u0001\u0066\u0002\0\u0001\u0067\u0004\0\u0001\u002a" +
+          "\u0003\0\u0001\u0068\u000f\0\u0001\u002c\u0002\0\u0001\u0069\u0011\0\u0001\u006a" +
+          "\u0002\0\u0001\u006b\u002f\0\u0001\u0018\u0002\u0030\u0002\0\u0002\u006c\u0001\u0032" +
+          "\u0001\0\u0001\u0030\u0002\0\u0001\u0018\u0001\0\u0001\u0020\u0001\0\u0001\u0021" +
+          "\u0002\0\u0001\u006d\u0001\0\u0001\u006e\u0004\0\u0001\u0024\u0001\0\u0001\u0025" +
+          "\u0001\0\u0001\u0026\u0002\0\u0001\u0027\u0003\0\u0001\u006f\u0002\0\u0001\u0070" +
+          "\u0004\0\u0001\u0071\u0003\0\u0001\u0072\u000f\0\u0001\u002c\u0002\0\u0001\u0073" +
+          "\u0011\0\u0001\u0074\u0002\0\u0001\u0075\u002f\0\u0001\u0018\u0001\u0031\u0007\0" +
+          "\u0001\u0031\u0002\0\u0001\u0018\u0001\0\u0001\u0020\u0001\0\u0001\u0021\u0002\0" +
+          "\u0001\u0076\u0001\0\u0001\u0023\u0004\0\u0001\u0024\u0001\0\u0001\u0025\u0001\0" +
+          "\u0001\u0026\u0002\0\u0001\u0027\u0003\0\u0001\u0077\u0002\0\u0001\u0078\u0004\0" +
+          "\u0001\u002a\u0003\0\u0001\u0079\u000f\0\u0001\u002c\u0002\0\u0001\u007a\u0011\0" +
+          "\u0001\u007b\u0002\0\u0001\u007c\u0021\0\u0001\u004d\u000d\0\u0001\u0018\u0001\u0032" +
+          "\u0001\u0030\u0001\u004f\u0003\0\u0001\u0032\u0001\0\u0001\u0032\u0002\0\u0001\u0018" +
+          "\u0007\0\u0001\u0018\u0004\0\u0001\u0018\u0009\0\u0001\u0018\u0012\0\u0001\u0018" +
+          "\u0003\0\u0001\u0018\u000b\0\u0001\u0019\u0002\0\u0001\u0019\u0008\0\u0001\u0018" +
+          "\u000a\0\u0004\u0019\u0025\0\u0001\u0018\u0011\0\u0001\u0018\u0016\0\u0002\u0018" +
+          "\u0013\0\u0001\u0019\u0001\u0018\u0024\0\u0001\u0019\u0011\0\u0001\u0019\u0026\0" +
+          "\u0001\u0019\u0009\0\u0001\u0019\u000d\0\u0004\u0018\u0002\0\u0002\u0018\u000c\0" +
+          "\u0003\u0018\u0001\u0019\u0001\0\u0002\u0019\u0009\0\u0003\u0018\u0003\0\u0001\u0018" +
+          "\u0001\0\u0001\u0019\u0004\0\u0001\u0019\u0002\u0018\u0005\0\u0004\u0019\u0002\0" +
+          "\u0001\u0018\u0001\u0019\u000a\0\u0004\u0019\u0001\0\u0002\u0018\u0001\0\u0001\u0019" +
+          "\u0007\0\u0001\u0018\u0013\0\u0001\u0018\u0004\0\u0001\u0018\u0006\0\u0001\u0018" +
+          "\u0003\0\u0001\u0018\u0006\0\u0001\u0018\u0005\0\u0001\u0018\u0002\0\u0002\u0018" +
+          "\u0001\0\u000f\u0018\u0002\0\u0001\u0018\u000b\0\u0007\u0018\u0002\0\u0001\u0018" +
+          "\u0001\0\u0001\u0018\u0001\0\u0001\u0018\u0002\0\u0001\u0018\u0001\0\u0001\u0018" +
+          "\u0001\0\u0001\u0018\u0001\0\u0001\u0018\u0004\0\u0001\u0019\u0001\0\u0002\u0018" +
+          "\u0005\0\u0001\u0018\u0001\0\u0001\u0018\u0002\0\u0003\u0018\u0001\0\u0001\u0018" +
+          "\u0007\0\u0001\u0018\u0001\0\u0001\u0018\u0016\0\u0001\u0018\u0006\0\u0001\u0018" +
+          "\u0003\0\u0001\u0018\u0003\0\u0001\u0018\u0007\0\u0001\u0018\u0019\0\u0010\u0018" +
+          "\u0005\0\u0003\u0018\u0003\0\u0001\u0018\u0003\0\u0002\u0018\u0002\0\u0002\u0018" +
+          "\u0004\0\u0001\u0018\u0004\u0019\u0004\0\u0001\u0018\u0004\0\u0001\u0018\u0002\0" +
+          "\u0001\u0018\u0004\0\u0001\u0018\u0001\0\u0001\u0018\u0001\0\u0001\u0018\u0057\0" +
+          "\u0002\u0019\u000d\0\u0004\u0019\u0030\0\u0001\u0019\u000d\0\u0002\u0019\u0008\0" +
+          "\u0002\u0019\u0001\0\u0001\u0019\u0001\0\u0001\u0019\u0009\0\u0001\u0019\u0009\0" +
+          "\u0002\u0019\u0006\0\u0001\u0019\u0002\0\u0004\u0019\u0003\0\u0001\u0019\u0002\0" +
+          "\u0002\u0019\u0001\0\u0003\u0019\u0005\0\u0001\u0019\u0001\0\u0002\u0019\u0002\0" +
+          "\u0002\u0019\u0001\0\u0004\u0019\u0005\0\u0001\u0019\u0001\0\u0002\u0019\u0024\0" +
+          "\u0001\u007d\u0001\0\u0001\u007e\u000f\0\u0001\u007f\u0002\0\u0001\u0080\u0004\0" +
+          "\u0001\u0081\u0003\0\u0001\u0082\u0012\0\u0001\u0083\u0011\0\u0001\u0084\u0002\0" +
+          "\u0001\u0085\u0030\0\u0001\u003c\u0001\u0019\u0006\0\u0001\u003c\u0004\0\u0001\u0020" +
+          "\u0001\0\u0001\u0021\u0002\0\u0001\u0086\u0001\0\u0001\u0034\u0004\0\u0001\u0024" +
+          "\u0001\0\u0001\u0025\u0001\0\u0001\u0026\u0002\0\u0001\u0027\u0003\0\u0001\u0087" +
+          "\u0002\0\u0001\u0088\u0004\0\u0001\u0037\u0003\0\u0001\u0089\u000f\0\u0001\u002c" +
+          "\u0002\0\u0001\u008a\u0011\0\u0001\u008b\u0002\0\u0001\u008c\u0021\0\u0001\u004d" +
+          "\u000d\0\u0001\u0018\u0001\u003d\u0001\u0019\u0001\u004f\u0003\0\u0001\u003d\u0001\0" +
+          "\u0001\u003d\u0002\0\u0001\u0018\u0039\0\u0001\u001a\u0002\0\u0001\u001a\u0013\0" +
+          "\u0004\u001a\u0089\0\u0001\u001a\u0042\0\u0001\u001a\u0024\0\u0001\u001a\u0001\0" +
+          "\u0002\u001a\u0011\0\u0001\u001a\u0004\0\u0001\u001a\u0007\0\u0004\u001a\u0003\0" +
+          "\u0001\u001a\u0012\0\u0001\u001a\u0076\0\u0001\u001a\u008d\0\u0004\u001a\u006d\0" +
+          "\u0002\u001a\u000d\0\u0004\u001a\u0030\0\u0001\u001a\u000d\0\u0002\u001a\u0008\0" +
+          "\u0002\u001a\u0001\0\u0001\u001a\u0001\0\u0001\u001a\u0009\0\u0001\u001a\u0009\0" +
+          "\u0002\u001a\u0006\0\u0001\u001a\u0002\0\u0004\u001a\u0003\0\u0001\u001a\u0002\0" +
+          "\u0002\u001a\u0001\0\u0003\u001a\u0005\0\u0001\u001a\u0001\0\u0002\u001a\u0002\0" +
+          "\u0002\u001a\u0001\0\u0004\u001a\u0005\0\u0001\u001a\u0001\0\u0002\u001a\u008d\0" +
+          "\u0001\u001a\u0020\0\u0001\u0018\u0004\0\u0001\u0018\u0009\0\u0001\u0018\u0012\0" +
+          "\u0001\u0018\u0003\0\u0001\u0018\u000b\0\u0001\u004e\u0002\0\u0001\u004e\u0008\0" +
+          "\u0001\u0018\u000a\0\u0004\u004e\u0025\0\u0001\u0018\u0011\0\u0001\u0018\u0016\0" +
+          "\u0002\u0018\u0013\0\u0001\u0019\u0001\u0018\u0024\0\u0001\u004e\u0011\0\u0001\u0019" +
+          "\u0026\0\u0001\u0019\u0009\0\u0001\u004e\u000d\0\u0004\u0018\u0002\0\u0002\u0018" +
+          "\u000c\0\u0003\u0018\u0001\u004e\u0001\0\u0002\u004e\u0009\0\u0003\u0018\u0003\0" +
+          "\u0001\u0018\u0001\0\u0001\u004e\u0004\0\u0001\u004e\u0002\u0018\u0005\0\u0004\u004e" +
+          "\u0002\0\u0001\u0018\u0001\u004e\u000a\0\u0004\u0019\u0001\0\u0002\u0018\u0001\0" +
+          "\u0001\u004e\u0007\0\u0001\u0018\u0013\0\u0001\u0018\u0004\0\u0001\u0018\u0006\0" +
+          "\u0001\u0018\u0003\0\u0001\u0018\u0006\0\u0001\u0018\u0005\0\u0001\u0018\u0002\0" +
+          "\u0002\u0018\u0001\0\u000f\u0018\u0002\0\u0001\u0018\u000b\0\u0007\u0018\u0002\0" +
+          "\u0001\u0018\u0001\0\u0001\u0018\u0001\0\u0001\u0018\u0002\0\u0001\u0018\u0001\0" +
+          "\u0001\u0018\u0001\0\u0001\u0018\u0001\0\u0001\u0018\u0004\0\u0001\u004e\u0001\0" +
+          "\u0002\u0018\u0005\0\u0001\u0018\u0001\0\u0001\u0018\u0002\0\u0003\u0018\u0001\0" +
+          "\u0001\u0018\u0007\0\u0001\u0018\u0001\0\u0001\u0018\u0016\0\u0001\u0018\u0006\0" +
+          "\u0001\u0018\u0003\0\u0001\u0018\u0003\0\u0001\u0018\u0007\0\u0001\u0018\u0019\0" +
+          "\u0010\u0018\u0005\0\u0003\u0018\u0003\0\u0001\u0018\u0003\0\u0002\u0018\u0002\0" +
+          "\u0002\u0018\u0004\0\u0001\u0018\u0004\u004e\u0004\0\u0001\u0018\u0004\0\u0001\u0018" +
+          "\u0002\0\u0001\u0018\u0004\0\u0001\u0018\u0001\0\u0001\u0018\u0001\0\u0001\u0018" +
+          "\u0057\0\u0002\u004e\u000d\0\u0004\u004e\u0030\0\u0001\u004e\u000d\0\u0002\u004e" +
+          "\u0008\0\u0002\u004e\u0001\0\u0001\u004e\u0001\0\u0001\u004e\u0009\0\u0001\u004e" +
+          "\u0009\0\u0002\u004e\u0006\0\u0001\u004e\u0002\0\u0004\u004e\u0003\0\u0001\u004e" +
+          "\u0002\0\u0002\u004e\u0001\0\u0003\u004e\u0005\0\u0001\u004e\u0001\0\u0002\u004e" +
+          "\u0002\0\u0002\u004e\u0001\0\u0004\u004e\u0005\0\u0001\u004e\u0001\0\u0002\u004e" +
+          "\u008d\0\u0001\u004f\u001f\0\u0001\u008d\u0011\0\u0001\u008e\u0002\0\u0001\u008f" +
+          "\u0008\0\u0001\u0090\u0012\0\u0001\u0091\u0011\0\u0001\u0092\u0002\0\u0001\u0093" +
+          "\u0021\0\u0001\u004d\u000e\0\u0001\u004f\u0001\0\u0001\u004f\u0003\0\u0001\u0032" +
+          "\u0001\0\u0001\u004f\u003c\0\u0001\u001d\u0002\0\u0001\u001d\u0013\0\u0004\u001d" +
+          "\u0089\0\u0001\u001d\u0042\0\u0001\u001d\u0024\0\u0001\u001d\u0001\0\u0002\u001d" +
+          "\u0011\0\u0001\u001d\u0004\0\u0001\u001d\u0007\0\u0004\u001d\u0003\0\u0001\u001d" +
+          "\u0012\0\u0001\u001d\u0076\0\u0001\u001d\u008d\0\u0004\u001d\u006d\0\u0002\u001d" +
+          "\u000d\0\u0004\u001d\u0030\0\u0001\u001d\u000d\0\u0002\u001d\u0008\0\u0002\u001d" +
+          "\u0001\0\u0001\u001d\u0001\0\u0001\u001d\u0009\0\u0001\u001d\u0009\0\u0002\u001d" +
+          "\u0006\0\u0001\u001d\u0002\0\u0004\u001d\u0003\0\u0001\u001d\u0002\0\u0002\u001d" +
+          "\u0001\0\u0003\u001d\u0005\0\u0001\u001d\u0001\0\u0002\u001d\u0002\0\u0002\u001d" +
+          "\u0001\0\u0004\u001d\u0005\0\u0001\u001d\u0001\0\u0002\u001d\u0057\0\u0001\u001e" +
+          "\u0002\0\u0001\u001e\u0013\0\u0004\u001e\u0089\0\u0001\u001e\u0042\0\u0001\u001e" +
+          "\u0024\0\u0001\u001e\u0001\0\u0002\u001e\u0011\0\u0001\u001e\u0004\0\u0001\u001e" +
+          "\u0007\0\u0004\u001e\u0003\0\u0001\u001e\u0012\0\u0001\u001e\u0076\0\u0001\u001e" +
+          "\u008d\0\u0004\u001e\u006d\0\u0002\u001e\u000d\0\u0004\u001e\u0030\0\u0001\u001e" +
+          "\u000d\0\u0002\u001e\u0008\0\u0002\u001e\u0001\0\u0001\u001e\u0001\0\u0001\u001e" +
+          "\u0009\0\u0001\u001e\u0009\0\u0002\u001e\u0006\0\u0001\u001e\u0002\0\u0004\u001e" +
+          "\u0003\0\u0001\u001e\u0002\0\u0002\u001e\u0001\0\u0003\u001e\u0005\0\u0001\u001e" +
+          "\u0001\0\u0002\u001e\u0002\0\u0002\u001e\u0001\0\u0004\u001e\u0005\0\u0001\u001e" +
+          "\u0001\0\u0002\u001e\u0025\0\u0001\u0018\u0004\0\u0001\u0018\u0009\0\u0001\u0018" +
+          "\u0012\0\u0001\u0018\u0003\0\u0001\u0018\u000b\0\u0001\u001f\u0002\0\u0001\u001f" +
+          "\u0008\0\u0001\u0018\u000a\0\u0004\u001f\u0025\0\u0001\u0018\u0011\0\u0001\u0018" +
+          "\u0016\0\u0002\u0018\u0013\0\u0001\u0030\u0001\u0018\u0024\0\u0001\u001f\u0011\0" +
+          "\u0001\u0030\u0026\0\u0001\u0030\u0009\0\u0001\u001f\u000d\0\u0004\u0018\u0002\0" +
+          "\u0002\u0018\u000c\0\u0003\u0018\u0001\u001f\u0001\0\u0002\u001f\u0009\0\u0003\u0018" +
+          "\u0003\0\u0001\u0018\u0001\0\u0001\u001f\u0004\0\u0001\u001f\u0002\u0018\u0005\0" +
+          "\u0004\u001f\u0002\0\u0001\u0018\u0001\u001f\u000a\0\u0004\u0030\u0001\0\u0002\u0018" +
+          "\u0001\0\u0001\u001f\u0007\0\u0001\u0018\u0013\0\u0001\u0018\u0004\0\u0001\u0018" +
+          "\u0006\0\u0001\u0018\u0003\0\u0001\u0018\u0006\0\u0001\u0018\u0005\0\u0001\u0018" +
+          "\u0002\0\u0002\u0018\u0001\0\u000f\u0018\u0002\0\u0001\u0018\u000b\0\u0007\u0018" +
+          "\u0002\0\u0001\u0018\u0001\0\u0001\u0018\u0001\0\u0001\u0018\u0002\0\u0001\u0018" +
+          "\u0001\0\u0001\u0018\u0001\0\u0001\u0018\u0001\0\u0001\u0018\u0004\0\u0001\u001f" +
+          "\u0001\0\u0002\u0018\u0005\0\u0001\u0018\u0001\0\u0001\u0018\u0002\0\u0003\u0018" +
+          "\u0001\0\u0001\u0018\u0007\0\u0001\u0018\u0001\0\u0001\u0018\u0016\0\u0001\u0018" +
+          "\u0006\0\u0001\u0018\u0003\0\u0001\u0018\u0003\0\u0001\u0018\u0007\0\u0001\u0018" +
+          "\u0019\0\u0010\u0018\u0005\0\u0003\u0018\u0003\0\u0001\u0018\u0003\0\u0002\u0018" +
+          "\u0002\0\u0002\u0018\u0004\0\u0001\u0018\u0004\u001f\u0004\0\u0001\u0018\u0004\0" +
+          "\u0001\u0018\u0002\0\u0001\u0018\u0004\0\u0001\u0018\u0001\0\u0001\u0018\u0001\0" +
+          "\u0001\u0018\u0057\0\u0002\u001f\u000d\0\u0004\u001f\u0030\0\u0001\u001f\u000d\0" +
+          "\u0002\u001f\u0008\0\u0002\u001f\u0001\0\u0001\u001f\u0001\0\u0001\u001f\u0009\0" +
+          "\u0001\u001f\u0009\0\u0002\u001f\u0006\0\u0001\u001f\u0002\0\u0004\u001f\u0003\0" +
+          "\u0001\u001f\u0002\0\u0002\u001f\u0001\0\u0003\u001f\u0005\0\u0001\u001f\u0001\0" +
+          "\u0002\u001f\u0002\0\u0002\u001f\u0001\0\u0004\u001f\u0005\0\u0001\u001f\u0001\0" +
+          "\u0002\u001f\u0025\0\u0001\u0018\u0004\0\u0001\u0018\u0009\0\u0001\u0018\u0012\0" +
+          "\u0001\u0018\u0003\0\u0001\u0018\u000b\0\u0001\u0030\u0002\0\u0001\u0030\u0008\0" +
+          "\u0001\u0018\u000a\0\u0004\u0030\u0025\0\u0001\u0018\u0011\0\u0001\u0018\u0016\0" +
+          "\u0002\u0018\u0013\0\u0001\u0030\u0001\u0018\u0024\0\u0001\u0030\u0011\0\u0001\u0030" +
+          "\u0026\0\u0001\u0030\u0009\0\u0001\u0030\u000d\0\u0004\u0018\u0002\0\u0002\u0018" +
+          "\u000c\0\u0003\u0018\u0001\u0030\u0001\0\u0002\u0030\u0009\0\u0003\u0018\u0003\0" +
+          "\u0001\u0018\u0001\0\u0001\u0030\u0004\0\u0001\u0030\u0002\u0018\u0005\0\u0004\u0030" +
+          "\u0002\0\u0001\u0018\u0001\u0030\u000a\0\u0004\u0030\u0001\0\u0002\u0018\u0001\0" +
+          "\u0001\u0030\u0007\0\u0001\u0018\u0013\0\u0001\u0018\u0004\0\u0001\u0018\u0006\0" +
+          "\u0001\u0018\u0003\0\u0001\u0018\u0006\0\u0001\u0018\u0005\0\u0001\u0018\u0002\0" +
+          "\u0002\u0018\u0001\0\u000f\u0018\u0002\0\u0001\u0018\u000b\0\u0007\u0018\u0002\0" +
+          "\u0001\u0018\u0001\0\u0001\u0018\u0001\0\u0001\u0018\u0002\0\u0001\u0018\u0001\0" +
+          "\u0001\u0018\u0001\0\u0001\u0018\u0001\0\u0001\u0018\u0004\0\u0001\u0030\u0001\0" +
+          "\u0002\u0018\u0005\0\u0001\u0018\u0001\0\u0001\u0018\u0002\0\u0003\u0018\u0001\0" +
+          "\u0001\u0018\u0007\0\u0001\u0018\u0001\0\u0001\u0018\u0016\0\u0001\u0018\u0006\0" +
+          "\u0001\u0018\u0003\0\u0001\u0018\u0003\0\u0001\u0018\u0007\0\u0001\u0018\u0019\0" +
+          "\u0010\u0018\u0005\0\u0003\u0018\u0003\0\u0001\u0018\u0003\0\u0002\u0018\u0002\0" +
+          "\u0002\u0018\u0004\0\u0001\u0018\u0004\u0030\u0004\0\u0001\u0018\u0004\0\u0001\u0018" +
+          "\u0002\0\u0001\u0018\u0004\0\u0001\u0018\u0001\0\u0001\u0018\u0001\0\u0001\u0018" +
+          "\u0057\0\u0002\u0030\u000d\0\u0004\u0030\u0030\0\u0001\u0030\u000d\0\u0002\u0030" +
+          "\u0008\0\u0002\u0030\u0001\0\u0001\u0030\u0001\0\u0001\u0030\u0009\0\u0001\u0030" +
+          "\u0009\0\u0002\u0030\u0006\0\u0001\u0030\u0002\0\u0004\u0030\u0003\0\u0001\u0030" +
+          "\u0002\0\u0002\u0030\u0001\0\u0003\u0030\u0005\0\u0001\u0030\u0001\0\u0002\u0030" +
+          "\u0002\0\u0002\u0030\u0001\0\u0004\u0030\u0005\0\u0001\u0030\u0001\0\u0002\u0030" +
+          "\u0024\0\u0001\u0094\u0001\0\u0001\u0095\u000f\0\u0001\u0096\u0002\0\u0001\u0097" +
+          "\u0004\0\u0001\u0098\u0003\0\u0001\u0099\u0012\0\u0001\u009a\u0011\0\u0001\u009b" +
+          "\u0002\0\u0001\u009c\u0030\0\u0001\u006c\u0001\u0030\u0006\0\u0001\u006c\u000a\0" +
+          "\u0001\u0018\u0004\0\u0001\u0018\u0009\0\u0001\u0018\u0012\0\u0001\u0018\u0003\0" +
+          "\u0001\u0018\u000b\0\u0001\u0031\u0002\0\u0001\u0031\u0008\0\u0001\u0018\u000a\0" +
+          "\u0004\u0031\u0025\0\u0001\u0018\u0014\0\u0001\u0018\u0003\0\u0004\u0018\u0001\0" +
+          "\u0001\u0018\u0004\0\u0001\u0018\u0001\0\u0002\u0018\u0002\0\u0002\u0018\u0002\0" +
+          "\u0003\u0018\u0001\0\u0001\u0018\u0001\0\u0001\u0018\u0002\0\u0004\u0018\u0001\0" +
+          "\u0003\u0018\u0001\0\u0001\u0018\u0001\0\u0003\u0018\u0001\0\u0002\u0018\u0001\0" +
+          "\u0004\u0018\u0001\0\u0002\u0018\u0002\0\u0008\u0018\u0001\0\u0002\u0018\u0001\0" +
+          "\u0009\u0018\u0001\0\u0008\u0018\u0001\0\u000b\u0018\u0002\0\u0001\u0018\u0001\0" +
+          "\u0001\u0018\u0001\0\u0002\u0018\u0002\0\u0001\u0018\u0001\0\u0001\u0018\u0003\0" +
+          "\u0001\u0018\u000f\0\u0001\u0018\u0016\0\u0002\u0018\u0014\0\u0001\u0018\u0024\0" +
+          "\u0001\u0031\u0042\0\u0001\u0031\u000d\0\u0004\u0018\u0002\0\u0002\u0018\u000c\0" +
+          "\u0003\u0018\u0001\u0031\u0001\0\u0002\u0031\u0009\0\u0003\u0018\u0003\0\u0001\u0018" +
+          "\u0001\0\u0001\u0031\u0004\0\u0001\u0031\u0002\u0018\u0005\0\u0004\u0031\u0002\0" +
+          "\u0001\u0018\u0001\u0031\u000f\0\u0002\u0018\u0001\0\u0001\u0031\u0007\0\u0001\u0018" +
+          "\u001f\0\u0001\u0018\u0003\0\u0002\u0018\u000a\0\u0002\u0018\u0001\0\u0003\u0018" +
+          "\u0007\0\u0001\u0018\u0006\0\u0002\u0018\u0001\0\u0002\u0018\u0006\0\u0001\u0018" +
+          "\u0004\0\u0002\u0018\u0002\0\u0002\u0018\u0005\0\u0003\u0018\u0008\0\u0001\u0018" +
+          "\u000e\0\u0001\u0018\u0007\0\u0001\u0018\u0007\0\u0001\u0018\u0013\0\u0001\u0018" +
+          "\u0004\0\u0001\u0018\u0006\0\u0001\u0018\u0003\0\u0001\u0018\u0006\0\u0001\u0018" +
+          "\u0005\0\u0001\u0018\u0002\0\u0002\u0018\u0001\0\u000f\u0018\u0002\0\u0001\u0018" +
+          "\u000b\0\u0007\u0018\u0002\0\u0001\u0018\u0001\0\u0001\u0018\u0001\0\u0001\u0018" +
+          "\u0002\0\u0001\u0018\u0001\0\u0001\u0018\u0001\0\u0001\u0018\u0001\0\u0001\u0018" +
+          "\u0004\0\u0001\u0031\u0001\0\u0002\u0018\u0005\0\u0001\u0018\u0001\0\u0001\u0018" +
+          "\u0002\0\u0003\u0018\u0001\0\u0001\u0018\u0007\0\u0001\u0018\u0001\0\u0001\u0018" +
+          "\u0016\0\u0001\u0018\u0006\0\u0001\u0018\u0003\0\u0001\u0018\u0003\0\u0001\u0018" +
+          "\u0007\0\u0001\u0018\u0019\0\u0010\u0018\u0005\0\u0003\u0018\u0003\0\u0001\u0018" +
+          "\u0003\0\u0002\u0018\u0002\0\u0002\u0018\u0004\0\u0001\u0018\u0004\u0031\u0004\0" +
+          "\u0001\u0018\u0004\0\u0001\u0018\u0002\0\u0001\u0018\u0004\0\u0001\u0018\u0001\0" +
+          "\u0001\u0018\u0001\0\u0001\u0018\u0057\0\u0002\u0031\u000d\0\u0004\u0031\u0030\0" +
+          "\u0001\u0031\u000d\0\u0002\u0031\u0008\0\u0002\u0031\u0001\0\u0001\u0031\u0001\0" +
+          "\u0001\u0031\u0009\0\u0001\u0031\u0009\0\u0002\u0031\u0006\0\u0001\u0031\u0002\0" +
+          "\u0004\u0031\u0003\0\u0001\u0031\u0002\0\u0002\u0031\u0001\0\u0003\u0031\u0005\0" +
+          "\u0001\u0031\u0001\0\u0002\u0031\u0002\0\u0002\u0031\u0001\0\u0004\u0031\u0005\0" +
+          "\u0001\u0031\u0001\0\u0002\u0031\u0025\0\u0001\u0018\u0004\0\u0001\u0018\u0009\0" +
+          "\u0001\u0018\u0012\0\u0001\u0018\u0003\0\u0001\u0018\u000b\0\u0001\u0032\u0002\0" +
+          "\u0001\u0032\u0008\0\u0001\u0018\u000a\0\u0004\u0032\u0025\0\u0001\u0018\u0011\0" +
+          "\u0001\u0018\u0016\0\u0002\u0018\u0013\0\u0001\u0030\u0001\u0018\u0024\0\u0001\u0032" +
+          "\u0011\0\u0001\u0030\u0026\0\u0001\u0030\u0009\0\u0001\u0032\u000d\0\u0004\u0018" +
+          "\u0002\0\u0002\u0018\u000c\0\u0003\u0018\u0001\u0032\u0001\0\u0002\u0032\u0009\0" +
+          "\u0003\u0018\u0003\0\u0001\u0018\u0001\0\u0001\u0032\u0004\0\u0001\u0032\u0002\u0018" +
+          "\u0005\0\u0004\u0032\u0002\0\u0001\u0018\u0001\u0032\u000a\0\u0004\u0030\u0001\0" +
+          "\u0002\u0018\u0001\0\u0001\u0032\u0007\0\u0001\u0018\u0013\0\u0001\u0018\u0004\0" +
+          "\u0001\u0018\u0006\0\u0001\u0018\u0003\0\u0001\u0018\u0006\0\u0001\u0018\u0005\0" +
+          "\u0001\u0018\u0002\0\u0002\u0018\u0001\0\u000f\u0018\u0002\0\u0001\u0018\u000b\0" +
+          "\u0007\u0018\u0002\0\u0001\u0018\u0001\0\u0001\u0018\u0001\0\u0001\u0018\u0002\0" +
+          "\u0001\u0018\u0001\0\u0001\u0018\u0001\0\u0001\u0018\u0001\0\u0001\u0018\u0004\0" +
+          "\u0001\u0032\u0001\0\u0002\u0018\u0005\0\u0001\u0018\u0001\0\u0001\u0018\u0002\0" +
+          "\u0003\u0018\u0001\0\u0001\u0018\u0007\0\u0001\u0018\u0001\0\u0001\u0018\u0016\0" +
+          "\u0001\u0018\u0006\0\u0001\u0018\u0003\0\u0001\u0018\u0003\0\u0001\u0018\u0007\0" +
+          "\u0001\u0018\u0019\0\u0010\u0018\u0005\0\u0003\u0018\u0003\0\u0001\u0018\u0003\0" +
+          "\u0002\u0018\u0002\0\u0002\u0018\u0004\0\u0001\u0018\u0004\u0032\u0004\0\u0001\u0018" +
+          "\u0004\0\u0001\u0018\u0002\0\u0001\u0018\u0004\0\u0001\u0018\u0001\0\u0001\u0018" +
+          "\u0001\0\u0001\u0018\u0057\0\u0002\u0032\u000d\0\u0004\u0032\u0030\0\u0001\u0032" +
+          "\u000d\0\u0002\u0032\u0008\0\u0002\u0032\u0001\0\u0001\u0032\u0001\0\u0001\u0032" +
+          "\u0009\0\u0001\u0032\u0009\0\u0002\u0032\u0006\0\u0001\u0032\u0002\0\u0004\u0032" +
+          "\u0003\0\u0001\u0032\u0002\0\u0002\u0032\u0001\0\u0003\u0032\u0005\0\u0001\u0032" +
+          "\u0001\0\u0002\u0032\u0002\0\u0002\u0032\u0001\0\u0004\u0032\u0005\0\u0001\u0032" +
+          "\u0001\0\u0002\u0032\u0057\0\u0001\u003c\u0002\0\u0001\u003c\u0013\0\u0004\u003c" +
+          "\u0045\0\u0001\u0019\u005a\0\u0001\u0019\u004b\0\u0001\u0019\u0025\0\u0001\u003c" +
+          "\u0011\0\u0001\u0019\u0026\0\u0001\u0019\u0009\0\u0001\u003c\u0024\0\u0001\u003c" +
+          "\u0001\0\u0002\u003c\u0011\0\u0001\u003c\u0004\0\u0001\u003c\u0007\0\u0004\u003c" +
+          "\u0003\0\u0001\u003c\u000a\0\u0004\u0019\u0004\0\u0001\u003c\u0085\0\u0002\u0019" +
+          "\u0078\0\u0001\u003c\u008d\0\u0004\u003c\u006d\0\u0002\u003c\u000d\0\u0004\u003c" +
+          "\u0030\0\u0001\u003c\u000d\0\u0002\u003c\u0008\0\u0002\u003c\u0001\0\u0001\u003c" +
+          "\u0001\0\u0001\u003c\u0009\0\u0001\u003c\u0009\0\u0002\u003c\u0006\0\u0001\u003c" +
+          "\u0002\0\u0004\u003c\u0003\0\u0001\u003c\u0002\0\u0002\u003c\u0001\0\u0003\u003c" +
+          "\u0005\0\u0001\u003c\u0001\0\u0002\u003c\u0002\0\u0002\u003c\u0001\0\u0004\u003c" +
+          "\u0005\0\u0001\u003c\u0001\0\u0002\u003c\u0025\0\u0001\u0018\u0004\0\u0001\u0018" +
+          "\u0009\0\u0001\u0018\u0012\0\u0001\u0018\u0003\0\u0001\u0018\u000b\0\u0001\u003d" +
+          "\u0002\0\u0001\u003d\u0008\0\u0001\u0018\u000a\0\u0004\u003d\u0025\0\u0001\u0018" +
+          "\u0011\0\u0001\u0018\u0016\0\u0002\u0018\u0013\0\u0001\u0019\u0001\u0018\u0024\0" +
+          "\u0001\u003d\u0011\0\u0001\u0019\u0026\0\u0001\u0019\u0009\0\u0001\u003d\u000d\0" +
+          "\u0004\u0018\u0002\0\u0002\u0018\u000c\0\u0003\u0018\u0001\u003d\u0001\0\u0002\u003d" +
+          "\u0009\0\u0003\u0018\u0003\0\u0001\u0018\u0001\0\u0001\u003d\u0004\0\u0001\u003d" +
+          "\u0002\u0018\u0005\0\u0004\u003d\u0002\0\u0001\u0018\u0001\u003d\u000a\0\u0004\u0019" +
+          "\u0001\0\u0002\u0018\u0001\0\u0001\u003d\u0007\0\u0001\u0018\u0013\0\u0001\u0018" +
+          "\u0004\0\u0001\u0018\u0006\0\u0001\u0018\u0003\0\u0001\u0018\u0006\0\u0001\u0018" +
+          "\u0005\0\u0001\u0018\u0002\0\u0002\u0018\u0001\0\u000f\u0018\u0002\0\u0001\u0018" +
+          "\u000b\0\u0007\u0018\u0002\0\u0001\u0018\u0001\0\u0001\u0018\u0001\0\u0001\u0018" +
+          "\u0002\0\u0001\u0018\u0001\0\u0001\u0018\u0001\0\u0001\u0018\u0001\0\u0001\u0018" +
+          "\u0004\0\u0001\u003d\u0001\0\u0002\u0018\u0005\0\u0001\u0018\u0001\0\u0001\u0018" +
+          "\u0002\0\u0003\u0018\u0001\0\u0001\u0018\u0007\0\u0001\u0018\u0001\0\u0001\u0018" +
+          "\u0016\0\u0001\u0018\u0006\0\u0001\u0018\u0003\0\u0001\u0018\u0003\0\u0001\u0018" +
+          "\u0007\0\u0001\u0018\u0019\0\u0010\u0018\u0005\0\u0003\u0018\u0003\0\u0001\u0018" +
+          "\u0003\0\u0002\u0018\u0002\0\u0002\u0018\u0004\0\u0001\u0018\u0004\u003d\u0004\0" +
+          "\u0001\u0018\u0004\0\u0001\u0018\u0002\0\u0001\u0018\u0004\0\u0001\u0018\u0001\0" +
+          "\u0001\u0018\u0001\0\u0001\u0018\u0057\0\u0002\u003d\u000d\0\u0004\u003d\u0030\0" +
+          "\u0001\u003d\u000d\0\u0002\u003d\u0008\0\u0002\u003d\u0001\0\u0001\u003d\u0001\0" +
+          "\u0001\u003d\u0009\0\u0001\u003d\u0009\0\u0002\u003d\u0006\0\u0001\u003d\u0002\0" +
+          "\u0004\u003d\u0003\0\u0001\u003d\u0002\0\u0002\u003d\u0001\0\u0003\u003d\u0005\0" +
+          "\u0001\u003d\u0001\0\u0002\u003d\u0002\0\u0002\u003d\u0001\0\u0004\u003d\u0005\0" +
+          "\u0001\u003d\u0001\0\u0002\u003d\u0057\0\u0001\u004f\u0002\0\u0001\u004f\u0013\0" +
+          "\u0004\u004f\u0089\0\u0001\u004f\u0042\0\u0001\u004f\u0024\0\u0001\u004f\u0001\0" +
+          "\u0002\u004f\u0011\0\u0001\u004f\u0004\0\u0001\u004f\u0007\0\u0004\u004f\u0003\0" +
+          "\u0001\u004f\u0012\0\u0001\u004f\u0076\0\u0001\u004f\u008d\0\u0004\u004f\u006d\0" +
+          "\u0002\u004f\u000d\0\u0004\u004f\u0030\0\u0001\u004f\u000d\0\u0002\u004f\u0008\0" +
+          "\u0002\u004f\u0001\0\u0001\u004f\u0001\0\u0001\u004f\u0009\0\u0001\u004f\u0009\0" +
+          "\u0002\u004f\u0006\0\u0001\u004f\u0002\0\u0004\u004f\u0003\0\u0001\u004f\u0002\0" +
+          "\u0002\u004f\u0001\0\u0003\u004f\u0005\0\u0001\u004f\u0001\0\u0002\u004f\u0002\0" +
+          "\u0002\u004f\u0001\0\u0004\u004f\u0005\0\u0001\u004f\u0001\0\u0002\u004f\u0057\0" +
+          "\u0001\u006c\u0002\0\u0001\u006c\u0013\0\u0004\u006c\u0045\0\u0001\u0030\u005a\0" +
+          "\u0001\u0030\u004b\0\u0001\u0030\u0025\0\u0001\u006c\u0011\0\u0001\u0030\u0026\0" +
+          "\u0001\u0030\u0009\0\u0001\u006c\u0024\0\u0001\u006c\u0001\0\u0002\u006c\u0011\0" +
+          "\u0001\u006c\u0004\0\u0001\u006c\u0007\0\u0004\u006c\u0003\0\u0001\u006c\u000a\0" +
+          "\u0004\u0030\u0004\0\u0001\u006c\u0085\0\u0002\u0030\u0078\0\u0001\u006c\u008d\0" +
+          "\u0004\u006c\u006d\0\u0002\u006c\u000d\0\u0004\u006c\u0030\0\u0001\u006c\u000d\0" +
+          "\u0002\u006c\u0008\0\u0002\u006c\u0001\0\u0001\u006c\u0001\0\u0001\u006c\u0009\0" +
+          "\u0001\u006c\u0009\0\u0002\u006c\u0006\0\u0001\u006c\u0002\0\u0004\u006c\u0003\0" +
+          "\u0001\u006c\u0002\0\u0002\u006c\u0001\0\u0003\u006c\u0005\0\u0001\u006c\u0001\0" +
+          "\u0002\u006c\u0002\0\u0002\u006c\u0001\0\u0004\u006c\u0005\0\u0001\u006c\u0001\0" +
+          "\u0002\u006c\u001e\0";
+
+        private static int[] zzUnpackTrans()
+        {
+            int[] result = new int[20002];
+            int offset = 0;
+            offset = zzUnpackTrans(ZZ_TRANS_PACKED_0, offset, result);
+            return result;
+        }
+
+        private static int zzUnpackTrans(String packed, int offset, int[] result)
+        {
+            int i = 0;       /* index in packed string  */
+            int j = offset;  /* index in unpacked array */
+            int l = packed.Length;
+            while (i < l)
+            {
+                int count = packed[i++];   /* run length */
+                int value = packed[i++];   /* packed target state, stored +1 */
+                value--;                   /* undo the bias so -1 ("no transition") fits in a char */
+                do result[j++] = value; while (--count > 0);
+            }
+            return j;
+        }
+
+
+        /* error codes */
+        private const int ZZ_UNKNOWN_ERROR = 0;
+        private const int ZZ_NO_MATCH = 1;
+        private const int ZZ_PUSHBACK_2BIG = 2;
+
+        /* error messages for the codes above */
+        private static readonly String[] ZZ_ERROR_MSG = {
+            "Unknown internal scanner error",
+            "Error: could not match input",
+            "Error: pushback value was too large"
+        };
+
+        /**
+         * ZZ_ATTRIBUTE[aState] contains the attributes of state <code>aState</code>
+         */
+        private static readonly int[] ZZ_ATTRIBUTE = zzUnpackAttribute();
+
+        private const String ZZ_ATTRIBUTE_PACKED_0 =
+          "\u0001\0\u0001\u0009\u001d\u0001\u0010\0\u0001\u0001\u0001\0\u0001\u0001\u000a\0" +
+          "\u0001\u0001\u0011\0\u0001\u0001\u004d\0";
+
+        private static int[] zzUnpackAttribute()
+        {
+            int[] result = new int[156];
+            int offset = 0;
+            offset = zzUnpackAttribute(ZZ_ATTRIBUTE_PACKED_0, offset, result);
+            return result;
+        }
+
+        private static int zzUnpackAttribute(String packed, int offset, int[] result)
+        {
+            int i = 0;       /* index in packed string  */
+            int j = offset;  /* index in unpacked array */
+            int l = packed.Length;
+            while (i < l)
+            {
+                int count = packed[i++];
+                int value = packed[i++];
+                do result[j++] = value; while (--count > 0);
+            }
+            return j;
+        }
+
+        /** the input device */
+        private TextReader zzReader;
+
+        /** the current state of the DFA */
+        private int zzState;
+
+        /** the current lexical state */
+        private int zzLexicalState = YYINITIAL;
+
+        /** this buffer contains the current text to be matched and is
+            the source of the yytext() string */
+        private char[] zzBuffer = new char[ZZ_BUFFERSIZE];
+
+        /** the text position at the last accepting state */
+        private int zzMarkedPos;
+
+        /** the current text position in the buffer */
+        private int zzCurrentPos;
+
+        /** startRead marks the beginning of the yytext() string in the buffer */
+        private int zzStartRead;
+
+        /** endRead marks the last character in the buffer that has been read
+            from input */
+        private int zzEndRead;
+
+        /** number of newlines encountered up to the start of the matched text */
+        private int yyline;
+
+        /** the number of characters up to the start of the matched text */
+        private int yychar;
+
+        /**
+         * the number of characters from the last newline up to the start of the 
+         * matched text
+         */
+        private int yycolumn;
+
+        /** 
+         * zzAtBOL == true <=> the scanner is currently at the beginning of a line
+         */
+        private bool zzAtBOL = true;
+
+        /** zzAtEOF == true <=> the scanner is at the EOF */
+        private bool zzAtEOF;
+
+        /** denotes if the user-EOF-code has already been executed */
+        private bool zzEOFDone;
+
+        /* user code: */
+        /** Alphanumeric sequences */
+        public const int WORD_TYPE = StandardTokenizer.ALPHANUM;
+
+        /** Numbers */
+        public const int NUMERIC_TYPE = StandardTokenizer.NUM;
+
+        /**
+         * Chars in class \p{Line_Break = Complex_Context} are from South East Asian
+         * scripts (Thai, Lao, Myanmar, Khmer, etc.).  Sequences of these are kept 
+         * together as a single token rather than broken up, because the logic
+         * required to break them at word boundaries is too complex for UAX#29.
+         * <p>
+         * See Unicode Line Breaking Algorithm: http://www.unicode.org/reports/tr14/#SA
+         */
+        public const int SOUTH_EAST_ASIAN_TYPE = StandardTokenizer.SOUTHEAST_ASIAN;
+
+        public const int IDEOGRAPHIC_TYPE = StandardTokenizer.IDEOGRAPHIC;
+
+        public const int HIRAGANA_TYPE = StandardTokenizer.HIRAGANA;
+
+        public const int KATAKANA_TYPE = StandardTokenizer.KATAKANA;
+
+        public const int HANGUL_TYPE = StandardTokenizer.HANGUL;
+
+        public int YYChar
+        {
+            get { return yychar; }
+        }
+
+        /**
+         * Fills CharTermAttribute with the current token text.
+         */
+        public void GetText(ICharTermAttribute t)
+        {
+            t.CopyBuffer(zzBuffer, zzStartRead, zzMarkedPos - zzStartRead);
+        }
+
+
+        /**
+         * Creates a new scanner.
+         *
+         * @param   input  the TextReader to read input from.
+         */
+        public StandardTokenizerImpl(TextReader input)
+        {
+            this.zzReader = input;
+        }
+
+
+
+        /** 
+         * Unpacks the compressed character translation table.
+         *
+         * @param packed   the packed character translation table
+         * @return         the unpacked character translation table
+         */
+        private static char[] zzUnpackCMap(String packed)
+        {
+            char[] map = new char[0x10000];
+            int i = 0;  /* index in packed string  */
+            int j = 0;  /* index in unpacked array */
+            while (i < 2848)
+            {
+                int count = packed[i++];
+                char value = packed[i++];
+                do map[j++] = value; while (--count > 0);
+            }
+            return map;
+        }
+
+
+        /**
+         * Refills the input buffer.
+         *
+         * @return      <code>false</code> iff there was new input.
+         * 
+         * @exception   System.IO.IOException  if an I/O error occurs
+         */
+        private bool zzRefill()
+        {
+
+            /* first: make room (if you can) */
+            if (zzStartRead > 0)
+            {
+                Array.Copy(zzBuffer, zzStartRead,
+                                 zzBuffer, 0,
+                                 zzEndRead - zzStartRead);
+
+                /* translate stored positions */
+                zzEndRead -= zzStartRead;
+                zzCurrentPos -= zzStartRead;
+                zzMarkedPos -= zzStartRead;
+                zzStartRead = 0;
+            }
+
+            /* is the buffer big enough? */
+            if (zzCurrentPos >= zzBuffer.Length)
+            {
+                /* if not: blow it up */
+                char[] newBuffer = new char[zzCurrentPos * 2];
+                Array.Copy(zzBuffer, 0, newBuffer, 0, zzBuffer.Length);
+                zzBuffer = newBuffer;
+            }
+
+            /* finally: fill the buffer with new input */
+            int numRead = zzReader.Read(zzBuffer, zzEndRead,
+                                                    zzBuffer.Length - zzEndRead);
+
+            if (numRead > 0)
+            {
+                zzEndRead += numRead;
+                return false;
+            }
+            // unlikely but not impossible: read 0 characters, but not at end of stream    
+            if (numRead == 0)
+            {
+                int c = zzReader.Read();
+                if (c <= 0)
+                {
+                    return true;
+                }
+                else
+                {
+                    zzBuffer[zzEndRead++] = (char)c;
+                    return false;
+                }
+            }
+
+            // numRead < 0
+            return true;
+        }
+
+
+        /**
+         * Closes the input stream.
+         */
+        public void yyclose()
+        {
+            zzAtEOF = true;            /* indicate end of file */
+            zzEndRead = zzStartRead;  /* invalidate buffer    */
+
+            if (zzReader != null)
+                zzReader.Close();
+        }
+
+
+        /**
+         * Resets the scanner to read from a new input stream.
+         * Does not close the old reader.
+         *
+         * All internal variables are reset, the old input stream 
+         * <b>cannot</b> be reused (internal buffer is discarded and lost).
+         * Lexical state is set to <tt>YYINITIAL</tt>.
+         *
+         * Internal scan buffer is resized down to its initial length, if it has grown.
+         *
+         * @param reader   the new input stream 
+         */
+
+        public void YYReset(TextReader reader)
+        {
+            zzReader = reader;
+            zzAtBOL = true;
+            zzAtEOF = false;
+            zzEOFDone = false;
+            zzEndRead = zzStartRead = 0;
+            zzCurrentPos = zzMarkedPos = 0;
+            yyline = yychar = yycolumn = 0;
+            zzLexicalState = YYINITIAL;
+            if (zzBuffer.Length > ZZ_BUFFERSIZE)
+                zzBuffer = new char[ZZ_BUFFERSIZE];
+        }
+
+
+        /**
+         * Returns the current lexical state.
+         */
+        public int yystate()
+        {
+            return zzLexicalState;
+        }
+
+
+        /**
+         * Enters a new lexical state
+         *
+         * @param newState the new lexical state
+         */
+        public void yybegin(int newState)
+        {
+            zzLexicalState = newState;
+        }
+
+
+        /**
+         * Returns the text matched by the current regular expression.
+         */
+        public String yytext()
+        {
+            return new String(zzBuffer, zzStartRead, zzMarkedPos - zzStartRead);
+        }
+
+
+        /**
+         * Returns the character at position <tt>pos</tt> from the 
+         * matched text. 
+         * 
+         * It is equivalent to yytext()[pos], but faster.
+         *
+         * @param pos the position of the character to fetch. 
+         *            A value from 0 to yylength()-1.
+         *
+         * @return the character at position pos
+         */
+        public char yycharat(int pos)
+        {
+            return zzBuffer[zzStartRead + pos];
+        }
+
+
+        /**
+         * Returns the length of the matched text region.
+         */
+
+        public int YYLength
+        {
+            get
+            {
+                return zzMarkedPos - zzStartRead;
+            }
+        }
+
+
+        /**
+         * Reports an error that occurred while scanning.
+         *
+         * In a well-formed scanner (no or only correct usage of 
+         * yypushback(int) and a match-all fallback rule) this method 
+         * will only be called with things that "Can't Possibly Happen".
+         * If this method is called, something is seriously wrong
+         * (e.g. a JFlex bug producing a faulty scanner etc.).
+         *
+         * Usual syntax/scanner level error handling should be done
+         * in error fallback rules.
+         *
+         * @param   errorCode  the code of the error message to display
+         */
+        private void zzScanError(int errorCode)
+        {
+            String message;
+            try
+            {
+                message = ZZ_ERROR_MSG[errorCode];
+            }
+            catch (IndexOutOfRangeException)
+            {
+                message = ZZ_ERROR_MSG[ZZ_UNKNOWN_ERROR];
+            }
+
+            throw new Exception(message);
+        }
+
+
+        /**
+         * Pushes the specified amount of characters back into the input stream.
+         *
+         * They will be read again by the next call of the scanning method.
+         *
+         * @param number  the number of characters to be read again.
+         *                This number must not be greater than YYLength!
+         */
+        public void yypushback(int number)
+        {
+            if (number > YYLength)
+                zzScanError(ZZ_PUSHBACK_2BIG);
+
+            zzMarkedPos -= number;
+        }
+
+
+        /**
+         * Resumes scanning until the next regular expression is matched,
+         * the end of input is encountered or an I/O error occurs.
+         *
+         * @return      the next token
+         * @exception   System.IO.IOException  if an I/O error occurs
+         */
+
+        public int GetNextToken()
+        {
+            int zzInput;
+            int zzAction;
+
+            // cached fields:
+            int zzCurrentPosL;
+            int zzMarkedPosL;
+            int zzEndReadL = zzEndRead;
+            char[] zzBufferL = zzBuffer;
+            char[] zzCMapL = ZZ_CMAP;
+
+            int[] zzTransL = ZZ_TRANS;
+            int[] zzRowMapL = ZZ_ROWMAP;
+            int[] zzAttrL = ZZ_ATTRIBUTE;
+
+            while (true)
+            {
+                zzMarkedPosL = zzMarkedPos;
+
+                yychar += zzMarkedPosL - zzStartRead;
+
+                zzAction = -1;
+
+                zzCurrentPosL = zzCurrentPos = zzStartRead = zzMarkedPosL;
+
+                zzState = ZZ_LEXSTATE[zzLexicalState];
+
+                // set up zzAction for empty match case:
+                int zzAttributes = zzAttrL[zzState];
+                if ((zzAttributes & 1) == 1)
+                {
+                    zzAction = zzState;
+                }
+
+
+                //zzForAction: 
+                {
+                    while (true)
+                    {
+
+                        if (zzCurrentPosL < zzEndReadL)
+                            zzInput = zzBufferL[zzCurrentPosL++];
+                        else if (zzAtEOF)
+                        {
+                            zzInput = YYEOF;
+                            break;
+                        }
+                        else
+                        {
+                            // store back cached positions
+                            zzCurrentPos = zzCurrentPosL;
+                            zzMarkedPos = zzMarkedPosL;
+                            bool eof = zzRefill();
+                            // get translated positions and possibly new buffer
+                            zzCurrentPosL = zzCurrentPos;
+                            zzMarkedPosL = zzMarkedPos;
+                            zzBufferL = zzBuffer;
+                            zzEndReadL = zzEndRead;
+                            if (eof)
+                            {
+                                zzInput = YYEOF;
+                                break;
+                            }
+                            else
+                            {
+                                zzInput = zzBufferL[zzCurrentPosL++];
+                            }
+                        }
+                        int zzNext = zzTransL[zzRowMapL[zzState] + zzCMapL[zzInput]];
+                        if (zzNext == -1) break;
+                        zzState = zzNext;
+
+                        zzAttributes = zzAttrL[zzState];
+                        if ((zzAttributes & 1) == 1)
+                        {
+                            zzAction = zzState;
+                            zzMarkedPosL = zzCurrentPosL;
+                            if ((zzAttributes & 8) == 8) break;
+                        }
+
+                    }
+                }
+
+                // store back cached position
+                zzMarkedPos = zzMarkedPosL;
+
+                switch (zzAction < 0 ? zzAction : ZZ_ACTION[zzAction])
+                {
+                    case 1:
+                        { /* Break so we don't hit fall-through warning: */
+                            break; /* Not numeric, word, ideographic, hiragana, or SE Asian -- ignore it. */
+                        }
+                    case 9: break;
+                    case 2:
+                        {
+                            return WORD_TYPE;
+                        }
+                    case 10: break;
+                    case 3:
+                        {
+                            return NUMERIC_TYPE;
+                        }
+                    case 11: break;
+                    case 4:
+                        {
+                            return KATAKANA_TYPE;
+                        }
+                    case 12: break;
+                    case 5:
+                        {
+                            return SOUTH_EAST_ASIAN_TYPE;
+                        }
+                    case 13: break;
+                    case 6:
+                        {
+                            return IDEOGRAPHIC_TYPE;
+                        }
+                    case 14: break;
+                    case 7:
+                        {
+                            return HIRAGANA_TYPE;
+                        }
+                    case 15: break;
+                    case 8:
+                        {
+                            return HANGUL_TYPE;
+                        }
+                    case 16: break;
+                    default:
+                        if (zzInput == YYEOF && zzStartRead == zzCurrentPos)
+                        {
+                            zzAtEOF = true;
+                            {
+                                return StandardTokenizerInterface.YYEOF;
+                            }
+                        }
+                        else
+                        {
+                            zzScanError(ZZ_NO_MATCH);
+                        }
+                        break;
+                }
+            }
+        }
+
+    }
+}
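
The scanning loop in GetNextToken() above is a table-driven DFA: the next state
is looked up as ZZ_TRANS[ZZ_ROWMAP[state] + ZZ_CMAP[input]], where ZZ_CMAP
collapses the full char range into a small number of character classes. A
minimal, self-contained sketch of the same stepping technique, using tiny
hand-written tables rather than the generated ones (all names here are
illustrative, not part of the port):

    using System;

    class MiniDfa
    {
        // Two character classes: 0 = letter, 1 = anything else (plays the
        // role of ZZ_CMAP, which maps chars to class indices).
        static int CharClass(char c) { return char.IsLetter(c) ? 0 : 1; }

        // States: 0 = start, 1 = in-word; -1 means "no transition".
        static readonly int[] rowMap = { 0, 2 };      // like ZZ_ROWMAP
        static readonly int[] trans  = {
            1, -1,   // from state 0: letter -> 1, other -> fail
            1, -1,   // from state 1: letter -> 1, other -> fail
        };

        // Returns the length of the leading letter-run in s, tracking the
        // last accepting position the way zzMarkedPos does above.
        static int Match(string s)
        {
            int state = 0, marked = 0;
            for (int i = 0; i < s.Length; i++)
            {
                int next = trans[rowMap[state] + CharClass(s[i])];
                if (next == -1) break;   // same role as "if (zzNext == -1) break;"
                state = next;
                marked = i + 1;          // state 1 is accepting
            }
            return marked;
        }

        static void Main()
        {
            Console.WriteLine(Match("hello world"));  // prints 5
        }
    }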


[31/50] [abbrv] git commit: Minor bugfix for reader type logic

Posted by mh...@apache.org.
Minor bugfix for reader type logic


Project: http://git-wip-us.apache.org/repos/asf/lucenenet/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucenenet/commit/02797a95
Tree: http://git-wip-us.apache.org/repos/asf/lucenenet/tree/02797a95
Diff: http://git-wip-us.apache.org/repos/asf/lucenenet/diff/02797a95

Branch: refs/heads/branch_4x
Commit: 02797a95e97b1ca822ea2c333a61b92dd64d77db
Parents: e9c6e03
Author: Paul Irwin <pa...@gmail.com>
Authored: Tue Aug 6 22:58:58 2013 -0400
Committer: Paul Irwin <pa...@gmail.com>
Committed: Tue Aug 6 22:58:58 2013 -0400

----------------------------------------------------------------------
 src/core/Index/CompositeReaderContext.cs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucenenet/blob/02797a95/src/core/Index/CompositeReaderContext.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/CompositeReaderContext.cs b/src/core/Index/CompositeReaderContext.cs
index 12e2f4f..eb965fa 100644
--- a/src/core/Index/CompositeReaderContext.cs
+++ b/src/core/Index/CompositeReaderContext.cs
@@ -88,7 +88,7 @@ namespace Lucene.Net.Index
 
             private IndexReaderContext Build(CompositeReaderContext parent, IndexReader reader, int ord, int docBase)
             {
-                if (reader.GetType() == typeof(AtomicReader))
+                if (reader is AtomicReader)
                 {
                     AtomicReader ar = (AtomicReader)reader;
                     AtomicReaderContext atomic = new AtomicReaderContext(parent, ar, ord, docBase, leaves.Count, leafDocBase);
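
The one-character fix matters because GetType() equality only matches the exact
runtime type, never a subclass, and AtomicReader is an abstract base in the 4.x
API, so the old branch could never be taken; "is" also accepts derived readers.
A small illustration with stand-in types (not the Lucene.Net classes):

    using System;

    abstract class AtomicReaderLike { }              // stand-in abstract base
    class SegmentReaderLike : AtomicReaderLike { }   // stand-in concrete subclass

    class Program
    {
        static void Main()
        {
            object reader = new SegmentReaderLike();

            // Exact-type check: false, the runtime type is the subclass.
            Console.WriteLine(reader.GetType() == typeof(AtomicReaderLike));

            // Compatibility check: true for the base and any subclass.
            Console.WriteLine(reader is AtomicReaderLike);
        }
    }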


[29/50] [abbrv] git commit: Fix bug with detecting fields

Posted by mh...@apache.org.
Fix bug with detecting fields


Project: http://git-wip-us.apache.org/repos/asf/lucenenet/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucenenet/commit/12606ffb
Tree: http://git-wip-us.apache.org/repos/asf/lucenenet/tree/12606ffb
Diff: http://git-wip-us.apache.org/repos/asf/lucenenet/diff/12606ffb

Branch: refs/heads/branch_4x
Commit: 12606ffb2955b01fe8b72acdd4c613161f6aeb02
Parents: ec36d0d
Author: Paul Irwin <pa...@gmail.com>
Authored: Tue Aug 6 18:43:23 2013 -0400
Committer: Paul Irwin <pa...@gmail.com>
Committed: Tue Aug 6 18:43:23 2013 -0400

----------------------------------------------------------------------
 src/core/Codecs/BlockTreeTermsReader.cs | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucenenet/blob/12606ffb/src/core/Codecs/BlockTreeTermsReader.cs
----------------------------------------------------------------------
diff --git a/src/core/Codecs/BlockTreeTermsReader.cs b/src/core/Codecs/BlockTreeTermsReader.cs
index 0c42784..f0148ce 100644
--- a/src/core/Codecs/BlockTreeTermsReader.cs
+++ b/src/core/Codecs/BlockTreeTermsReader.cs
@@ -100,11 +100,12 @@ namespace Lucene.Net.Codecs
                         throw new CorruptIndexException("invalid sumTotalTermFreq: " + sumTotalTermFreq + " sumDocFreq: " + sumDocFreq + " (resource=" + input + ")");
                     }
                     long indexStartFP = indexDivisor != -1 ? indexIn.ReadVLong() : 0;
-                    FieldReader previous = fields[fieldInfo.name] = new FieldReader(this, fieldInfo, numTerms, rootCode, sumTotalTermFreq, sumDocFreq, docCount, indexStartFP, indexIn);
-                    if (previous != null)
+                    FieldReader previous;
+                    if (fields.TryGetValue(fieldInfo.name, out previous))
                     {
                         throw new CorruptIndexException("duplicate field: " + fieldInfo.name + " (resource=" + input + ")");
                     }
+                    fields[fieldInfo.name] = new FieldReader(this, fieldInfo, numTerms, rootCode, sumTotalTermFreq, sumDocFreq, docCount, indexStartFP, indexIn);
                 }
                 if (indexDivisor != -1)
                 {
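
The bug here is C#-specific: "FieldReader previous = fields[fieldInfo.name] =
new FieldReader(...)" assigns the new reader both into the dictionary and into
previous, because an assignment expression yields the value just assigned and a
C# indexer set, unlike Java's Map.put, never returns the old entry. So previous
was always non-null and the duplicate check fired for every field. Probing with
TryGetValue before storing restores the intended semantics; a minimal sketch of
the pattern:

    using System;
    using System.Collections.Generic;

    class Program
    {
        static readonly Dictionary<string, string> fields =
            new Dictionary<string, string>();

        static void Add(string name, string reader)
        {
            // Detect duplicates first; the indexer below would silently
            // overwrite an existing entry otherwise.
            string previous;
            if (fields.TryGetValue(name, out previous))
                throw new InvalidOperationException("duplicate field: " + name);
            fields[name] = reader;
        }

        static void Main()
        {
            Add("title", "reader1");          // ok
            try { Add("title", "reader2"); }  // throws: duplicate field
            catch (InvalidOperationException e) { Console.WriteLine(e.Message); }
        }
    }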


[20/50] [abbrv] Massive cleanup, reducing compiler errors

Posted by mh...@apache.org.
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/80561f72/src/core/Codecs/Compressing/CompressingTermVectorsReader.cs
----------------------------------------------------------------------
diff --git a/src/core/Codecs/Compressing/CompressingTermVectorsReader.cs b/src/core/Codecs/Compressing/CompressingTermVectorsReader.cs
index 0de0f4f..5d00b87 100644
--- a/src/core/Codecs/Compressing/CompressingTermVectorsReader.cs
+++ b/src/core/Codecs/Compressing/CompressingTermVectorsReader.cs
@@ -10,36 +10,36 @@ using System.Text;
 
 namespace Lucene.Net.Codecs.Compressing
 {
-    public sealed class CompressingTermVectorsReader: IDisposable, TermVectorsReader
+    public sealed class CompressingTermVectorsReader : TermVectorsReader, IDisposable
     {
-        private FieldInfos fieldInfos;
-        CompressingStoredFieldsIndexReader indexReader;
-        IndexInput vectorsStream;
-        private int packedIntsVersion;
-        private CompressionMode compressionMode;
-        private Decompressor decompressor;
-        private int chunkSize;
-        private int numDocs;
+        private readonly FieldInfos fieldInfos;
+        internal readonly CompressingStoredFieldsIndexReader indexReader;
+        internal readonly IndexInput vectorsStream;
+        private readonly int packedIntsVersion;
+        private readonly CompressionMode compressionMode;
+        private readonly Decompressor decompressor;
+        private readonly int chunkSize;
+        private readonly int numDocs;
         private bool closed;
-        private BlockPackedReaderIterator reader;
-        
+        private readonly BlockPackedReaderIterator reader;
+
         private CompressingTermVectorsReader(CompressingTermVectorsReader reader)
         {
             this.fieldInfos = reader.fieldInfos;
             this.vectorsStream = (IndexInput)reader.vectorsStream.Clone();
-            this.indexReader = reader.indexReader.clone();
+            this.indexReader = (CompressingStoredFieldsIndexReader)reader.indexReader.Clone();
             this.packedIntsVersion = reader.packedIntsVersion;
             this.compressionMode = reader.compressionMode;
             this.decompressor = (Decompressor)reader.decompressor.Clone();
             this.chunkSize = reader.chunkSize;
             this.numDocs = reader.numDocs;
-            this.reader = new BlockPackedReaderIterator(vectorsStream, packedIntsVersion, BLOCK_SIZE, 0);
+            this.reader = new BlockPackedReaderIterator(vectorsStream, packedIntsVersion, CompressingTermVectorsWriter.BLOCK_SIZE, 0);
             this.closed = false;
         }
 
-          /** Sole constructor. */
+        /** Sole constructor. */
         public CompressingTermVectorsReader(Directory d, SegmentInfo si, String segmentSuffix, FieldInfos fn,
-            IOContext context, String formatName, CompressionMode compressionMode) 
+            IOContext context, String formatName, CompressionMode compressionMode)
         {
             this.compressionMode = compressionMode;
             string segment = si.name;
@@ -47,15 +47,16 @@ namespace Lucene.Net.Codecs.Compressing
             fieldInfos = fn;
             numDocs = si.DocCount;
             IndexInput indexStream = null;
-            try {
-                vectorsStream = d.OpenInput(IndexFileNames.SegmentFileName(segment, segmentSuffix, VECTORS_EXTENSION), context);
-                string indexStreamFN = IndexFileNames.SegmentFileName(segment, segmentSuffix, VECTORS_INDEX_EXTENSION);
+            try
+            {
+                vectorsStream = d.OpenInput(IndexFileNames.SegmentFileName(segment, segmentSuffix, CompressingTermVectorsWriter.VECTORS_EXTENSION), context);
+                string indexStreamFN = IndexFileNames.SegmentFileName(segment, segmentSuffix, CompressingTermVectorsWriter.VECTORS_INDEX_EXTENSION);
                 indexStream = d.OpenInput(indexStreamFN, context);
 
-                string codecNameIdx = formatName + CODEC_SFX_IDX;
-                string codecNameDat = formatName + CODEC_SFX_DAT;
-                CodecUtil.CheckHeader(indexStream, codecNameIdx, VERSION_START, VERSION_CURRENT);
-                CodecUtil.CheckHeader(vectorsStream, codecNameDat, VERSION_START, VERSION_CURRENT);
+                string codecNameIdx = formatName + CompressingTermVectorsWriter.CODEC_SFX_IDX;
+                string codecNameDat = formatName + CompressingTermVectorsWriter.CODEC_SFX_DAT;
+                CodecUtil.CheckHeader(indexStream, codecNameIdx, CompressingTermVectorsWriter.VERSION_START, CompressingTermVectorsWriter.VERSION_CURRENT);
+                CodecUtil.CheckHeader(vectorsStream, codecNameDat, CompressingTermVectorsWriter.VERSION_START, CompressingTermVectorsWriter.VERSION_CURRENT);
 
                 indexReader = new CompressingStoredFieldsIndexReader(indexStream, si);
                 indexStream = null;
@@ -63,50 +64,71 @@ namespace Lucene.Net.Codecs.Compressing
                 packedIntsVersion = vectorsStream.ReadVInt();
                 chunkSize = vectorsStream.ReadVInt();
                 decompressor = compressionMode.newDecompressor();
-                this.reader = new BlockPackedReaderIterator(vectorsStream, packedIntsVersion, BLOCK_SIZE, 0);
+                this.reader = new BlockPackedReaderIterator(vectorsStream, packedIntsVersion, CompressingTermVectorsWriter.BLOCK_SIZE, 0);
 
                 success = true;
-            } finally {
-                if (!success) {
-                IOUtils.CloseWhileHandlingException(this, indexStream);
+            }
+            finally
+            {
+                if (!success)
+                {
+                    IOUtils.CloseWhileHandlingException((IDisposable)this, indexStream);
                 }
             }
         }
 
-        CompressionMode getCompressionMode() 
+        internal CompressionMode CompressionMode
         {
-            return compressionMode;
+            get
+            {
+                return compressionMode;
+            }
         }
 
-        int getChunkSize() {
-            return chunkSize;
+        internal int ChunkSize
+        {
+            get
+            {
+                return chunkSize;
+            }
         }
 
-        int getPackedIntsVersion() {
-            return packedIntsVersion;
+        internal int PackedIntsVersion
+        {
+            get
+            {
+                return packedIntsVersion;
+            }
         }
 
-        CompressingStoredFieldsIndexReader getIndex() {
-            return indexReader;
+        internal CompressingStoredFieldsIndexReader Index
+        {
+            get
+            {
+                return indexReader;
+            }
         }
 
-        IndexInput getVectorsStream() {
-            return vectorsStream;
+        internal IndexInput VectorsStream
+        {
+            get
+            {
+                return vectorsStream;
+            }
         }
 
         /**
         * @throws AlreadyClosedException if this TermVectorsReader is closed
         */
-        private void ensureOpen()
+        private void EnsureOpen()
         {
-            if (closed) {
+            if (closed)
+            {
                 throw new AlreadyClosedException("this FieldsReader is closed");
             }
         }
-
-
-
-        public void Dispose()
+        
+        protected override void Dispose(bool disposing)
         {
             if (!closed)
             {
@@ -114,15 +136,20 @@ namespace Lucene.Net.Codecs.Compressing
                 closed = true;
             }
         }
+        
+        public override object Clone()
+        {
+            return new CompressingTermVectorsReader(this);
+        }
 
         public override Index.Fields Get(int doc)
         {
-            ensureOpen();
+            EnsureOpen();
 
             // seek to the right place
             {
-              long startPointer = indexReader.GetStartPointer(doc);
-              vectorsStream.Seek(startPointer);
+                long startPointer = indexReader.GetStartPointer(doc);
+                vectorsStream.Seek(startPointer);
             }
 
             // decode
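
Swapping the public Dispose() for a protected Dispose(bool disposing) override
follows the standard .NET dispose pattern: the base class implements
IDisposable.Dispose() once and funnels it through the protected hook, so each
subclass only overrides that hook. A generic sketch of the pattern (stand-in
types, not the Lucene.Net TermVectorsReader base itself):

    using System;

    abstract class ReaderBase : IDisposable
    {
        public void Dispose()
        {
            Dispose(true);
            GC.SuppressFinalize(this);
        }

        // Single override point for subclasses.
        protected abstract void Dispose(bool disposing);
    }

    class VectorsReaderLike : ReaderBase
    {
        private bool closed;

        protected override void Dispose(bool disposing)
        {
            if (!closed && disposing)
            {
                // release managed resources (streams, index readers, ...)
                closed = true;
            }
        }
    }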
@@ -130,94 +157,107 @@ namespace Lucene.Net.Codecs.Compressing
             // - chunkDocs: number of docs of the chunk
             int docBase = vectorsStream.ReadVInt();
             int chunkDocs = vectorsStream.ReadVInt();
-            if (doc < docBase || doc >= docBase + chunkDocs || docBase + chunkDocs > numDocs) {
-              throw new CorruptIndexException("docBase=" + docBase + ",chunkDocs=" + chunkDocs + ",doc=" + doc);
-            }
-
-            long skip; // number of fields to skip
-            long numFields; // number of fields of the document we're looking for
-            long totalFields; // total number of fields of the chunk (sum for all docs)
-            if (chunkDocs == 1) {
-              skip = 0;
-              numFields = totalFields = vectorsStream.ReadVInt();
-            } else {
-              reader.Reset(vectorsStream, chunkDocs);
-              long sum = 0;
-              for (int i = docBase; i < doc; ++i) {
-                sum += reader.Next();
-              }
-              skip = sum;
-              numFields = (int) reader.Next();
-              sum += numFields;
-              for (int i = doc + 1; i < docBase + chunkDocs; ++i) {
-                sum += reader.Next();
-              }
-              totalFields = sum;
-            }
-
-            if (numFields == 0) {
-              // no vectors
-              return null;
+            if (doc < docBase || doc >= docBase + chunkDocs || docBase + chunkDocs > numDocs)
+            {
+                throw new CorruptIndexException("docBase=" + docBase + ",chunkDocs=" + chunkDocs + ",doc=" + doc);
+            }
+
+            int skip; // number of fields to skip
+            int numFields; // number of fields of the document we're looking for
+            int totalFields; // total number of fields of the chunk (sum for all docs)
+            if (chunkDocs == 1)
+            {
+                skip = 0;
+                numFields = totalFields = vectorsStream.ReadVInt();
+            }
+            else
+            {
+                reader.Reset(vectorsStream, chunkDocs);
+                int sum = 0;
+                for (int i = docBase; i < doc; ++i)
+                {
+                    sum += (int)reader.Next();
+                }
+                skip = sum;
+                numFields = (int)reader.Next();
+                sum += numFields;
+                for (int i = doc + 1; i < docBase + chunkDocs; ++i)
+                {
+                    sum += (int)reader.Next();
+                }
+                totalFields = sum;
+            }
+
+            if (numFields == 0)
+            {
+                // no vectors
+                return null;
             }
 
             // read field numbers that have term vectors
             int[] fieldNums;
             {
-              int token = vectorsStream.ReadByte() & 0xFF;
-              int bitsPerFieldNum = token & 0x1F;
-              int totalDistinctFields = Number.URShift(token, 5);
-              if (totalDistinctFields == 0x07) {
-                totalDistinctFields += vectorsStream.ReadVInt();
-              }
-              ++totalDistinctFields;
-              PackedInts.ReaderIterator it = PackedInts.GetReaderIteratorNoHeader(vectorsStream, PackedInts.Format.PACKED, packedIntsVersion, totalDistinctFields, bitsPerFieldNum, 1);
-              fieldNums = new int[totalDistinctFields];
-              for (int i = 0; i < totalDistinctFields; ++i) {
-                fieldNums[i] = (int) it.Next();
-              }
+                int token = vectorsStream.ReadByte() & 0xFF;
+                int bitsPerFieldNum = token & 0x1F;
+                int totalDistinctFields = Number.URShift(token, 5);
+                if (totalDistinctFields == 0x07)
+                {
+                    totalDistinctFields += vectorsStream.ReadVInt();
+                }
+                ++totalDistinctFields;
+                PackedInts.IReaderIterator it = PackedInts.GetReaderIteratorNoHeader(vectorsStream, PackedInts.Format.PACKED, packedIntsVersion, totalDistinctFields, bitsPerFieldNum, 1);
+                fieldNums = new int[totalDistinctFields];
+                for (int i = 0; i < totalDistinctFields; ++i)
+                {
+                    fieldNums[i] = (int)it.Next();
+                }
             }
 
             // read field numbers and flags
             int[] fieldNumOffs = new int[numFields];
-            PackedInts.Reader flags;
-            {
-              int bitsPerOff = PackedInts.BitsRequired(fieldNums.Length - 1);
-              PackedInts.Reader allFieldNumOffs = PackedInts.GetReaderNoHeader(vectorsStream, PackedInts.Format.PACKED, packedIntsVersion, totalFields, bitsPerOff);
-              switch (vectorsStream.ReadVInt()) {
-                case 0:
-                  PackedInts.Reader fieldFlags = PackedInts.getReaderNoHeader(vectorsStream, PackedInts.Format.PACKED, packedIntsVersion, fieldNums.Length, FLAGS_BITS);
-                  PackedInts.Mutable f = PackedInts.GetMutable(totalFields, FLAGS_BITS, PackedInts.COMPACT);
-                  for (int i = 0; i < totalFields; ++i) {
-                    int fieldNumOff = (int) allFieldNumOffs.Get(i);
-                    int fgs = (int) fieldFlags.Get(fieldNumOff);
-                    f.Set(i, fgs);
-                  }
-                  flags = f;
-                  break;
-                case 1:
-                  flags = PackedInts.GetReaderNoHeader(vectorsStream, PackedInts.Format.PACKED, packedIntsVersion, totalFields, FLAGS_BITS);
-                  break;
-                default:
-                  throw new AssertionError();
-              }
-              for (int i = 0; i < numFields; ++i) {
-                //hackmp - TODO - NEEDS REVIEW
-                //Here again, seems to be a larger impact to change all ints to long, than simply cast.  Will need Pual to review..
-                fieldNumOffs[i] = (int) allFieldNumOffs.Get((int)skip + i);
-              }
+            PackedInts.IReader flags;
+            {
+                int bitsPerOff = PackedInts.BitsRequired(fieldNums.Length - 1);
+                PackedInts.IReader allFieldNumOffs = PackedInts.GetReaderNoHeader(vectorsStream, PackedInts.Format.PACKED, packedIntsVersion, totalFields, bitsPerOff);
+                switch (vectorsStream.ReadVInt())
+                {
+                    case 0:
+                        PackedInts.IReader fieldFlags = PackedInts.GetReaderNoHeader(vectorsStream, PackedInts.Format.PACKED, packedIntsVersion, fieldNums.Length, CompressingTermVectorsWriter.FLAGS_BITS);
+                        PackedInts.IMutable f = PackedInts.GetMutable((int)totalFields, CompressingTermVectorsWriter.FLAGS_BITS, PackedInts.COMPACT);
+                        for (int i = 0; i < totalFields; ++i)
+                        {
+                            int fieldNumOff = (int)allFieldNumOffs.Get(i);
+                            int fgs = (int)fieldFlags.Get(fieldNumOff);
+                            f.Set(i, fgs);
+                        }
+                        flags = f;
+                        break;
+                    case 1:
+                        flags = PackedInts.GetReaderNoHeader(vectorsStream, PackedInts.Format.PACKED, packedIntsVersion, totalFields, CompressingTermVectorsWriter.FLAGS_BITS);
+                        break;
+                    default:
+                        throw new InvalidOperationException();
+                }
+                for (int i = 0; i < numFields; ++i)
+                {
+                    //hackmp - TODO - NEEDS REVIEW
+                    //Here again, seems to be a larger impact to change all ints to long than simply cast. Will need Paul to review.
+                    fieldNumOffs[i] = (int)allFieldNumOffs.Get((int)skip + i);
+                }
             }
 
             // number of terms per field for all fields
-            PackedInts.Reader numTerms;
+            PackedInts.IReader numTerms;
             long totalTerms;
             {
-              int bitsRequired = vectorsStream.ReadVInt();
-              numTerms = PackedInts.GetReaderNoHeader(vectorsStream, PackedInts.Format.PACKED, packedIntsVersion, totalFields, bitsRequired);
-              long sum = 0;
-              for (int i = 0; i < totalFields; ++i) {
-                sum += numTerms.Get(i);
-              }
-              totalTerms = sum;
+                int bitsRequired = vectorsStream.ReadVInt();
+                numTerms = PackedInts.GetReaderNoHeader(vectorsStream, PackedInts.Format.PACKED, packedIntsVersion, totalFields, bitsRequired);
+                long sum = 0;
+                for (int i = 0; i < totalFields; ++i)
+                {
+                    sum += numTerms.Get(i);
+                }
+                totalTerms = sum;
             }
 
             // term lengths
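
A note on the header decoded a few hunks up: the field-numbers token packs two
values into one byte, with the low five bits holding bitsPerFieldNum and the
high three bits holding totalDistinctFields - 1 (0x07 is an escape meaning
"add a following VInt"). Number.URShift is the Support-class substitute for
Java's >>>, since C#'s >> on int is an arithmetic shift. A sketch of the
decoding, with readVInt standing in for the stream:

    using System;

    class Program
    {
        static void DecodeToken(int token, Func<int> readVInt,
                                out int bitsPerFieldNum, out int totalDistinctFields)
        {
            bitsPerFieldNum = token & 0x1F;                 // low 5 bits
            totalDistinctFields = (int)((uint)token >> 5);  // unsigned shift
            if (totalDistinctFields == 0x07)                // escape marker
                totalDistinctFields += readVInt();
            ++totalDistinctFields;
        }

        static void Main()
        {
            int bits, fields;
            DecodeToken((2 << 5) | 13, () => 0, out bits, out fields);
            Console.WriteLine(bits + " " + fields);  // prints "13 3"
        }
    }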
@@ -226,162 +266,196 @@ namespace Lucene.Net.Codecs.Compressing
             int[][] prefixLengths = new int[numFields][];
             int[][] suffixLengths = new int[numFields][];
             {
-              reader.Reset(vectorsStream, totalTerms);
-              // skip
-              long toSkip = 0;
-              for (int i = 0; i < skip; ++i) {
-                toSkip += numTerms.Get(i);
-              }
-              reader.Skip(toSkip);
-              // read prefix lengths
-              for (int i = 0; i < numFields; ++i) {
-                //hackmp - TODO - NEEDS REVIEW
-                //casting long to int
-                long termCount = (int) numTerms.Get((int)skip + i);
-                int[] fieldPrefixLengths = new int[termCount];
-                prefixLengths[i] = fieldPrefixLengths;
-                for (int j = 0; j < termCount; ) {
-                  //hackmp - TODO - NEEDS REVIEW
-                  //casting long to int..
-                  LongsRef next = reader.Next((int)termCount - j);
-                  for (int k = 0; k < next.length; ++k) {
-                    fieldPrefixLengths[j++] = (int) next.longs[next.offset + k];
-                  }
-                }
-              }
-              reader.Skip(totalTerms - reader.Ord);
-
-              reader.Reset(vectorsStream, totalTerms);
-              // skip
-              toSkip = 0;
-              for (int i = 0; i < skip; ++i) {
-                for (int j = 0; j < numTerms.Get(i); ++j) {
-                  docOff += reader.Next();
-                }
-              }
-              for (int i = 0; i < numFields; ++i) {
-                  //HACKMP - TODO - NEEDS REVIEW
-                  //..and again, casting long to int
-                int termCount = (int) numTerms.Get((int)skip + i);
-                int[] fieldSuffixLengths = new int[termCount];
-                suffixLengths[i] = fieldSuffixLengths;
-                for (int j = 0; j < termCount; ) {
-                  LongsRef next = reader.Next(termCount - j);
-                  for (int k = 0; k < next.length; ++k) {
-                    fieldSuffixLengths[j++] = (int) next.longs[next.offset + k];
-                  }
-                }
-                fieldLengths[i] = sum(suffixLengths[i]);
-                docLen += fieldLengths[i];
-              }     
-              totalLen = docOff + docLen;
-              for (long i = skip + numFields; i < totalFields; ++i) {
-                  //hackmp - TODO - NEEDS REVIEW
-                  //long > int
-                for (int j = 0; j < numTerms.Get((int)i); ++j) 
-                {
-                  totalLen += reader.Next();
-                }
-              }
+                reader.Reset(vectorsStream, totalTerms);
+                // skip
+                long toSkip = 0;
+                for (int i = 0; i < skip; ++i)
+                {
+                    toSkip += numTerms.Get(i);
+                }
+                reader.Skip(toSkip);
+                // read prefix lengths
+                for (int i = 0; i < numFields; ++i)
+                {
+                    //hackmp - TODO - NEEDS REVIEW
+                    //casting long to int
+                    long termCount = (int)numTerms.Get((int)skip + i);
+                    int[] fieldPrefixLengths = new int[termCount];
+                    prefixLengths[i] = fieldPrefixLengths;
+                    for (int j = 0; j < termCount; )
+                    {
+                        //hackmp - TODO - NEEDS REVIEW
+                        //casting long to int..
+                        LongsRef next = reader.Next((int)termCount - j);
+                        for (int k = 0; k < next.length; ++k)
+                        {
+                            fieldPrefixLengths[j++] = (int)next.longs[next.offset + k];
+                        }
+                    }
+                }
+                reader.Skip(totalTerms - reader.Ord);
+
+                reader.Reset(vectorsStream, totalTerms);
+                // skip
+                toSkip = 0;
+                for (int i = 0; i < skip; ++i)
+                {
+                    for (int j = 0; j < numTerms.Get(i); ++j)
+                    {
+                        docOff += reader.Next();
+                    }
+                }
+                for (int i = 0; i < numFields; ++i)
+                {
+                    //HACKMP - TODO - NEEDS REVIEW
+                    //..and again, casting long to int
+                    int termCount = (int)numTerms.Get((int)skip + i);
+                    int[] fieldSuffixLengths = new int[termCount];
+                    suffixLengths[i] = fieldSuffixLengths;
+                    for (int j = 0; j < termCount; )
+                    {
+                        LongsRef next = reader.Next(termCount - j);
+                        for (int k = 0; k < next.length; ++k)
+                        {
+                            fieldSuffixLengths[j++] = (int)next.longs[next.offset + k];
+                        }
+                    }
+                    fieldLengths[i] = Sum(suffixLengths[i]);
+                    docLen += fieldLengths[i];
+                }
+                totalLen = docOff + docLen;
+                for (long i = skip + numFields; i < totalFields; ++i)
+                {
+                    //hackmp - TODO - NEEDS REVIEW
+                    //long > int
+                    for (int j = 0; j < numTerms.Get((int)i); ++j)
+                    {
+                        totalLen += reader.Next();
+                    }
+                }
             }
 
             // term freqs
             int[] termFreqs = new int[totalTerms];
             {
-              reader.Reset(vectorsStream, totalTerms);
-              for (int i = 0; i < totalTerms; ) {
-                //hackmp - TODO - NEEDS REVIEW
-                //long > int
-                LongsRef next = reader.Next((int)totalTerms - i);
-                for (int k = 0; k < next.length; ++k) {
-                  termFreqs[i++] = 1 + (int) next.longs[next.offset + k];
+                reader.Reset(vectorsStream, totalTerms);
+                for (int i = 0; i < totalTerms; )
+                {
+                    //hackmp - TODO - NEEDS REVIEW
+                    //long > int
+                    LongsRef next = reader.Next((int)totalTerms - i);
+                    for (int k = 0; k < next.length; ++k)
+                    {
+                        termFreqs[i++] = 1 + (int)next.longs[next.offset + k];
+                    }
                 }
-              }
             }
 
             // total number of positions, offsets and payloads
             int totalPositions = 0, totalOffsets = 0, totalPayloads = 0;
-            for (int i = 0, termIndex = 0; i < totalFields; ++i) 
+            for (int i = 0, termIndex = 0; i < totalFields; ++i)
             {
-              int f = (int) flags.Get(i);
-              int termCount = (int) numTerms.Get(i);
-              for (int j = 0; j < termCount; ++j) {
-                int freq = termFreqs[termIndex++];
-                if ((f & POSITIONS) != 0) {
-                  totalPositions += freq;
-                }
-                if ((f & OFFSETS) != 0) {
-                  totalOffsets += freq;
-                }
-                if ((f & PAYLOADS) != 0) {
-                  totalPayloads += freq;
+                int f = (int)flags.Get(i);
+                int termCount = (int)numTerms.Get(i);
+                for (int j = 0; j < termCount; ++j)
+                {
+                    int freq = termFreqs[termIndex++];
+                    if ((f & CompressingTermVectorsWriter.POSITIONS) != 0)
+                    {
+                        totalPositions += freq;
+                    }
+                    if ((f & CompressingTermVectorsWriter.OFFSETS) != 0)
+                    {
+                        totalOffsets += freq;
+                    }
+                    if ((f & CompressingTermVectorsWriter.PAYLOADS) != 0)
+                    {
+                        totalPayloads += freq;
+                    }
                 }
-              }
             }
 
-            int[][] positionIndex = positionIndex(skip, numFields, numTerms, termFreqs);
+            int[][] positionIndex = PositionIndex(skip, numFields, numTerms, termFreqs);
             int[][] positions, startOffsets, lengths;
-            if (totalPositions > 0) {
-              positions = readPositions(skip, numFields, flags, numTerms, termFreqs, POSITIONS, totalPositions, positionIndex);
-            } else {
-              positions = new int[numFields][];
-            }
-
-            if (totalOffsets > 0) {
-              // average number of chars per term
-              float[] charsPerTerm = new float[fieldNums.Length];
-              for (int i = 0; i < charsPerTerm.Length; ++i) {
-                charsPerTerm[i] = Number.IntBitsToFloat(vectorsStream.ReadInt());
-              }
-              startOffsets = readPositions(skip, numFields, flags, numTerms, termFreqs, OFFSETS, totalOffsets, positionIndex);
-              lengths = readPositions(skip, numFields, flags, numTerms, termFreqs, OFFSETS, totalOffsets, positionIndex);
-
-              for (int i = 0; i < numFields; ++i) {
-                int[] fStartOffsets = startOffsets[i];
-                int[] fPositions = positions[i];
-                // patch offsets from positions
-                if (fStartOffsets != null && fPositions != null) {
-                  float fieldCharsPerTerm = charsPerTerm[fieldNumOffs[i]];
-                  for (int j = 0; j < startOffsets[i].Length; ++j) {
-                    fStartOffsets[j] += (int) (fieldCharsPerTerm * fPositions[j]);
-                  }
-                }
-                if (fStartOffsets != null) {
-                  int[] fPrefixLengths = prefixLengths[i];
-                  int[] fSuffixLengths = suffixLengths[i];
-                  int[] fLengths = lengths[i];
-                    //hackmp - TODO - NEEDS REVIEW
-                    //long > int
-                  for (int j = 0, end = (int) numTerms.Get((int)skip + i); j < end; ++j) {
-                    // delta-decode start offsets and  patch lengths using term lengths
-                    int termLength = fPrefixLengths[j] + fSuffixLengths[j];
-                    lengths[i][positionIndex[i][j]] += termLength;
-                    for (int k = positionIndex[i][j] + 1; k < positionIndex[i][j + 1]; ++k) {
-                      fStartOffsets[k] += fStartOffsets[k - 1];
-                      fLengths[k] += termLength;
+            if (totalPositions > 0)
+            {
+                positions = ReadPositions(skip, numFields, flags, numTerms, termFreqs, CompressingTermVectorsWriter.POSITIONS, totalPositions, positionIndex);
+            }
+            else
+            {
+                positions = new int[numFields][];
+            }
+
+            if (totalOffsets > 0)
+            {
+                // average number of chars per term
+                float[] charsPerTerm = new float[fieldNums.Length];
+                for (int i = 0; i < charsPerTerm.Length; ++i)
+                {
+                    charsPerTerm[i] = Number.IntBitsToFloat(vectorsStream.ReadInt());
+                }
+                startOffsets = ReadPositions(skip, numFields, flags, numTerms, termFreqs, CompressingTermVectorsWriter.OFFSETS, totalOffsets, positionIndex);
+                lengths = ReadPositions(skip, numFields, flags, numTerms, termFreqs, CompressingTermVectorsWriter.OFFSETS, totalOffsets, positionIndex);
+
+                for (int i = 0; i < numFields; ++i)
+                {
+                    int[] fStartOffsets = startOffsets[i];
+                    int[] fPositions = positions[i];
+                    // patch offsets from positions
+                    if (fStartOffsets != null && fPositions != null)
+                    {
+                        float fieldCharsPerTerm = charsPerTerm[fieldNumOffs[i]];
+                        for (int j = 0; j < startOffsets[i].Length; ++j)
+                        {
+                            fStartOffsets[j] += (int)(fieldCharsPerTerm * fPositions[j]);
+                        }
                     }
-                  }
-                }
-              }
-            } else {
-              startOffsets = lengths = new int[numFields][];
-            }
-            if (totalPositions > 0) {
-              // delta-decode positions
-              for (int i = 0; i < numFields; ++i) {
-                int[] fPositions = positions[i];
-                int[] fpositionIndex = positionIndex[i];
-                if (fPositions != null) {
-                    //hackmp - TODO - NEED REVIEW
-                    //long > int
-                  for (int j = 0, end = (int) numTerms.Get((int)skip + i); j < end; ++j) {
-                    // delta-decode start offsets
-                    for (int k = fpositionIndex[j] + 1; k < fpositionIndex[j + 1]; ++k) {
-                      fPositions[k] += fPositions[k - 1];
+                    if (fStartOffsets != null)
+                    {
+                        int[] fPrefixLengths = prefixLengths[i];
+                        int[] fSuffixLengths = suffixLengths[i];
+                        int[] fLengths = lengths[i];
+                        //hackmp - TODO - NEEDS REVIEW
+                        //long > int
+                        for (int j = 0, end = (int)numTerms.Get((int)skip + i); j < end; ++j)
+                        {
+                            // delta-decode start offsets and patch lengths using term lengths
+                            int termLength = fPrefixLengths[j] + fSuffixLengths[j];
+                            lengths[i][positionIndex[i][j]] += termLength;
+                            for (int k = positionIndex[i][j] + 1; k < positionIndex[i][j + 1]; ++k)
+                            {
+                                fStartOffsets[k] += fStartOffsets[k - 1];
+                                fLengths[k] += termLength;
+                            }
+                        }
+                    }
+                }
+            }
+            else
+            {
+                startOffsets = lengths = new int[numFields][];
+            }
+            if (totalPositions > 0)
+            {
+                // delta-decode positions
+                for (int i = 0; i < numFields; ++i)
+                {
+                    int[] fPositions = positions[i];
+                    int[] fpositionIndex = positionIndex[i];
+                    if (fPositions != null)
+                    {
+                        //hackmp - TODO - NEEDS REVIEW
+                        //long > int
+                        for (int j = 0, end = (int)numTerms.Get((int)skip + i); j < end; ++j)
+                        {
+                            // delta-decode start offsets
+                            for (int k = fpositionIndex[j] + 1; k < fpositionIndex[j + 1]; ++k)
+                            {
+                                fPositions[k] += fPositions[k - 1];
+                            }
+                        }
                     }
-                  }
                 }
-              }
             }
 
             // payload lengths
@@ -389,64 +463,77 @@ namespace Lucene.Net.Codecs.Compressing
             long totalPayloadLength = 0;
             int payloadOff = 0;
             int payloadLen = 0;
-            if (totalPayloads > 0) {
-              reader.Reset(vectorsStream, totalPayloads);
-              // skip
-              int termIndex = 0;
-              for (int i = 0; i < skip; ++i) {
-                int f = (int) flags.Get(i);
-                int termCount = (int) numTerms.Get(i);
-                if ((f & PAYLOADS) != 0) {
-                  for (int j = 0; j < termCount; ++j) {
-                    int freq = termFreqs[termIndex + j];
-                    for (int k = 0; k < freq; ++k) {
-                      int l = (int) reader.Next();
-                      payloadOff += l;
+            if (totalPayloads > 0)
+            {
+                reader.Reset(vectorsStream, totalPayloads);
+                // skip
+                int termIndex = 0;
+                for (int i = 0; i < skip; ++i)
+                {
+                    int f = (int)flags.Get(i);
+                    int termCount = (int)numTerms.Get(i);
+                    if ((f & CompressingTermVectorsWriter.PAYLOADS) != 0)
+                    {
+                        for (int j = 0; j < termCount; ++j)
+                        {
+                            int freq = termFreqs[termIndex + j];
+                            for (int k = 0; k < freq; ++k)
+                            {
+                                int l = (int)reader.Next();
+                                payloadOff += l;
+                            }
+                        }
                     }
-                  }
+                    termIndex += termCount;
                 }
-                termIndex += termCount;
-              }
-              totalPayloadLength = payloadOff;
-              // read doc payload lengths
-              for (int i = 0; i < numFields; ++i) {
-                  //hackmp - TODO - NEEDS REVIEW
-                  //long > int
-                int f = (int) flags.Get((int)skip + i);
-                int termCount = (int) numTerms.Get((int)skip + i);
-                if ((f & PAYLOADS) != 0) {
-                  int totalFreq = positionIndex[i][termCount];
-                  payloadIndex[i] = new int[totalFreq + 1];
-                  int posIdx = 0;
-                  payloadIndex[i][posIdx] = payloadLen;
-                  for (int j = 0; j < termCount; ++j) {
-                    int freq = termFreqs[termIndex + j];
-                    for (int k = 0; k < freq; ++k) {
-                      int payloadLength = (int) reader.Next();
-                      payloadLen += payloadLength;
-                      payloadIndex[i][posIdx+1] = payloadLen;
-                      ++posIdx;
+                totalPayloadLength = payloadOff;
+                // read doc payload lengths
+                for (int i = 0; i < numFields; ++i)
+                {
+                    //hackmp - TODO - NEEDS REVIEW
+                    //long > int
+                    int f = (int)flags.Get((int)skip + i);
+                    int termCount = (int)numTerms.Get((int)skip + i);
+                    if ((f & CompressingTermVectorsWriter.PAYLOADS) != 0)
+                    {
+                        int totalFreq = positionIndex[i][termCount];
+                        payloadIndex[i] = new int[totalFreq + 1];
+                        int posIdx = 0;
+                        payloadIndex[i][posIdx] = payloadLen;
+                        for (int j = 0; j < termCount; ++j)
+                        {
+                            int freq = termFreqs[termIndex + j];
+                            for (int k = 0; k < freq; ++k)
+                            {
+                                int payloadLength = (int)reader.Next();
+                                payloadLen += payloadLength;
+                                payloadIndex[i][posIdx + 1] = payloadLen;
+                                ++posIdx;
+                            }
+                        }
                     }
-                  }
+                    termIndex += termCount;
                 }
-                termIndex += termCount;
-              }
-              totalPayloadLength += payloadLen;
-              for (long i = skip + numFields; i < totalFields; ++i) {
-                  //hackmp - TODO - NEEDS REVIEW
-                  //long > int
-                int f = (int) flags.Get((int)i);
-                int termCount = (int) numTerms.Get((int)i);
-                if ((f & PAYLOADS) != 0) {
-                  for (int j = 0; j < termCount; ++j) {
-                    int freq = termFreqs[termIndex + j];
-                    for (int k = 0; k < freq; ++k) {
-                      totalPayloadLength += reader.Next();
+                totalPayloadLength += payloadLen;
+                for (long i = skip + numFields; i < totalFields; ++i)
+                {
+                    //hackmp - TODO - NEEDS REVIEW
+                    //long > int
+                    int f = (int)flags.Get((int)i);
+                    int termCount = (int)numTerms.Get((int)i);
+                    if ((f & CompressingTermVectorsWriter.PAYLOADS) != 0)
+                    {
+                        for (int j = 0; j < termCount; ++j)
+                        {
+                            int freq = termFreqs[termIndex + j];
+                            for (int k = 0; k < freq; ++k)
+                            {
+                                totalPayloadLength += reader.Next();
+                            }
+                        }
                     }
-                  }
+                    termIndex += termCount;
                 }
-                termIndex += termCount;
-              }
             }
 
             // decompress data
@@ -457,51 +544,654 @@ namespace Lucene.Net.Codecs.Compressing
             suffixBytes.length = (int)docLen;
             BytesRef payloadBytes = new BytesRef(suffixBytes.bytes, suffixBytes.offset + (int)docLen, payloadLen);
 
-            int[] fieldFlags = new int[numFields];
-            for (int i = 0; i < numFields; ++i) {
+            int[] fieldFlags2 = new int[numFields];
+            for (int i = 0; i < numFields; ++i)
+            {
                 //hackmp - TODO - NEEDS REVIEW
                 //long > int
-              fieldFlags[i] = (int) flags.Get((int)skip + i);
+                fieldFlags2[i] = (int)flags.Get((int)skip + i);
             }
 
             int[] fieldNumTerms = new int[numFields];
-            for (int i = 0; i < numFields; ++i) {
+            for (int i = 0; i < numFields; ++i)
+            {
                 //hackmp - TODO - NEEDS REVIEW
-              fieldNumTerms[i] = (int) numTerms.Get((int)skip + i);
+                fieldNumTerms[i] = (int)numTerms.Get((int)skip + i);
             }
 
             int[][] fieldTermFreqs = new int[numFields][];
             {
-              long termIdx = 0;
-              for (int i = 0; i < skip; ++i) {
-                termIdx += numTerms.Get(i);
-              }
-              for (int i = 0; i < numFields; ++i) {
-                  //hackmp - TODO - NEEDS REVIEW
-                  //long > int
-                long termCount = (int) numTerms.Get((int)skip + i);
-                fieldTermFreqs[i] = new int[termCount];
-                for (int j = 0; j < termCount; ++j) {
-                  fieldTermFreqs[i][j] = termFreqs[termIdx++];
+                long termIdx = 0;
+                for (int i = 0; i < skip; ++i)
+                {
+                    termIdx += numTerms.Get(i);
+                }
+                for (int i = 0; i < numFields; ++i)
+                {
+                    //hackmp - TODO - NEEDS REVIEW
+                    //long > int
+                    long termCount = (int)numTerms.Get((int)skip + i);
+                    fieldTermFreqs[i] = new int[termCount];
+                    for (int j = 0; j < termCount; ++j)
+                    {
+                        fieldTermFreqs[i][j] = termFreqs[termIdx++];
+                    }
                 }
-              }
             }
 
-            return new TVFields(fieldNums, fieldFlags, fieldNumOffs, fieldNumTerms, fieldLengths,
+            return new TVFields(this, fieldNums, fieldFlags2, fieldNumOffs, fieldNumTerms, fieldLengths,
                 prefixLengths, suffixLengths, fieldTermFreqs,
                 positionIndex, positions, startOffsets, lengths,
                 payloadBytes, payloadIndex,
                 suffixBytes);
         }
 
-        public override object Clone()
+        private int[][] PositionIndex(int skip, int numFields, PackedInts.IReader numTerms, int[] termFreqs)
         {
-            return new CompressingTermVectorsReader(this);
+            int[][] positionIndex = new int[numFields][];
+            int termIndex = 0;
+            for (int i = 0; i < skip; ++i)
+            {
+                int termCount = (int)numTerms.Get(i);
+                termIndex += termCount;
+            }
+            for (int i = 0; i < numFields; ++i)
+            {
+                int termCount = (int)numTerms.Get(skip + i);
+                positionIndex[i] = new int[termCount + 1];
+                for (int j = 0; j < termCount; ++j)
+                {
+                    int freq = termFreqs[termIndex + j];
+                    positionIndex[i][j + 1] = positionIndex[i][j] + freq;
+                }
+                termIndex += termCount;
+            }
+            return positionIndex;
         }
 
-        protected override void Dispose(bool disposing)
+        private int[][] ReadPositions(int skip, int numFields, PackedInts.IReader flags, PackedInts.IReader numTerms, int[] termFreqs, int flag, int totalPositions, int[][] positionIndex)
         {
-            throw new NotImplementedException();
+            int[][] positions = new int[numFields][];
+            reader.Reset(vectorsStream, totalPositions);
+            // skip
+            int toSkip = 0;
+            int termIndex = 0;
+            for (int i = 0; i < skip; ++i)
+            {
+                int f = (int)flags.Get(i);
+                int termCount = (int)numTerms.Get(i);
+                if ((f & flag) != 0)
+                {
+                    for (int j = 0; j < termCount; ++j)
+                    {
+                        int freq = termFreqs[termIndex + j];
+                        toSkip += freq;
+                    }
+                }
+                termIndex += termCount;
+            }
+            reader.Skip(toSkip);
+            // read doc positions
+            for (int i = 0; i < numFields; ++i)
+            {
+                int f = (int)flags.Get(skip + i);
+                int termCount = (int)numTerms.Get(skip + i);
+                if ((f & flag) != 0)
+                {
+                    int totalFreq = positionIndex[i][termCount];
+                    int[] fieldPositions = new int[totalFreq];
+                    positions[i] = fieldPositions;
+                    for (int j = 0; j < totalFreq; )
+                    {
+                        LongsRef nextPositions = reader.Next(totalFreq - j);
+                        for (int k = 0; k < nextPositions.length; ++k)
+                        {
+                            fieldPositions[j++] = (int)nextPositions.longs[nextPositions.offset + k];
+                        }
+                    }
+                }
+                termIndex += termCount;
+            }
+            reader.Skip(totalPositions - reader.Ord);
+            return positions;
+        }
+
+        private class TVFields : Fields
+        {
+            private readonly int[] fieldNums, fieldFlags, fieldNumOffs, numTerms, fieldLengths;
+            private readonly int[][] prefixLengths, suffixLengths, termFreqs, positionIndex, positions, startOffsets, lengths, payloadIndex;
+            private readonly BytesRef suffixBytes, payloadBytes;
+
+            private readonly CompressingTermVectorsReader parent;
+
+            public TVFields(CompressingTermVectorsReader parent, int[] fieldNums, int[] fieldFlags, int[] fieldNumOffs, int[] numTerms, int[] fieldLengths,
+                int[][] prefixLengths, int[][] suffixLengths, int[][] termFreqs,
+                int[][] positionIndex, int[][] positions, int[][] startOffsets, int[][] lengths,
+                BytesRef payloadBytes, int[][] payloadIndex,
+                BytesRef suffixBytes)
+            {
+                this.parent = parent; // .NET port
+
+                this.fieldNums = fieldNums;
+                this.fieldFlags = fieldFlags;
+                this.fieldNumOffs = fieldNumOffs;
+                this.numTerms = numTerms;
+                this.fieldLengths = fieldLengths;
+                this.prefixLengths = prefixLengths;
+                this.suffixLengths = suffixLengths;
+                this.termFreqs = termFreqs;
+                this.positionIndex = positionIndex;
+                this.positions = positions;
+                this.startOffsets = startOffsets;
+                this.lengths = lengths;
+                this.payloadBytes = payloadBytes;
+                this.payloadIndex = payloadIndex;
+                this.suffixBytes = suffixBytes;
+            }
+
+            public override IEnumerator<string> GetEnumerator()
+            {
+                return GetFieldInfoNameEnumerable().GetEnumerator();
+            }
+
+            private IEnumerable<string> GetFieldInfoNameEnumerable()
+            {
+                int i = 0;
+
+                while (i < fieldNumOffs.Length)
+                {
+                    int fieldNum = fieldNums[fieldNumOffs[i++]];
+                    yield return parent.fieldInfos.FieldInfo(fieldNum).name;
+                }
+            }
+
+            public override Terms Terms(string field)
+            {
+                FieldInfo fieldInfo = parent.fieldInfos.FieldInfo(field);
+                if (fieldInfo == null)
+                {
+                    return null;
+                }
+                int idx = -1;
+                for (int i = 0; i < fieldNumOffs.Length; ++i)
+                {
+                    if (fieldNums[fieldNumOffs[i]] == fieldInfo.number)
+                    {
+                        idx = i;
+                        break;
+                    }
+                }
+
+                if (idx == -1 || numTerms[idx] == 0)
+                {
+                    // no term
+                    return null;
+                }
+                int fieldOff = 0, fieldLen = -1;
+                for (int i = 0; i < fieldNumOffs.Length; ++i)
+                {
+                    if (i < idx)
+                    {
+                        fieldOff += fieldLengths[i];
+                    }
+                    else
+                    {
+                        fieldLen = fieldLengths[i];
+                        break;
+                    }
+                }
+                //assert fieldLen >= 0;
+                return new TVTerms(parent, numTerms[idx], fieldFlags[idx],
+                    prefixLengths[idx], suffixLengths[idx], termFreqs[idx],
+                    positionIndex[idx], positions[idx], startOffsets[idx], lengths[idx],
+                    payloadIndex[idx], payloadBytes,
+                    new BytesRef(suffixBytes.bytes, suffixBytes.offset + fieldOff, fieldLen));
+            }
+
+            public override int Size
+            {
+                get { return fieldNumOffs.Length; }
+            }
         }
+
+        private class TVTerms : Terms
+        {
+            private readonly int numTerms, flags;
+            private readonly int[] prefixLengths, suffixLengths, termFreqs, positionIndex, positions, startOffsets, lengths, payloadIndex;
+            private readonly BytesRef termBytes, payloadBytes;
+
+            private readonly CompressingTermVectorsReader parent;
+
+            internal TVTerms(CompressingTermVectorsReader parent, int numTerms, int flags, int[] prefixLengths, int[] suffixLengths, int[] termFreqs,
+                int[] positionIndex, int[] positions, int[] startOffsets, int[] lengths,
+                int[] payloadIndex, BytesRef payloadBytes,
+                BytesRef termBytes)
+            {
+                this.parent = parent; // .NET Port
+
+                this.numTerms = numTerms;
+                this.flags = flags;
+                this.prefixLengths = prefixLengths;
+                this.suffixLengths = suffixLengths;
+                this.termFreqs = termFreqs;
+                this.positionIndex = positionIndex;
+                this.positions = positions;
+                this.startOffsets = startOffsets;
+                this.lengths = lengths;
+                this.payloadIndex = payloadIndex;
+                this.payloadBytes = payloadBytes;
+                this.termBytes = termBytes;
+            }
+
+            public override TermsEnum Iterator(TermsEnum reuse)
+            {
+                TVTermsEnum termsEnum;
+                if (reuse != null && reuse is TVTermsEnum)
+                {
+                    termsEnum = (TVTermsEnum)reuse;
+                }
+                else
+                {
+                    termsEnum = new TVTermsEnum();
+                }
+                termsEnum.Reset(numTerms, flags, prefixLengths, suffixLengths, termFreqs, positionIndex, positions, startOffsets, lengths,
+                    payloadIndex, payloadBytes,
+                    new ByteArrayDataInput((byte[])(Array)termBytes.bytes, termBytes.offset, termBytes.length));
+                return termsEnum;
+            }
+
+            public override IComparer<BytesRef> Comparator
+            {
+                get { return BytesRef.UTF8SortedAsUnicodeComparer; }
+            }
+
+            public override long Size
+            {
+                get { return numTerms; }
+            }
+
+            public override long SumTotalTermFreq
+            {
+                get { return -1L; }
+            }
+
+            public override long SumDocFreq
+            {
+                get { return numTerms; }
+            }
+
+            public override int DocCount
+            {
+                get { return 1; }
+            }
+
+            public override bool HasOffsets
+            {
+                get { return (flags & CompressingTermVectorsWriter.OFFSETS) != 0; }
+            }
+
+            public override bool HasPositions
+            {
+                get { return (flags & CompressingTermVectorsWriter.POSITIONS) != 0; }
+            }
+
+            public override bool HasPayloads
+            {
+                get { return (flags & CompressingTermVectorsWriter.PAYLOADS) != 0; }
+            }
+        }
+
+        private class TVTermsEnum : TermsEnum
+        {
+            private int numTerms, startPos, ord;
+            private int[] prefixLengths, suffixLengths, termFreqs, positionIndex, positions, startOffsets, lengths, payloadIndex;
+            private ByteArrayDataInput input;
+            private BytesRef payloads;
+            private readonly BytesRef term;
+
+            internal TVTermsEnum()
+            {
+                term = new BytesRef(16);
+            }
+
+            internal void Reset(int numTerms, int flags, int[] prefixLengths, int[] suffixLengths, int[] termFreqs, int[] positionIndex, int[] positions, int[] startOffsets, int[] lengths,
+                int[] payloadIndex, BytesRef payloads, ByteArrayDataInput input)
+            {
+                this.numTerms = numTerms;
+                this.prefixLengths = prefixLengths;
+                this.suffixLengths = suffixLengths;
+                this.termFreqs = termFreqs;
+                this.positionIndex = positionIndex;
+                this.positions = positions;
+                this.startOffsets = startOffsets;
+                this.lengths = lengths;
+                this.payloadIndex = payloadIndex;
+                this.payloads = payloads;
+                this.input = input;
+                startPos = input.Position;
+                Reset();
+            }
+
+            internal void Reset()
+            {
+                term.length = 0;
+                input.Position = startPos;
+                ord = -1;
+            }
+
+            public override BytesRef Next()
+            {
+                if (ord == numTerms - 1)
+                {
+                    return null;
+                }
+                else
+                {
+                    //assert ord < numTerms;
+                    ++ord;
+                }
+
+                // read term
+                term.offset = 0;
+                term.length = prefixLengths[ord] + suffixLengths[ord];
+                if (term.length > term.bytes.Length)
+                {
+                    term.bytes = ArrayUtil.Grow(term.bytes, term.length);
+                }
+                input.ReadBytes(term.bytes, prefixLengths[ord], suffixLengths[ord]);
+
+                return term;
+            }
+
+            public override IComparer<BytesRef> Comparator
+            {
+                get { return BytesRef.UTF8SortedAsUnicodeComparer; }
+            }
+
+            public override SeekStatus SeekCeil(BytesRef text, bool useCache)
+            {
+                if (ord < numTerms && ord >= 0)
+                {
+                    int cmp = Term.CompareTo(text);
+                    if (cmp == 0)
+                    {
+                        return SeekStatus.FOUND;
+                    }
+                    else if (cmp > 0)
+                    {
+                        Reset();
+                    }
+                }
+                // linear scan
+                while (true)
+                {
+                    BytesRef term = Next();
+                    if (term == null)
+                    {
+                        return SeekStatus.END;
+                    }
+                    int cmp = term.CompareTo(text);
+                    if (cmp > 0)
+                    {
+                        return SeekStatus.NOT_FOUND;
+                    }
+                    else if (cmp == 0)
+                    {
+                        return SeekStatus.FOUND;
+                    }
+                }
+            }
+
+            public override void SeekExact(long ord)
+            {
+                if (ord < -1 || ord >= numTerms)
+                {
+                    throw new System.IO.IOException("ord is out of range: ord=" + ord + ", numTerms=" + numTerms);
+                }
+                if (ord < this.ord)
+                {
+                    Reset();
+                }
+                for (int i = this.ord; i < ord; ++i)
+                {
+                    Next();
+                }
+                //assert ord == this.ord();
+            }
+
+            public override BytesRef Term
+            {
+                get { return term; }
+            }
+
+            public override long Ord
+            {
+                get { return ord; }
+            }
+
+            public override int DocFreq
+            {
+                get { return 1; }
+            }
+
+            public override long TotalTermFreq
+            {
+                get { return termFreqs[ord]; }
+            }
+
+            public override DocsEnum Docs(IBits liveDocs, DocsEnum reuse, int flags)
+            {
+                TVDocsEnum docsEnum;
+                if (reuse != null && reuse is TVDocsEnum)
+                {
+                    docsEnum = (TVDocsEnum)reuse;
+                }
+                else
+                {
+                    docsEnum = new TVDocsEnum();
+                }
+
+                docsEnum.Reset(liveDocs, termFreqs[ord], positionIndex[ord], positions, startOffsets, lengths, payloads, payloadIndex);
+                return docsEnum;
+            }
+
+            public override DocsAndPositionsEnum DocsAndPositions(IBits liveDocs, DocsAndPositionsEnum reuse, int flags)
+            {
+                if (positions == null && startOffsets == null)
+                {
+                    return null;
+                }
+                // TODO: slightly sheisty
+                return (DocsAndPositionsEnum)Docs(liveDocs, reuse, flags);
+            }
+        }
+
+        private class TVDocsEnum : DocsAndPositionsEnum
+        {
+            private IBits liveDocs;
+            private int doc = -1;
+            private int termFreq;
+            private int positionIndex;
+            private int[] positions;
+            private int[] startOffsets;
+            private int[] lengths;
+            private readonly BytesRef payload;
+            private int[] payloadIndex;
+            private int basePayloadOffset;
+            private int i;
+
+            internal TVDocsEnum()
+            {
+                payload = new BytesRef();
+            }
+
+            public void Reset(IBits liveDocs, int freq, int positionIndex, int[] positions,
+                int[] startOffsets, int[] lengths, BytesRef payloads,
+                int[] payloadIndex)
+            {
+                this.liveDocs = liveDocs;
+                this.termFreq = freq;
+                this.positionIndex = positionIndex;
+                this.positions = positions;
+                this.startOffsets = startOffsets;
+                this.lengths = lengths;
+                this.basePayloadOffset = payloads.offset;
+                this.payload.bytes = payloads.bytes;
+                payload.offset = payload.length = 0;
+                this.payloadIndex = payloadIndex;
+
+                doc = i = -1;
+            }
+
+            private void CheckDoc()
+            {
+                if (doc == NO_MORE_DOCS)
+                {
+                    throw new InvalidOperationException("DocsEnum exhausted");
+                }
+                else if (doc == -1)
+                {
+                    throw new InvalidOperationException("DocsEnum not started");
+                }
+            }
+
+            private void CheckPosition()
+            {
+                CheckDoc();
+                if (i < 0)
+                {
+                    throw new InvalidOperationException("Position enum not started");
+                }
+                else if (i >= termFreq)
+                {
+                    throw new InvalidOperationException("Read past last position");
+                }
+            }
+
+            public override int NextPosition()
+            {
+                if (doc != 0)
+                {
+                    throw new InvalidOperationException();
+                }
+                else if (i >= termFreq - 1)
+                {
+                    throw new InvalidOperationException("Read past last position");
+                }
+
+                ++i;
+
+                if (payloadIndex != null)
+                {
+                    payload.offset = basePayloadOffset + payloadIndex[positionIndex + i];
+                    payload.length = payloadIndex[positionIndex + i + 1] - payloadIndex[positionIndex + i];
+                }
+
+                if (positions == null)
+                {
+                    return -1;
+                }
+                else
+                {
+                    return positions[positionIndex + i];
+                }
+            }
+
+            public override int StartOffset
+            {
+                get
+                {
+                    CheckPosition();
+                    if (startOffsets == null)
+                    {
+                        return -1;
+                    }
+                    else
+                    {
+                        return startOffsets[positionIndex + i];
+                    }
+                }
+            }
+
+            public override int EndOffset
+            {
+                get
+                {
+                    CheckPosition();
+                    if (startOffsets == null)
+                    {
+                        return -1;
+                    }
+                    else
+                    {
+                        return startOffsets[positionIndex + i] + lengths[positionIndex + i];
+                    }
+                }
+            }
+
+            public override BytesRef Payload
+            {
+                get
+                {
+                    CheckPosition();
+                    if (payloadIndex == null || payload.length == 0)
+                    {
+                        return null;
+                    }
+                    else
+                    {
+                        return payload;
+                    }
+                }
+            }
+
+            public override int Freq
+            {
+                get
+                {
+                    CheckDoc();
+                    return termFreq;
+                }
+            }
+
+            public override int DocID
+            {
+                get { return doc; }
+            }
+
+            public override int NextDoc()
+            {
+                if (doc == -1 && (liveDocs == null || liveDocs[0]))
+                {
+                    return (doc = 0);
+                }
+                else
+                {
+                    return (doc = NO_MORE_DOCS);
+                }
+            }
+
+            public override int Advance(int target)
+            {
+                return SlowAdvance(target);
+            }
+
+            public override long Cost
+            {
+                get { return 1; }
+            }
+        }
+
+
+        private static int Sum(int[] arr)
+        {
+            int sum = 0;
+            foreach (int el in arr)
+            {
+                sum += el;
+            }
+            return sum;
+        }
+
     }
 }
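
A note on the PositionIndex helper ported above: per field it builds a
prefix-sum table over the term frequencies, so the positions (and, in the
payload pass, the payload lengths) belonging to term j of field i occupy
the half-open slice positionIndex[i][j] .. positionIndex[i][j+1] of the
flat per-field arrays. Below is a minimal standalone sketch of that
indexing scheme in plain C#; the names are illustrative and not taken
from the port:

    using System;

    static class PrefixSumDemo
    {
        // Build offsets such that entries for term j live in
        // the half-open range [offsets[j], offsets[j + 1]).
        static int[] BuildOffsets(int[] termFreqs)
        {
            int[] offsets = new int[termFreqs.Length + 1];
            for (int j = 0; j < termFreqs.Length; ++j)
            {
                offsets[j + 1] = offsets[j] + termFreqs[j];
            }
            return offsets;
        }

        static void Main()
        {
            int[] termFreqs = { 2, 1, 3 };           // frequency of each term in one field
            int[] offsets = BuildOffsets(termFreqs); // -> { 0, 2, 3, 6 }
            // Positions of term 2 would be flat[3], flat[4], flat[5].
            Console.WriteLine(string.Join(",", offsets));
        }
    }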


[10/50] [abbrv] git commit: nearing completion - this file 50%, overall 90% on namespace

Posted by mh...@apache.org.
nearing completion - this file 50%, overall 90% on namespace


Project: http://git-wip-us.apache.org/repos/asf/lucenenet/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucenenet/commit/d9ad1fea
Tree: http://git-wip-us.apache.org/repos/asf/lucenenet/tree/d9ad1fea
Diff: http://git-wip-us.apache.org/repos/asf/lucenenet/diff/d9ad1fea

Branch: refs/heads/branch_4x
Commit: d9ad1fea5fe5fb1a72a7248fee1e2c04d0a20253
Parents: e47e663
Author: Mike Potts <mi...@feature23.com>
Authored: Sun Jul 14 12:03:09 2013 -0400
Committer: Mike Potts <mi...@feature23.com>
Committed: Sun Jul 14 12:03:09 2013 -0400

----------------------------------------------------------------------
 .../CompressingStoredFieldsIndexReader.cs       |   2 +-
 .../CompressingStoredFieldsIndexWriter.cs       | 167 ++++++
 .../Compressing/CompressingTermVectorsFormat.cs |  28 +
 .../Compressing/CompressingTermVectorsReader.cs | 507 +++++++++++++++++++
 src/core/Lucene.Net.csproj                      |   3 +
 5 files changed, 706 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucenenet/blob/d9ad1fea/src/core/Codecs/Compressing/CompressingStoredFieldsIndexReader.cs
----------------------------------------------------------------------
diff --git a/src/core/Codecs/Compressing/CompressingStoredFieldsIndexReader.cs b/src/core/Codecs/Compressing/CompressingStoredFieldsIndexReader.cs
index f981b32..d5a16df 100644
--- a/src/core/Codecs/Compressing/CompressingStoredFieldsIndexReader.cs
+++ b/src/core/Codecs/Compressing/CompressingStoredFieldsIndexReader.cs
@@ -147,7 +147,7 @@ namespace Lucene.Net.Codecs.Compressing
             return hi;
           }
 
-          private long getStartPointer(int docID) 
+          public long GetStartPointer(int docID) 
           {
             if (docID < 0 || docID >= maxDoc) {
               throw new ArgumentException("docID out of range [0-" + maxDoc + "]: " + docID);

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/d9ad1fea/src/core/Codecs/Compressing/CompressingStoredFieldsIndexWriter.cs
----------------------------------------------------------------------
diff --git a/src/core/Codecs/Compressing/CompressingStoredFieldsIndexWriter.cs b/src/core/Codecs/Compressing/CompressingStoredFieldsIndexWriter.cs
new file mode 100644
index 0000000..ece363a
--- /dev/null
+++ b/src/core/Codecs/Compressing/CompressingStoredFieldsIndexWriter.cs
@@ -0,0 +1,167 @@
+using Lucene.Net.Store;
+using Lucene.Net.Util.Packed;
+using System;
+using System.Collections.Generic;
+using System.Linq;
+using System.Text;
+
+namespace Lucene.Net.Codecs.Compressing
+{
+    public sealed class CompressingStoredFieldsIndexWriter : IDisposable
+    {
+        static readonly int BLOCK_SIZE = 1024; // number of chunks to serialize at once
+
+        private IndexOutput fieldsIndexOut;
+        private int totalDocs;
+        private int blockDocs;
+        private int blockChunks;
+        private long firstStartPointer;
+        private long maxStartPointer;
+        private int[] docBaseDeltas;
+        private long[] startPointerDeltas;
+
+        static long moveSignToLowOrderBit(long n)
+        {
+            return (n >> 63) ^ (n << 1);
+        }
+
+        CompressingStoredFieldsIndexWriter(IndexOutput indexOutput)
+        {
+            this.fieldsIndexOut = indexOutput;
+            reset();
+            totalDocs = 0;
+            docBaseDeltas = new int[BLOCK_SIZE];
+            startPointerDeltas = new long[BLOCK_SIZE];
+            fieldsIndexOut.WriteVInt(PackedInts.VERSION_CURRENT);
+        }
+
+        private void reset()
+        {
+            blockChunks = 0;
+            blockDocs = 0;
+            firstStartPointer = -1; // means unset
+        }
+
+        private void writeBlock()
+        {
+            fieldsIndexOut.WriteVInt(blockChunks);
+
+            // The trick here is that we only store the difference from the average start
+            // pointer or doc base, this helps save bits per value.
+            // And in order to prevent a few chunks that would be far from the average to
+            // raise the number of bits per value for all of them, we only encode blocks
+            // of 1024 chunks at once
+            // See LUCENE-4512
+
+            // doc bases
+            int avgChunkDocs;
+            if (blockChunks == 1)
+            {
+                avgChunkDocs = 0;
+            }
+            else
+            {
+                //hackmp - TODO - This needs review.  The function as a whole is designed with an int as the core value,
+                //including contracts on other methods.  I NEVER like casting from double to int, but for now...
+                avgChunkDocs = (int)Math.Round((float)(blockDocs - docBaseDeltas[blockChunks - 1]) / (blockChunks - 1));
+            }
+            fieldsIndexOut.WriteVInt(totalDocs - blockDocs); // docBase
+            fieldsIndexOut.WriteVInt(avgChunkDocs);
+            int docBase = 0;
+            long maxDelta = 0;
+            for (int i = 0; i < blockChunks; ++i)
+            {
+                int delta = docBase - avgChunkDocs * i;
+                maxDelta |= moveSignToLowOrderBit(delta);
+                docBase += docBaseDeltas[i];
+            }
+
+            int bitsPerDocBase = PackedInts.BitsRequired(maxDelta);
+            fieldsIndexOut.WriteVInt(bitsPerDocBase);
+            PackedInts.Writer writer = PackedInts.GetWriterNoHeader(fieldsIndexOut,
+                PackedInts.Format.PACKED, blockChunks, bitsPerDocBase, 1);
+            docBase = 0;
+            for (int i = 0; i < blockChunks; ++i)
+            {
+                long delta = docBase - avgChunkDocs * i;
+                writer.Add(moveSignToLowOrderBit(delta));
+                docBase += docBaseDeltas[i];
+            }
+            writer.Finish();
+
+            // start pointers
+            fieldsIndexOut.WriteVLong(firstStartPointer);
+            long avgChunkSize;
+            if (blockChunks == 1)
+            {
+                avgChunkSize = 0;
+            }
+            else
+            {
+                avgChunkSize = (maxStartPointer - firstStartPointer) / (blockChunks - 1);
+            }
+            fieldsIndexOut.WriteVLong(avgChunkSize);
+            long startPointer = 0;
+            maxDelta = 0;
+            for (int i = 0; i < blockChunks; ++i)
+            {
+                startPointer += startPointerDeltas[i];
+                long delta = startPointer - avgChunkSize * i;
+                maxDelta |= moveSignToLowOrderBit(delta);
+            }
+
+            int bitsPerStartPointer = PackedInts.BitsRequired(maxDelta);
+            fieldsIndexOut.WriteVInt(bitsPerStartPointer);
+            writer = PackedInts.GetWriterNoHeader(fieldsIndexOut, PackedInts.Format.PACKED,
+                blockChunks, bitsPerStartPointer, 1);
+            startPointer = 0;
+            for (int i = 0; i < blockChunks; ++i)
+            {
+                startPointer += startPointerDeltas[i];
+                long delta = startPointer - avgChunkSize * i;
+                writer.Add(moveSignToLowOrderBit(delta));
+            }
+            writer.Finish();
+        }
+
+        void writeIndex(int numDocs, long startPointer)
+        {
+            if (blockChunks == BLOCK_SIZE)
+            {
+                writeBlock();
+                reset();
+            }
+
+            if (firstStartPointer == -1)
+            {
+                firstStartPointer = maxStartPointer = startPointer;
+            }
+
+            docBaseDeltas[blockChunks] = numDocs;
+            startPointerDeltas[blockChunks] = startPointer - maxStartPointer;
+
+            ++blockChunks;
+            blockDocs += numDocs;
+            totalDocs += numDocs;
+            maxStartPointer = startPointer;
+        }
+
+        void finish(int numDocs)
+        {
+            if (numDocs != totalDocs)
+            {
+                throw new ArgumentOutOfRangeException("Expected " + numDocs + " docs, but got " + totalDocs);
+            }
+            if (blockChunks > 0)
+            {
+                writeBlock();
+            }
+            fieldsIndexOut.WriteVInt(0); // end marker
+        }
+
+        public void Dispose()
+        {
+            fieldsIndexOut.Dispose();
+        }
+    }
+}
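
The moveSignToLowOrderBit helper above is the usual zigzag transform: the
per-chunk deltas from the average doc base / start pointer can be slightly
negative, and zigzag maps small signed values to small unsigned ones so
that PackedInts.BitsRequired(maxDelta) stays low. A self-contained sketch
of the transform and its inverse (the inverse is not part of this commit
and is shown here only for illustration):

    using System;

    static class ZigZagDemo
    {
        // Same transform as moveSignToLowOrderBit:
        // 0, -1, 1, -2, 2, ... -> 0, 1, 2, 3, 4, ...
        static long Encode(long n)
        {
            return (n >> 63) ^ (n << 1);
        }

        // Inverse: logical right shift, then restore the sign bit.
        static long Decode(long n)
        {
            return (long)((ulong)n >> 1) ^ -(n & 1);
        }

        static void Main()
        {
            foreach (long n in new long[] { 0, -1, 1, -2, 2, -100, 100 })
            {
                Console.WriteLine("{0} -> {1} -> {2}", n, Encode(n), Decode(Encode(n)));
            }
        }
    }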

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/d9ad1fea/src/core/Codecs/Compressing/CompressingTermVectorsFormat.cs
----------------------------------------------------------------------
diff --git a/src/core/Codecs/Compressing/CompressingTermVectorsFormat.cs b/src/core/Codecs/Compressing/CompressingTermVectorsFormat.cs
new file mode 100644
index 0000000..0a2afd1
--- /dev/null
+++ b/src/core/Codecs/Compressing/CompressingTermVectorsFormat.cs
@@ -0,0 +1,28 @@
+using System;
+using System.Collections.Generic;
+using System.Linq;
+using System.Text;
+
+namespace Lucene.Net.Codecs.Compressing
+{
+    public class CompressingTermVectorsFormat: TermVectorsFormat
+    {
+        private string formatName;
+        private string segmentSuffix;
+        private CompressionMode compressionMode;
+        private int chunkSize;
+
+        public CompressingTermVectorsFormat(String formatName, String segmentSuffix, 
+            CompressionMode compressionMode, int chunkSize)
+        {
+            this.formatName = formatName;
+            this.segmentSuffix = segmentSuffix;
+            this.compressionMode = compressionMode;
+            if (chunkSize < 1)
+            {
+                throw new ArgumentException("chunkSize must be >= 1");
+            }
+            this.chunkSize = chunkSize;
+        }
+    }
+}
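
The format class above only stores its constructor arguments and rejects a
chunkSize below 1. A hypothetical usage sketch follows; the
CompressionMode.FAST value is assumed from the Java original and may be
named differently in this port:

    // Names other than CompressingTermVectorsFormat are assumptions.
    var tvFormat = new CompressingTermVectorsFormat(
        "MyFormat",            // formatName, recorded in the codec headers
        "",                    // segmentSuffix
        CompressionMode.FAST,  // assumed to exist as in the Java original
        1 << 14);              // chunkSize; anything < 1 throws ArgumentException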

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/d9ad1fea/src/core/Codecs/Compressing/CompressingTermVectorsReader.cs
----------------------------------------------------------------------
diff --git a/src/core/Codecs/Compressing/CompressingTermVectorsReader.cs b/src/core/Codecs/Compressing/CompressingTermVectorsReader.cs
new file mode 100644
index 0000000..0de0f4f
--- /dev/null
+++ b/src/core/Codecs/Compressing/CompressingTermVectorsReader.cs
@@ -0,0 +1,507 @@
+using Lucene.Net.Index;
+using Lucene.Net.Store;
+using Lucene.Net.Support;
+using Lucene.Net.Util;
+using Lucene.Net.Util.Packed;
+using System;
+using System.Collections.Generic;
+using System.Linq;
+using System.Text;
+
+namespace Lucene.Net.Codecs.Compressing
+{
+    public sealed class CompressingTermVectorsReader : TermVectorsReader, IDisposable
+    {
+        private FieldInfos fieldInfos;
+        CompressingStoredFieldsIndexReader indexReader;
+        IndexInput vectorsStream;
+        private int packedIntsVersion;
+        private CompressionMode compressionMode;
+        private Decompressor decompressor;
+        private int chunkSize;
+        private int numDocs;
+        private bool closed;
+        private BlockPackedReaderIterator reader;
+        
+        private CompressingTermVectorsReader(CompressingTermVectorsReader reader)
+        {
+            this.fieldInfos = reader.fieldInfos;
+            this.vectorsStream = (IndexInput)reader.vectorsStream.Clone();
+            this.indexReader = reader.indexReader.Clone();
+            this.packedIntsVersion = reader.packedIntsVersion;
+            this.compressionMode = reader.compressionMode;
+            this.decompressor = (Decompressor)reader.decompressor.Clone();
+            this.chunkSize = reader.chunkSize;
+            this.numDocs = reader.numDocs;
+            this.reader = new BlockPackedReaderIterator(vectorsStream, packedIntsVersion, BLOCK_SIZE, 0);
+            this.closed = false;
+        }
+
+          /** Sole constructor. */
+        public CompressingTermVectorsReader(Directory d, SegmentInfo si, String segmentSuffix, FieldInfos fn,
+            IOContext context, String formatName, CompressionMode compressionMode) 
+        {
+            this.compressionMode = compressionMode;
+            string segment = si.name;
+            bool success = false;
+            fieldInfos = fn;
+            numDocs = si.DocCount;
+            IndexInput indexStream = null;
+            try {
+                vectorsStream = d.OpenInput(IndexFileNames.SegmentFileName(segment, segmentSuffix, VECTORS_EXTENSION), context);
+                string indexStreamFN = IndexFileNames.SegmentFileName(segment, segmentSuffix, VECTORS_INDEX_EXTENSION);
+                indexStream = d.OpenInput(indexStreamFN, context);
+
+                string codecNameIdx = formatName + CODEC_SFX_IDX;
+                string codecNameDat = formatName + CODEC_SFX_DAT;
+                CodecUtil.CheckHeader(indexStream, codecNameIdx, VERSION_START, VERSION_CURRENT);
+                CodecUtil.CheckHeader(vectorsStream, codecNameDat, VERSION_START, VERSION_CURRENT);
+
+                indexReader = new CompressingStoredFieldsIndexReader(indexStream, si);
+                indexStream = null;
+
+                packedIntsVersion = vectorsStream.ReadVInt();
+                chunkSize = vectorsStream.ReadVInt();
+                decompressor = compressionMode.NewDecompressor();
+                this.reader = new BlockPackedReaderIterator(vectorsStream, packedIntsVersion, BLOCK_SIZE, 0);
+
+                success = true;
+            } finally {
+                if (!success) {
+                IOUtils.CloseWhileHandlingException(this, indexStream);
+                }
+            }
+        }
+
+        CompressionMode getCompressionMode() 
+        {
+            return compressionMode;
+        }
+
+        int getChunkSize() {
+            return chunkSize;
+        }
+
+        int getPackedIntsVersion() {
+            return packedIntsVersion;
+        }
+
+        CompressingStoredFieldsIndexReader getIndex() {
+            return indexReader;
+        }
+
+        IndexInput getVectorsStream() {
+            return vectorsStream;
+        }
+
+        /**
+        * @throws AlreadyClosedException if this TermVectorsReader is closed
+        */
+        private void ensureOpen()
+        {
+            if (closed) {
+                throw new AlreadyClosedException("this FieldsReader is closed");
+            }
+        }
+
+
+
+        public void Dispose()
+        {
+            if (!closed)
+            {
+                IOUtils.Close(vectorsStream, indexReader);
+                closed = true;
+            }
+        }
+
+        public override Index.Fields Get(int doc)
+        {
+            ensureOpen();
+
+            // seek to the right place
+            {
+              long startPointer = indexReader.GetStartPointer(doc);
+              vectorsStream.Seek(startPointer);
+            }
+
+            // decode
+            // - docBase: first doc ID of the chunk
+            // - chunkDocs: number of docs of the chunk
+            int docBase = vectorsStream.ReadVInt();
+            int chunkDocs = vectorsStream.ReadVInt();
+            if (doc < docBase || doc >= docBase + chunkDocs || docBase + chunkDocs > numDocs) {
+              throw new CorruptIndexException("docBase=" + docBase + ",chunkDocs=" + chunkDocs + ",doc=" + doc);
+            }
+
+            long skip; // number of fields to skip
+            long numFields; // number of fields of the document we're looking for
+            long totalFields; // total number of fields of the chunk (sum for all docs)
+            if (chunkDocs == 1) {
+              skip = 0;
+              numFields = totalFields = vectorsStream.ReadVInt();
+            } else {
+              reader.Reset(vectorsStream, chunkDocs);
+              long sum = 0;
+              for (int i = docBase; i < doc; ++i) {
+                sum += reader.Next();
+              }
+              skip = sum;
+              numFields = (int) reader.Next();
+              sum += numFields;
+              for (int i = doc + 1; i < docBase + chunkDocs; ++i) {
+                sum += reader.Next();
+              }
+              totalFields = sum;
+            }
+
+            if (numFields == 0) {
+              // no vectors
+              return null;
+            }
+
+            // read field numbers that have term vectors
+            int[] fieldNums;
+            {
+              int token = vectorsStream.ReadByte() & 0xFF;
+              int bitsPerFieldNum = token & 0x1F;
+              int totalDistinctFields = Number.URShift(token, 5);
+              if (totalDistinctFields == 0x07) {
+                totalDistinctFields += vectorsStream.ReadVInt();
+              }
+              ++totalDistinctFields;
+              PackedInts.ReaderIterator it = PackedInts.GetReaderIteratorNoHeader(vectorsStream, PackedInts.Format.PACKED, packedIntsVersion, totalDistinctFields, bitsPerFieldNum, 1);
+              fieldNums = new int[totalDistinctFields];
+              for (int i = 0; i < totalDistinctFields; ++i) {
+                fieldNums[i] = (int) it.Next();
+              }
+            }
+
+            // read field numbers and flags
+            int[] fieldNumOffs = new int[numFields];
+            PackedInts.Reader flags;
+            {
+              int bitsPerOff = PackedInts.BitsRequired(fieldNums.Length - 1);
+              PackedInts.Reader allFieldNumOffs = PackedInts.GetReaderNoHeader(vectorsStream, PackedInts.Format.PACKED, packedIntsVersion, totalFields, bitsPerOff);
+              switch (vectorsStream.ReadVInt()) {
+                case 0:
+                  PackedInts.Reader fieldFlags = PackedInts.GetReaderNoHeader(vectorsStream, PackedInts.Format.PACKED, packedIntsVersion, fieldNums.Length, FLAGS_BITS);
+                  PackedInts.Mutable f = PackedInts.GetMutable(totalFields, FLAGS_BITS, PackedInts.COMPACT);
+                  for (int i = 0; i < totalFields; ++i) {
+                    int fieldNumOff = (int) allFieldNumOffs.Get(i);
+                    int fgs = (int) fieldFlags.Get(fieldNumOff);
+                    f.Set(i, fgs);
+                  }
+                  flags = f;
+                  break;
+                case 1:
+                  flags = PackedInts.GetReaderNoHeader(vectorsStream, PackedInts.Format.PACKED, packedIntsVersion, totalFields, FLAGS_BITS);
+                  break;
+                default:
+                  throw new InvalidOperationException();
+              }
+              for (int i = 0; i < numFields; ++i) {
+                //hackmp - TODO - NEEDS REVIEW
+                //Here again, changing all ints to long would have a larger impact than simply casting.  Will need Paul to review.
+                fieldNumOffs[i] = (int) allFieldNumOffs.Get((int)skip + i);
+              }
+            }
+
+            // number of terms per field for all fields
+            PackedInts.Reader numTerms;
+            long totalTerms;
+            {
+              int bitsRequired = vectorsStream.ReadVInt();
+              numTerms = PackedInts.GetReaderNoHeader(vectorsStream, PackedInts.Format.PACKED, packedIntsVersion, totalFields, bitsRequired);
+              long sum = 0;
+              for (int i = 0; i < totalFields; ++i) {
+                sum += numTerms.Get(i);
+              }
+              totalTerms = sum;
+            }
+
+            // term lengths
+            long docOff = 0, docLen = 0, totalLen;
+            int[] fieldLengths = new int[numFields];
+            int[][] prefixLengths = new int[numFields][];
+            int[][] suffixLengths = new int[numFields][];
+            {
+              reader.Reset(vectorsStream, totalTerms);
+              // skip
+              long toSkip = 0;
+              for (int i = 0; i < skip; ++i) {
+                toSkip += numTerms.Get(i);
+              }
+              reader.Skip(toSkip);
+              // read prefix lengths
+              for (int i = 0; i < numFields; ++i) {
+                //hackmp - TODO - NEEDS REVIEW
+                //casting long to int
+                long termCount = (int) numTerms.Get((int)skip + i);
+                int[] fieldPrefixLengths = new int[termCount];
+                prefixLengths[i] = fieldPrefixLengths;
+                for (int j = 0; j < termCount; ) {
+                  //hackmp - TODO - NEEDS REVIEW
+                  //casting long to int..
+                  LongsRef next = reader.Next((int)termCount - j);
+                  for (int k = 0; k < next.length; ++k) {
+                    fieldPrefixLengths[j++] = (int) next.longs[next.offset + k];
+                  }
+                }
+              }
+              reader.Skip(totalTerms - reader.Ord);
+
+              reader.Reset(vectorsStream, totalTerms);
+              // skip
+              toSkip = 0;
+              for (int i = 0; i < skip; ++i) {
+                for (int j = 0; j < numTerms.Get(i); ++j) {
+                  docOff += reader.Next();
+                }
+              }
+              for (int i = 0; i < numFields; ++i) {
+                  //HACKMP - TODO - NEEDS REVIEW
+                  //..and again, casting long to int
+                int termCount = (int) numTerms.Get((int)skip + i);
+                int[] fieldSuffixLengths = new int[termCount];
+                suffixLengths[i] = fieldSuffixLengths;
+                for (int j = 0; j < termCount; ) {
+                  LongsRef next = reader.Next(termCount - j);
+                  for (int k = 0; k < next.length; ++k) {
+                    fieldSuffixLengths[j++] = (int) next.longs[next.offset + k];
+                  }
+                }
+                fieldLengths[i] = Sum(suffixLengths[i]);
+                docLen += fieldLengths[i];
+              }     
+              totalLen = docOff + docLen;
+              for (long i = skip + numFields; i < totalFields; ++i) {
+                  //hackmp - TODO - NEEDS REVIEW
+                  //long > int
+                for (int j = 0; j < numTerms.Get((int)i); ++j) 
+                {
+                  totalLen += reader.Next();
+                }
+              }
+            }
+
+            // term freqs
+            int[] termFreqs = new int[totalTerms];
+            {
+              reader.Reset(vectorsStream, totalTerms);
+              for (int i = 0; i < totalTerms; ) {
+                //hackmp - TODO - NEEDS REVIEW
+                //long > int
+                LongsRef next = reader.Next((int)totalTerms - i);
+                for (int k = 0; k < next.length; ++k) {
+                  termFreqs[i++] = 1 + (int) next.longs[next.offset + k];
+                }
+              }
+            }
+
+            // total number of positions, offsets and payloads
+            int totalPositions = 0, totalOffsets = 0, totalPayloads = 0;
+            for (int i = 0, termIndex = 0; i < totalFields; ++i) 
+            {
+              int f = (int) flags.Get(i);
+              int termCount = (int) numTerms.Get(i);
+              for (int j = 0; j < termCount; ++j) {
+                int freq = termFreqs[termIndex++];
+                if ((f & POSITIONS) != 0) {
+                  totalPositions += freq;
+                }
+                if ((f & OFFSETS) != 0) {
+                  totalOffsets += freq;
+                }
+                if ((f & PAYLOADS) != 0) {
+                  totalPayloads += freq;
+                }
+              }
+            }
+
+            int[][] positionIndex = PositionIndex(skip, numFields, numTerms, termFreqs);
+            int[][] positions, startOffsets, lengths;
+            if (totalPositions > 0) {
+              positions = ReadPositions(skip, numFields, flags, numTerms, termFreqs, POSITIONS, totalPositions, positionIndex);
+            } else {
+              positions = new int[numFields][];
+            }
+
+            if (totalOffsets > 0) {
+              // average number of chars per term
+              float[] charsPerTerm = new float[fieldNums.Length];
+              for (int i = 0; i < charsPerTerm.Length; ++i) {
+                charsPerTerm[i] = Number.IntBitsToFloat(vectorsStream.ReadInt());
+              }
+              startOffsets = ReadPositions(skip, numFields, flags, numTerms, termFreqs, OFFSETS, totalOffsets, positionIndex);
+              lengths = ReadPositions(skip, numFields, flags, numTerms, termFreqs, OFFSETS, totalOffsets, positionIndex);
+
+              for (int i = 0; i < numFields; ++i) {
+                int[] fStartOffsets = startOffsets[i];
+                int[] fPositions = positions[i];
+                // patch offsets from positions
+                if (fStartOffsets != null && fPositions != null) {
+                  float fieldCharsPerTerm = charsPerTerm[fieldNumOffs[i]];
+                  for (int j = 0; j < startOffsets[i].Length; ++j) {
+                    fStartOffsets[j] += (int) (fieldCharsPerTerm * fPositions[j]);
+                  }
+                }
+                if (fStartOffsets != null) {
+                  int[] fPrefixLengths = prefixLengths[i];
+                  int[] fSuffixLengths = suffixLengths[i];
+                  int[] fLengths = lengths[i];
+                    //hackmp - TODO - NEEDS REVIEW
+                    //long > int
+                  for (int j = 0, end = (int) numTerms.Get((int)skip + i); j < end; ++j) {
+                    // delta-decode start offsets and  patch lengths using term lengths
+                    int termLength = fPrefixLengths[j] + fSuffixLengths[j];
+                    lengths[i][positionIndex[i][j]] += termLength;
+                    for (int k = positionIndex[i][j] + 1; k < positionIndex[i][j + 1]; ++k) {
+                      fStartOffsets[k] += fStartOffsets[k - 1];
+                      fLengths[k] += termLength;
+                    }
+                  }
+                }
+              }
+            } else {
+              startOffsets = lengths = new int[numFields][];
+            }
+            if (totalPositions > 0) {
+              // delta-decode positions
+              for (int i = 0; i < numFields; ++i) {
+                int[] fPositions = positions[i];
+                int[] fpositionIndex = positionIndex[i];
+                if (fPositions != null) {
+                    //hackmp - TODO - NEED REVIEW
+                    //long > int
+                  for (int j = 0, end = (int) numTerms.Get((int)skip + i); j < end; ++j) {
+                    // delta-decode start offsets
+                    for (int k = fpositionIndex[j] + 1; k < fpositionIndex[j + 1]; ++k) {
+                      fPositions[k] += fPositions[k - 1];
+                    }
+                  }
+                }
+              }
+            }
+
+            // payload lengths
+            int[][] payloadIndex = new int[numFields][];
+            long totalPayloadLength = 0;
+            int payloadOff = 0;
+            int payloadLen = 0;
+            if (totalPayloads > 0) {
+              reader.Reset(vectorsStream, totalPayloads);
+              // skip
+              int termIndex = 0;
+              for (int i = 0; i < skip; ++i) {
+                int f = (int) flags.Get(i);
+                int termCount = (int) numTerms.Get(i);
+                if ((f & PAYLOADS) != 0) {
+                  for (int j = 0; j < termCount; ++j) {
+                    int freq = termFreqs[termIndex + j];
+                    for (int k = 0; k < freq; ++k) {
+                      int l = (int) reader.Next();
+                      payloadOff += l;
+                    }
+                  }
+                }
+                termIndex += termCount;
+              }
+              totalPayloadLength = payloadOff;
+              // read doc payload lengths
+              for (int i = 0; i < numFields; ++i) {
+                  //hackmp - TODO - NEEDS REVIEW
+                  //long > int
+                int f = (int) flags.Get((int)skip + i);
+                int termCount = (int) numTerms.Get((int)skip + i);
+                if ((f & PAYLOADS) != 0) {
+                  int totalFreq = positionIndex[i][termCount];
+                  payloadIndex[i] = new int[totalFreq + 1];
+                  int posIdx = 0;
+                  payloadIndex[i][posIdx] = payloadLen;
+                  for (int j = 0; j < termCount; ++j) {
+                    int freq = termFreqs[termIndex + j];
+                    for (int k = 0; k < freq; ++k) {
+                      int payloadLength = (int) reader.Next();
+                      payloadLen += payloadLength;
+                      payloadIndex[i][posIdx+1] = payloadLen;
+                      ++posIdx;
+                    }
+                  }
+                }
+                termIndex += termCount;
+              }
+              totalPayloadLength += payloadLen;
+              for (long i = skip + numFields; i < totalFields; ++i) {
+                  //hackmp - TODO - NEEDS REVIEW
+                  //long > int
+                int f = (int) flags.Get((int)i);
+                int termCount = (int) numTerms.Get((int)i);
+                if ((f & PAYLOADS) != 0) {
+                  for (int j = 0; j < termCount; ++j) {
+                    int freq = termFreqs[termIndex + j];
+                    for (int k = 0; k < freq; ++k) {
+                      totalPayloadLength += reader.Next();
+                    }
+                  }
+                }
+                termIndex += termCount;
+              }
+            }
+
+            // decompress data
+            BytesRef suffixBytes = new BytesRef();
+            //hackmp - TODO - NEEDS REVIEW
+            //long > int
+            decompressor.Decompress(vectorsStream, (int)totalLen + (int)totalPayloadLength, (int)docOff + (int)payloadOff, (int)docLen + payloadLen, suffixBytes);
+            suffixBytes.length = (int)docLen;
+            BytesRef payloadBytes = new BytesRef(suffixBytes.bytes, suffixBytes.offset + (int)docLen, payloadLen);
+
+            int[] fieldFlags = new int[numFields];
+            for (int i = 0; i < numFields; ++i) {
+                //hackmp - TODO - NEEDS REVIEW
+                //long > int
+              fieldFlags[i] = (int) flags.Get((int)skip + i);
+            }
+
+            int[] fieldNumTerms = new int[numFields];
+            for (int i = 0; i < numFields; ++i) {
+                //hackmp - TODO - NEEDS REVIEW
+              fieldNumTerms[i] = (int) numTerms.Get((int)skip + i);
+            }
+
+            int[][] fieldTermFreqs = new int[numFields][];
+            {
+              long termIdx = 0;
+              for (int i = 0; i < skip; ++i) {
+                termIdx += numTerms.Get(i);
+              }
+              for (int i = 0; i < numFields; ++i) {
+                  //hackmp - TODO - NEEDS REVIEW
+                  //long > int
+                long termCount = (int) numTerms.Get((int)skip + i);
+                fieldTermFreqs[i] = new int[termCount];
+                for (int j = 0; j < termCount; ++j) {
+                  fieldTermFreqs[i][j] = termFreqs[termIdx++];
+                }
+              }
+            }
+
+            return new TVFields(fieldNums, fieldFlags, fieldNumOffs, fieldNumTerms, fieldLengths,
+                prefixLengths, suffixLengths, fieldTermFreqs,
+                positionIndex, positions, startOffsets, lengths,
+                payloadBytes, payloadIndex,
+                suffixBytes);
+        }
+
+        public override object Clone()
+        {
+            return new CompressingTermVectorsReader(this);
+        }
+
+        protected override void Dispose(bool disposing)
+        {
+            throw new NotImplementedException();
+        }
+    }
+}
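
The recurring "hackmp - TODO - long > int" comments above mark unchecked
narrowing casts from the Java long-based packed-ints API. A minimal sketch of
a guarded cast helper (hypothetical; not part of this commit) that those call
sites could route through, so overflow fails fast instead of silently
truncating:

    using System;

    internal static class NumericCasts
    {
        // Throws instead of silently wrapping when the value exceeds int range.
        public static int CheckedIntCast(long value)
        {
            if (value < int.MinValue || value > int.MaxValue)
                throw new ArgumentOutOfRangeException("value", "does not fit in an Int32: " + value);
            return (int)value;
        }
    }

    // Sketch of one flagged site:
    //   int f = NumericCasts.CheckedIntCast(flags.Get(NumericCasts.CheckedIntCast(skip + i)));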

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/d9ad1fea/src/core/Lucene.Net.csproj
----------------------------------------------------------------------
diff --git a/src/core/Lucene.Net.csproj b/src/core/Lucene.Net.csproj
index 85f9818..306396c 100644
--- a/src/core/Lucene.Net.csproj
+++ b/src/core/Lucene.Net.csproj
@@ -188,8 +188,11 @@
     <Compile Include="Codecs\CodecUtil.cs" />
     <Compile Include="Codecs\Compressing\CompressingStoredFieldsFormat.cs" />
     <Compile Include="Codecs\Compressing\CompressingStoredFieldsIndexReader.cs" />
+    <Compile Include="Codecs\Compressing\CompressingStoredFieldsIndexWriter.cs" />
     <Compile Include="Codecs\Compressing\CompressingStoredFieldsReader.cs" />
     <Compile Include="Codecs\Compressing\CompressingStoredFieldsWriter.cs" />
+    <Compile Include="Codecs\Compressing\CompressingTermVectorsFormat.cs" />
+    <Compile Include="Codecs\Compressing\CompressingTermVectorsReader.cs" />
     <Compile Include="Codecs\Compressing\CompressionMode.cs" />
     <Compile Include="Codecs\Compressing\Compressor.cs" />
     <Compile Include="Codecs\Compressing\Decompressor.cs" />


[22/50] [abbrv] git commit: More cleanup

Posted by mh...@apache.org.
More cleanup


Project: http://git-wip-us.apache.org/repos/asf/lucenenet/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucenenet/commit/16ff6a7b
Tree: http://git-wip-us.apache.org/repos/asf/lucenenet/tree/16ff6a7b
Diff: http://git-wip-us.apache.org/repos/asf/lucenenet/diff/16ff6a7b

Branch: refs/heads/branch_4x
Commit: 16ff6a7b3c233554af33bbdbca18d24244982147
Parents: 80561f7
Author: Paul Irwin <pa...@gmail.com>
Authored: Tue Jul 23 20:24:13 2013 -0400
Committer: Paul Irwin <pa...@gmail.com>
Committed: Tue Jul 23 20:24:13 2013 -0400

----------------------------------------------------------------------
 src/core/Analysis/Analyzer.cs              | 2 +-
 src/core/Codecs/BlockTreeTermsWriter.cs    | 2 +-
 src/core/Index/BufferedDeletesStream.cs    | 8 ++++----
 src/core/Index/ConcurrentMergeScheduler.cs | 8 ++++----
 src/core/Search/BooleanQuery.cs            | 2 +-
 src/core/Search/CachingWrapperFilter.cs    | 2 +-
 src/core/Search/Scorer.cs                  | 2 +-
 src/core/Util/Packed/BlockPackedReader.cs  | 2 +-
 8 files changed, 14 insertions(+), 14 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucenenet/blob/16ff6a7b/src/core/Analysis/Analyzer.cs
----------------------------------------------------------------------
diff --git a/src/core/Analysis/Analyzer.cs b/src/core/Analysis/Analyzer.cs
index baeb583..a55cc54 100644
--- a/src/core/Analysis/Analyzer.cs
+++ b/src/core/Analysis/Analyzer.cs
@@ -159,7 +159,7 @@ namespace Lucene.Net.Analysis
 
             public virtual void SetReader(System.IO.TextReader reader)
             {
-                source.SetReader(reader);
+                source.Reader = reader;
             }
 
             public TokenStream TokenStream
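
The SetReader-to-property change follows the port's convention of turning Java
get/set method pairs into C# properties. A reduced sketch of the assumed
receiving side (names illustrative, not the actual Tokenizer source):

    using System.IO;

    internal class TokenizerLike
    {
        private TextReader input;

        // Write-only property standing in for the Java setReader(Reader) method.
        public TextReader Reader
        {
            set { input = value; }
        }
    }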

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/16ff6a7b/src/core/Codecs/BlockTreeTermsWriter.cs
----------------------------------------------------------------------
diff --git a/src/core/Codecs/BlockTreeTermsWriter.cs b/src/core/Codecs/BlockTreeTermsWriter.cs
index 14d5444..778eef0 100644
--- a/src/core/Codecs/BlockTreeTermsWriter.cs
+++ b/src/core/Codecs/BlockTreeTermsWriter.cs
@@ -887,7 +887,7 @@ namespace Lucene.Net.Codecs
                 //assert stats.docFreq > 0;
                 //if (DEBUG) System.out.println("BTTW.finishTerm term=" + fieldInfo.name + ":" + toString(text) + " seg=" + segment + " df=" + stats.docFreq);
 
-                blockBuilder.Add(Util.ToIntsRef(text, scratchIntsRef), noOutputs.GetNoOutput());
+                blockBuilder.Add(Lucene.Net.Util.Fst.Util.ToIntsRef(text, scratchIntsRef), noOutputs.GetNoOutput());
                 pending.Add(new PendingTerm(BytesRef.DeepCopyOf(text), stats));
                 parent.postingsWriter.FinishTerm(stats);
                 numTerms++;
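
The fully qualified Lucene.Net.Util.Fst.Util.ToIntsRef call resolves the
ambiguity between the Fst.Util class and the enclosing Lucene.Net.Util
namespace. A using alias would keep the call site short (a sketch, not what
the commit does):

    using FstUtil = Lucene.Net.Util.Fst.Util;

    // ...
    blockBuilder.Add(FstUtil.ToIntsRef(text, scratchIntsRef), noOutputs.GetNoOutput());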

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/16ff6a7b/src/core/Index/BufferedDeletesStream.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/BufferedDeletesStream.cs b/src/core/Index/BufferedDeletesStream.cs
index 78b2c51..73a3440 100644
--- a/src/core/Index/BufferedDeletesStream.cs
+++ b/src/core/Index/BufferedDeletesStream.cs
@@ -223,7 +223,7 @@ namespace Lucene.Net.Index
                         // Don't delete by Term here; DocumentsWriterPerThread
                         // already did that on flush:
                         delCount += ApplyQueryDeletes(packet.Queries, rld, reader);
-                        int fullDelCount = rld.Info.DelCount + rld.GetPendingDeleteCount();
+                        int fullDelCount = rld.Info.DelCount + rld.PendingDeleteCount;
                         //assert fullDelCount <= rld.info.info.getDocCount();
                         segAllDeletes = fullDelCount == rld.Info.info.DocCount;
                     }
@@ -279,7 +279,7 @@ namespace Lucene.Net.Index
                         {
                             delCount += ApplyTermDeletes(coalescedDeletes.TermsEnumerable, rld, reader);
                             delCount += ApplyQueryDeletes(coalescedDeletes.QueriesEnumerable, rld, reader);
-                            int fullDelCount = rld.Info.DelCount + rld.GetPendingDeleteCount();
+                            int fullDelCount = rld.Info.DelCount + rld.PendingDeleteCount;
                             //assert fullDelCount <= rld.info.info.getDocCount();
                             segAllDeletes = fullDelCount == rld.Info.info.DocCount;
                         }
@@ -413,7 +413,7 @@ namespace Lucene.Net.Index
                 if (!term.Field.Equals(currentField))
                 {
                     //assert currentField == null || currentField.compareTo(term.field()) < 0;
-                    currentField = term.Field();
+                    currentField = term.Field;
                     Terms terms = fields.Terms(currentField);
                     if (terms != null)
                     {
@@ -485,7 +485,7 @@ namespace Lucene.Net.Index
         private static long ApplyQueryDeletes(IEnumerable<QueryAndLimit> queriesIter, ReadersAndLiveDocs rld, SegmentReader reader)
         {
             long delCount = 0;
-            AtomicReaderContext readerContext = reader.Context;
+            AtomicReaderContext readerContext = (AtomicReaderContext)reader.Context;
             bool any = false;
             foreach (QueryAndLimit ent in queriesIter)
             {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/16ff6a7b/src/core/Index/ConcurrentMergeScheduler.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/ConcurrentMergeScheduler.cs b/src/core/Index/ConcurrentMergeScheduler.cs
index 34c2094..709e784 100644
--- a/src/core/Index/ConcurrentMergeScheduler.cs
+++ b/src/core/Index/ConcurrentMergeScheduler.cs
@@ -364,11 +364,11 @@ namespace Lucene.Net.Index
                     }
                     try
                     {
-                        Wait();
+                        Monitor.Wait(this);
                     }
-                    catch (ThreadInterruptedException ie)
+                    catch (ThreadInterruptedException)
                     {
-                        throw new ThreadInterruptedException(ie);
+                        throw;
                     }
                 }
 
@@ -380,7 +380,7 @@ namespace Lucene.Net.Index
                     }
                 }
 
-                MergePolicy.OneMerge merge = writer.GetNextMerge();
+                MergePolicy.OneMerge merge = writer.NextMerge;
                 if (merge == null)
                 {
                     if (Verbose())
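
The hunk above replaces Java's wait() with Monitor.Wait(this) and rethrows the
interrupt rather than wrapping it in a new exception. A self-contained sketch
of the equivalent wait/pulse pairing, assuming the waiting thread and its
signaler lock the same object as the scheduler does:

    using System.Threading;

    internal class MergeSignal
    {
        private bool ready;

        public void WaitUntilReady()
        {
            lock (this)                 // Monitor.Enter/Exit, as in the scheduler
            {
                while (!ready)
                    Monitor.Wait(this); // releases the lock while blocked
            }
        }

        public void SignalReady()
        {
            lock (this)
            {
                ready = true;
                Monitor.Pulse(this);    // counterpart of Java's notify()
            }
        }
    }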

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/16ff6a7b/src/core/Search/BooleanQuery.cs
----------------------------------------------------------------------
diff --git a/src/core/Search/BooleanQuery.cs b/src/core/Search/BooleanQuery.cs
index f2b74ae..3de7417 100644
--- a/src/core/Search/BooleanQuery.cs
+++ b/src/core/Search/BooleanQuery.cs
@@ -259,7 +259,7 @@ namespace Lucene.Net.Search
                     cIter.MoveNext();
                     Weight w = wIter.Current;
                     BooleanClause c = cIter.Current;
-                    if (w.Scorer(context, true, true, context.Reader.LiveDocs) == null)
+                    if (w.Scorer(context, true, true, ((AtomicReader)context.Reader).LiveDocs) == null)
                     {
                         if (c.IsRequired)
                         {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/16ff6a7b/src/core/Search/CachingWrapperFilter.cs
----------------------------------------------------------------------
diff --git a/src/core/Search/CachingWrapperFilter.cs b/src/core/Search/CachingWrapperFilter.cs
index d66b44f..e1ad7f9 100644
--- a/src/core/Search/CachingWrapperFilter.cs
+++ b/src/core/Search/CachingWrapperFilter.cs
@@ -83,7 +83,7 @@ namespace Lucene.Net.Search
 
         public override DocIdSet GetDocIdSet(AtomicReaderContext context, IBits acceptDocs)
         {
-            AtomicReader reader = context.Reader;
+            AtomicReader reader = (AtomicReader)context.Reader;
             Object key = reader.CoreCacheKey;
 
             DocIdSet docIdSet = cache[key];

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/16ff6a7b/src/core/Search/Scorer.cs
----------------------------------------------------------------------
diff --git a/src/core/Search/Scorer.cs b/src/core/Search/Scorer.cs
index 8e4e5cd..6e067c0 100644
--- a/src/core/Search/Scorer.cs
+++ b/src/core/Search/Scorer.cs
@@ -44,7 +44,7 @@ namespace Lucene.Net.Search
     public abstract class Scorer : DocsEnum
     {
         protected readonly Weight weight;
-        protected virtual Weight Weight { get { return weight; } }
+        protected internal virtual Weight Weight { get { return weight; } }
 
         /// <summary>Constructs a Scorer.</summary>
         /// <param name="similarity">The <c>Similarity</c> implementation used by this scorer.

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/16ff6a7b/src/core/Util/Packed/BlockPackedReader.cs
----------------------------------------------------------------------
diff --git a/src/core/Util/Packed/BlockPackedReader.cs b/src/core/Util/Packed/BlockPackedReader.cs
index 446fc7d..05a840e 100644
--- a/src/core/Util/Packed/BlockPackedReader.cs
+++ b/src/core/Util/Packed/BlockPackedReader.cs
@@ -70,7 +70,7 @@ namespace Lucene.Net.Util.Packed
             //assert index >= 0 && index < valueCount;
             int block = (int)Number.URShift(index, blockShift);
             int idx = (int)(index & blockMask);
-            return (minValues == null ? 0 : minValues[block]) + subReaders[block].get(idx);
+            return (minValues == null ? 0 : minValues[block]) + subReaders[block].Get(idx);
         }
     }
 }


[06/50] [abbrv] git commit: Port: csproj checkin

Posted by mh...@apache.org.
Port: csproj checkin


Project: http://git-wip-us.apache.org/repos/asf/lucenenet/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucenenet/commit/2e4e00bd
Tree: http://git-wip-us.apache.org/repos/asf/lucenenet/tree/2e4e00bd
Diff: http://git-wip-us.apache.org/repos/asf/lucenenet/diff/2e4e00bd

Branch: refs/heads/branch_4x
Commit: 2e4e00bd7e9f2e27b5654c74dc314aa1020fce24
Parents: 06f5d4b
Author: James Blair <jm...@gmail.com>
Authored: Thu Jul 11 16:33:31 2013 -0400
Committer: James Blair <jm...@gmail.com>
Committed: Thu Jul 11 16:33:31 2013 -0400

----------------------------------------------------------------------
 test/core/Lucene.Net.Test.csproj | 5 +++++
 1 file changed, 5 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucenenet/blob/2e4e00bd/test/core/Lucene.Net.Test.csproj
----------------------------------------------------------------------
diff --git a/test/core/Lucene.Net.Test.csproj b/test/core/Lucene.Net.Test.csproj
index 1426b60..89f6ca3 100644
--- a/test/core/Lucene.Net.Test.csproj
+++ b/test/core/Lucene.Net.Test.csproj
@@ -546,8 +546,13 @@
     <Compile Include="Util\TestDoubleBarrelLRUCache.cs" />
     <Compile Include="Util\TestFieldCacheSanityChecker.cs" />
     <Compile Include="Util\TestIndexableBinaryStringTools.cs" />
+    <Compile Include="Util\TestIntsRef.cs" />
+    <Compile Include="Util\TestIOUtils.cs" />
+    <Compile Include="Util\TestMaxFailureRule.cs" />
+    <Compile Include="Util\TestNamedSPILoader.cs" />
     <Compile Include="Util\TestNumericUtils.cs" />
     <Compile Include="Util\TestOpenBitSet.cs" />
+    <Compile Include="Util\TestPagedBytes.cs" />
     <Compile Include="Util\TestPriorityQueue.cs">
       <SubType>Code</SubType>
     </Compile>


[27/50] [abbrv] git commit: IT NOW BUILDS!~!!!

Posted by mh...@apache.org.
IT NOW BUILDS!~!!!


Project: http://git-wip-us.apache.org/repos/asf/lucenenet/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucenenet/commit/733dc181
Tree: http://git-wip-us.apache.org/repos/asf/lucenenet/tree/733dc181
Diff: http://git-wip-us.apache.org/repos/asf/lucenenet/diff/733dc181

Branch: refs/heads/branch_4x
Commit: 733dc181cbbb770d6455c684ce0aabeaef2b1d8f
Parents: 25ec42a
Author: Paul Irwin <pa...@gmail.com>
Authored: Tue Aug 6 16:28:33 2013 -0400
Committer: Paul Irwin <pa...@gmail.com>
Committed: Tue Aug 6 16:28:33 2013 -0400

----------------------------------------------------------------------
 .../Analysis/Tokenattributes/ITermAttribute.cs  | 104 -----
 .../Analysis/Tokenattributes/TermAttribute.cs   | 268 -------------
 src/core/Codecs/Compressing/CompressionMode.cs  |  10 +-
 src/core/Codecs/StoredFieldsWriter.cs           |   4 +-
 src/core/Index/AtomicReader.cs                  |  10 +
 src/core/Index/MultiFields.cs                   |  10 +-
 src/core/Index/MultiTermsEnum.cs                |   2 +-
 src/core/Index/NormsConsumer.cs                 |   2 +-
 src/core/Index/NumericDocValuesWriter.cs        |   2 +-
 src/core/Index/ReadersAndLiveDocs.cs            |   4 +-
 src/core/Index/SegmentInfoPerCommit.cs          |   2 +-
 src/core/Index/SortedDocValuesWriter.cs         |   2 +-
 src/core/Index/SortedSetDocValuesWriter.cs      |   6 +-
 src/core/Index/StandardDirectoryReader.cs       |  15 +-
 src/core/Index/StoredFieldsProcessor.cs         |   2 +-
 src/core/Index/TermContext.cs                   |   2 +-
 src/core/Index/TermVectorsConsumerPerField.cs   |  28 +-
 src/core/Lucene.Net.csproj                      |   3 +-
 src/core/Search/FieldComparator.cs              |  86 ++--
 src/core/Search/MultiPhraseQuery.cs             |   9 +-
 src/core/Search/MultiTermQueryWrapperFilter.cs  |   2 +-
 src/core/Search/NRTManager.cs                   |  10 +-
 src/core/Search/NumericRangeQuery.cs            |   2 +-
 src/core/Search/Payloads/PayloadNearQuery.cs    |  24 +-
 src/core/Search/Payloads/PayloadSpanUtil.cs     |   5 +-
 src/core/Search/Payloads/PayloadTermQuery.cs    | 392 ++++++++++---------
 src/core/Search/PhraseQuery.cs                  |   4 +-
 src/core/Search/QueryWrapperFilter.cs           |   4 +-
 src/core/Search/ReqExclScorer.cs                |   2 +-
 src/core/Search/ScoreCachingWrappingScorer.cs   |   2 +-
 src/core/Search/ScoringRewrite.cs               |   4 +-
 src/core/Search/Similarities/SimilarityBase.cs  |  10 +-
 src/core/Search/Similarities/TFIDFSimilarity.cs |   6 +-
 src/core/Search/Spans/NearSpansOrdered.cs       |   4 +-
 src/core/Search/Spans/NearSpansUnordered.cs     |   2 +-
 .../Search/Spans/SpanNearPayloadCheckQuery.cs   |  11 +-
 src/core/Search/Spans/SpanNearQuery.cs          |   2 +-
 src/core/Search/Spans/SpanPositionCheckQuery.cs |   2 +-
 src/core/Search/Spans/SpanScorer.cs             |   2 +-
 src/core/Search/Spans/SpanTermQuery.cs          |  16 +-
 src/core/Search/Spans/SpanWeight.cs             |   2 +-
 src/core/Search/Spans/TermSpans.cs              |   4 +-
 src/core/Search/TermQuery.cs                    |   6 +-
 src/core/Search/TermRangeFilter.cs              |   4 +-
 src/core/Search/TermRangeQuery.cs               |  24 +-
 src/core/Search/TopDocs.cs                      |   2 +-
 src/core/Search/TopFieldCollector.cs            |  10 +-
 src/core/Store/NIOFSDirectory.cs                |   2 +-
 src/core/Store/SimpleFSDirectory.cs             |   2 +-
 src/core/Support/Deflater.cs                    |  36 ++
 src/core/Support/Inflater.cs                    |  36 ++
 src/core/Support/StringBuilderExtensions.cs     |  28 ++
 src/core/Util/Fst/ReverseBytesReader.cs         |   4 +-
 53 files changed, 534 insertions(+), 703 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucenenet/blob/733dc181/src/core/Analysis/Tokenattributes/ITermAttribute.cs
----------------------------------------------------------------------
diff --git a/src/core/Analysis/Tokenattributes/ITermAttribute.cs b/src/core/Analysis/Tokenattributes/ITermAttribute.cs
deleted file mode 100644
index 8f9b030..0000000
--- a/src/core/Analysis/Tokenattributes/ITermAttribute.cs
+++ /dev/null
@@ -1,104 +0,0 @@
-/* 
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- * 
- * http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-using System;
-using Lucene.Net.Util;
-
-namespace Lucene.Net.Analysis.Tokenattributes
-{
-	
-	/// <summary> The term text of a Token.</summary>
-	public interface ITermAttribute:IAttribute
-	{
-	    /// <summary>Returns the Token's term text.
-	    /// 
-	    /// This method has a performance penalty
-	    /// because the text is stored internally in a char[].  If
-	    /// possible, use <see cref="TermBuffer()" /> and <see cref="TermLength()" />
-	    /// directly instead.  If you really need a
-	    /// String, use this method, which is nothing more than
-	    /// a convenience call to <b>new String(token.termBuffer(), 0, token.termLength())</b>
-	    /// </summary>
-	    string Term { get; }
-
-	    /// <summary>Copies the contents of buffer, starting at offset for
-		/// length characters, into the termBuffer array.
-		/// </summary>
-		/// <param name="buffer">the buffer to copy
-		/// </param>
-		/// <param name="offset">the index in the buffer of the first character to copy
-		/// </param>
-		/// <param name="length">the number of characters to copy
-		/// </param>
-		void  SetTermBuffer(char[] buffer, int offset, int length);
-		
-		/// <summary>Copies the contents of buffer into the termBuffer array.</summary>
-		/// <param name="buffer">the buffer to copy
-		/// </param>
-		void  SetTermBuffer(System.String buffer);
-		
-		/// <summary>Copies the contents of buffer, starting at offset and continuing
-		/// for length characters, into the termBuffer array.
-		/// </summary>
-		/// <param name="buffer">the buffer to copy
-		/// </param>
-		/// <param name="offset">the index in the buffer of the first character to copy
-		/// </param>
-		/// <param name="length">the number of characters to copy
-		/// </param>
-		void  SetTermBuffer(System.String buffer, int offset, int length);
-		
-		/// <summary>Returns the internal termBuffer character array which
-		/// you can then directly alter.  If the array is too
-		/// small for your token, use <see cref="ResizeTermBuffer(int)" />
-		/// to increase it.  After
-		/// altering the buffer be sure to call <see cref="SetTermLength" />
-		/// to record the number of valid
-		/// characters that were placed into the termBuffer. 
-		/// </summary>
-		char[] TermBuffer();
-		
-		/// <summary>Grows the termBuffer to at least size newSize, preserving the
-		/// existing content. Note: If the next operation is to change
-		/// the contents of the term buffer use
-		/// <see cref="SetTermBuffer(char[], int, int)" />,
-		/// <see cref="SetTermBuffer(String)" />, or
-		/// <see cref="SetTermBuffer(String, int, int)" />
-		/// to optimally combine the resize with the setting of the termBuffer.
-		/// </summary>
-		/// <param name="newSize">minimum size of the new termBuffer
-		/// </param>
-		/// <returns> newly created termBuffer with length >= newSize
-		/// </returns>
-		char[] ResizeTermBuffer(int newSize);
-		
-		/// <summary>Return number of valid characters (length of the term)
-		/// in the termBuffer array. 
-		/// </summary>
-		int TermLength();
-		
-		/// <summary>Set number of valid characters (length of the term) in
-		/// the termBuffer array. Use this to truncate the termBuffer
-		/// or to synchronize with external manipulation of the termBuffer.
-		/// Note: to grow the size of the array,
-		/// use <see cref="ResizeTermBuffer(int)" /> first.
-		/// </summary>
-		/// <param name="length">the truncated length
-		/// </param>
-		void  SetTermLength(int length);
-	}
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/733dc181/src/core/Analysis/Tokenattributes/TermAttribute.cs
----------------------------------------------------------------------
diff --git a/src/core/Analysis/Tokenattributes/TermAttribute.cs b/src/core/Analysis/Tokenattributes/TermAttribute.cs
deleted file mode 100644
index f95402c..0000000
--- a/src/core/Analysis/Tokenattributes/TermAttribute.cs
+++ /dev/null
@@ -1,268 +0,0 @@
-/* 
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- * 
- * http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-using System;
-using Lucene.Net.Support;
-using ArrayUtil = Lucene.Net.Util.ArrayUtil;
-using Attribute = Lucene.Net.Util.Attribute;
-
-namespace Lucene.Net.Analysis.Tokenattributes
-{
-	
-	/// <summary> The term text of a Token.</summary>
-	[Serializable]
-	public class TermAttribute:Attribute, ITermAttribute, System.ICloneable
-	{
-		private static int MIN_BUFFER_SIZE = 10;
-		
-		private char[] termBuffer;
-		private int termLength;
-
-	    /// <summary>Returns the Token's term text.
-	    /// 
-	    /// This method has a performance penalty
-	    /// because the text is stored internally in a char[].  If
-	    /// possible, use <see cref="TermBuffer()" /> and 
-	    /// <see cref="TermLength()" /> directly instead.  If you 
-	    /// really need a String, use this method, which is nothing more than
-	    /// a convenience call to <b>new String(token.termBuffer(), 0, token.termLength())</b>
-	    /// </summary>
-	    public virtual string Term
-	    {
-	        get
-	        {
-	            InitTermBuffer();
-	            return new System.String(termBuffer, 0, termLength);
-	        }
-	    }
-
-	    /// <summary>Copies the contents of buffer, starting at offset for
-		/// length characters, into the termBuffer array.
-		/// </summary>
-		/// <param name="buffer">the buffer to copy
-		/// </param>
-		/// <param name="offset">the index in the buffer of the first character to copy
-		/// </param>
-		/// <param name="length">the number of characters to copy
-		/// </param>
-		public virtual void  SetTermBuffer(char[] buffer, int offset, int length)
-		{
-			GrowTermBuffer(length);
-			Array.Copy(buffer, offset, termBuffer, 0, length);
-			termLength = length;
-		}
-		
-		/// <summary>Copies the contents of buffer into the termBuffer array.</summary>
-		/// <param name="buffer">the buffer to copy
-		/// </param>
-		public virtual void  SetTermBuffer(System.String buffer)
-		{
-			int length = buffer.Length;
-			GrowTermBuffer(length);
-			TextSupport.GetCharsFromString(buffer, 0, length, termBuffer, 0);
-			termLength = length;
-		}
-		
-		/// <summary>Copies the contents of buffer, starting at offset and continuing
-		/// for length characters, into the termBuffer array.
-		/// </summary>
-		/// <param name="buffer">the buffer to copy
-		/// </param>
-		/// <param name="offset">the index in the buffer of the first character to copy
-		/// </param>
-		/// <param name="length">the number of characters to copy
-		/// </param>
-		public virtual void  SetTermBuffer(System.String buffer, int offset, int length)
-		{
-			System.Diagnostics.Debug.Assert(offset <= buffer.Length);
-			System.Diagnostics.Debug.Assert(offset + length <= buffer.Length);
-			GrowTermBuffer(length);
-			TextSupport.GetCharsFromString(buffer, offset, offset + length, termBuffer, 0);
-			termLength = length;
-		}
-		
-		/// <summary>Returns the internal termBuffer character array which
-		/// you can then directly alter.  If the array is too
-		/// small for your token, use <see cref="ResizeTermBuffer(int)" />
-		/// to increase it.  After
-		/// altering the buffer be sure to call <see cref="SetTermLength" />
-		/// to record the number of valid
-		/// characters that were placed into the termBuffer. 
-		/// </summary>
-		public virtual char[] TermBuffer()
-		{
-			InitTermBuffer();
-			return termBuffer;
-		}
-		
-		/// <summary>Grows the termBuffer to at least size newSize, preserving the
-		/// existing content. Note: If the next operation is to change
-		/// the contents of the term buffer use
-		/// <see cref="SetTermBuffer(char[], int, int)" />,
-		/// <see cref="SetTermBuffer(String)" />, or
-		/// <see cref="SetTermBuffer(String, int, int)" />
-		/// to optimally combine the resize with the setting of the termBuffer.
-		/// </summary>
-		/// <param name="newSize">minimum size of the new termBuffer
-		/// </param>
-		/// <returns> newly created termBuffer with length >= newSize
-		/// </returns>
-		public virtual char[] ResizeTermBuffer(int newSize)
-		{
-			if (termBuffer == null)
-			{
-				// The buffer is always at least MIN_BUFFER_SIZE
-				termBuffer = new char[ArrayUtil.GetNextSize(newSize < MIN_BUFFER_SIZE?MIN_BUFFER_SIZE:newSize)];
-			}
-			else
-			{
-				if (termBuffer.Length < newSize)
-				{
-					// Not big enough; create a new array with slight
-					// over allocation and preserve content
-					char[] newCharBuffer = new char[ArrayUtil.GetNextSize(newSize)];
-					Array.Copy(termBuffer, 0, newCharBuffer, 0, termBuffer.Length);
-					termBuffer = newCharBuffer;
-				}
-			}
-			return termBuffer;
-		}
-		
-		
-		/// <summary>Allocates a buffer char[] of at least newSize, without preserving the existing content.
-		/// its always used in places that set the content 
-		/// </summary>
-		/// <param name="newSize">minimum size of the buffer
-		/// </param>
-		private void  GrowTermBuffer(int newSize)
-		{
-			if (termBuffer == null)
-			{
-				// The buffer is always at least MIN_BUFFER_SIZE
-				termBuffer = new char[ArrayUtil.GetNextSize(newSize < MIN_BUFFER_SIZE?MIN_BUFFER_SIZE:newSize)];
-			}
-			else
-			{
-				if (termBuffer.Length < newSize)
-				{
-					// Not big enough; create a new array with slight
-					// over allocation:
-					termBuffer = new char[ArrayUtil.GetNextSize(newSize)];
-				}
-			}
-		}
-		
-		private void  InitTermBuffer()
-		{
-			if (termBuffer == null)
-			{
-				termBuffer = new char[ArrayUtil.GetNextSize(MIN_BUFFER_SIZE)];
-				termLength = 0;
-			}
-		}
-		
-		/// <summary>Return number of valid characters (length of the term)
-		/// in the termBuffer array. 
-		/// </summary>
-		public virtual int TermLength()
-		{
-			return termLength;
-		}
-		
-		/// <summary>Set number of valid characters (length of the term) in
-		/// the termBuffer array. Use this to truncate the termBuffer
-		/// or to synchronize with external manipulation of the termBuffer.
-		/// Note: to grow the size of the array,
-		/// use <see cref="ResizeTermBuffer(int)" /> first.
-		/// </summary>
-		/// <param name="length">the truncated length
-		/// </param>
-		public virtual void  SetTermLength(int length)
-		{
-			InitTermBuffer();
-			if (length > termBuffer.Length)
-				throw new System.ArgumentException("length " + length + " exceeds the size of the termBuffer (" + termBuffer.Length + ")");
-			termLength = length;
-		}
-		
-		public override int GetHashCode()
-		{
-			InitTermBuffer();
-			int code = termLength;
-			code = code * 31 + ArrayUtil.HashCode(termBuffer, 0, termLength);
-			return code;
-		}
-		
-		public override void  Clear()
-		{
-			termLength = 0;
-		}
-		
-		public override System.Object Clone()
-		{
-			TermAttribute t = (TermAttribute) base.Clone();
-			// Do a deep clone
-			if (termBuffer != null)
-			{
-				t.termBuffer = new char[termBuffer.Length];
-				termBuffer.CopyTo(t.termBuffer, 0);
-			}
-			return t;
-		}
-		
-		public  override bool Equals(System.Object other)
-		{
-			if (other == this)
-			{
-				return true;
-			}
-			
-			if (other is ITermAttribute)
-			{
-				InitTermBuffer();
-				TermAttribute o = ((TermAttribute) other);
-				o.InitTermBuffer();
-				
-				if (termLength != o.termLength)
-					return false;
-				for (int i = 0; i < termLength; i++)
-				{
-					if (termBuffer[i] != o.termBuffer[i])
-					{
-						return false;
-					}
-				}
-				return true;
-			}
-			
-			return false;
-		}
-		
-		public override System.String ToString()
-		{
-			InitTermBuffer();
-			return "term=" + new System.String(termBuffer, 0, termLength);
-		}
-		
-		public override void  CopyTo(Attribute target)
-		{
-			InitTermBuffer();
-			ITermAttribute t = (ITermAttribute) target;
-			t.SetTermBuffer(termBuffer, 0, termLength);
-		}
-	}
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/733dc181/src/core/Codecs/Compressing/CompressionMode.cs
----------------------------------------------------------------------
diff --git a/src/core/Codecs/Compressing/CompressionMode.cs b/src/core/Codecs/Compressing/CompressionMode.cs
index 92f5316..4a91d5c 100644
--- a/src/core/Codecs/Compressing/CompressionMode.cs
+++ b/src/core/Codecs/Compressing/CompressionMode.cs
@@ -193,7 +193,7 @@ namespace Lucene.Net.Codecs.Compressing
 
             public DeflateDecompressor()
             {
-                decompressor = new Inflater();
+                decompressor = SharpZipLib.CreateInflater();
                 compressed = new byte[0];
             }
 
@@ -225,7 +225,7 @@ namespace Lucene.Net.Codecs.Compressing
                     try
                     {
                         int remaining = bytes.bytes.Length - bytes.length;
-                        count = decompressor.Inflate(bytes.bytes, bytes.length, remaining);
+                        count = decompressor.Inflate((byte[])(Array)bytes.bytes, bytes.length, remaining);
                     }
                     catch (FormatException e)
                     {
@@ -274,10 +274,10 @@ namespace Lucene.Net.Codecs.Compressing
             public override void Compress(sbyte[] bytes, int off, int len, DataOutput output)
             {
                 compressor.Reset();
-                compressor.SetInput(bytes, off, len);
+                compressor.SetInput((byte[])(Array)bytes, off, len);
                 compressor.Finish();
 
-                if (compressor.NeedsInput)
+                if (compressor.IsNeedingInput)
                 {
                     // no output
                     output.WriteVInt(0);
@@ -287,7 +287,7 @@ namespace Lucene.Net.Codecs.Compressing
                 int totalCount = 0;
                 for (; ; )
                 {
-                    int count = compressor.Deflate(compressed, totalCount, compressed.Length - totalCount);
+                    int count = compressor.Deflate((byte[])(Array)compressed, totalCount, compressed.Length - totalCount);
                     totalCount += count;
                     if (compressor.IsFinished)
                     {
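
The (byte[])(Array) double casts above reinterpret the port's sbyte[] buffers
as the byte[] that the SharpZipLib Inflater/Deflater expect, without copying.
C# rejects a direct sbyte[]-to-byte[] cast at compile time, but the CLR allows
it between arrays of same-size signed/unsigned primitives once the static type
is erased to Array. A standalone illustration:

    using System;

    internal static class SByteBridge
    {
        internal static void Demo()
        {
            sbyte[] signed = { -1, 0, 1 };
            byte[] unsigned = (byte[])(Array)signed; // no allocation, same storage
            Console.WriteLine(unsigned[0]);          // prints 255
        }
    }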

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/733dc181/src/core/Codecs/StoredFieldsWriter.cs
----------------------------------------------------------------------
diff --git a/src/core/Codecs/StoredFieldsWriter.cs b/src/core/Codecs/StoredFieldsWriter.cs
index 536d386..86e58ed 100644
--- a/src/core/Codecs/StoredFieldsWriter.cs
+++ b/src/core/Codecs/StoredFieldsWriter.cs
@@ -62,7 +62,7 @@ namespace Lucene.Net.Codecs
             int storedCount = 0;
             foreach (IIndexableField field in doc)
             {
-                if (field.FieldType.Stored)
+                if (field.FieldTypeValue.Stored)
                 {
                     storedCount++;
                 }
@@ -72,7 +72,7 @@ namespace Lucene.Net.Codecs
 
             foreach (IIndexableField field in doc)
             {
-                if (field.FieldType.Stored)
+                if (field.FieldTypeValue.Stored)
                 {
                     WriteField(fieldInfos.FieldInfo(field.Name), field);
                 }

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/733dc181/src/core/Index/AtomicReader.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/AtomicReader.cs b/src/core/Index/AtomicReader.cs
index 5981765..b0dc1b6 100644
--- a/src/core/Index/AtomicReader.cs
+++ b/src/core/Index/AtomicReader.cs
@@ -27,6 +27,16 @@ namespace Lucene.Net.Index
             }
         }
 
+        // .NET Port: This is needed since Context can't change return type on override
+        public AtomicReaderContext AtomicContext
+        {
+            get
+            {
+                EnsureOpen();
+                return readerContext;
+            }
+        }
+
         [Obsolete]
         public bool HasNorms(string field)
         {
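
AtomicContext exists because C# overrides cannot covariantly narrow a return
type: the inherited Context property must keep returning the base
IndexReaderContext. The ctx.AtomicReader call sites in the MultiFields hunk
below rely on the same pattern. A reduced sketch:

    internal abstract class ReaderContextBase { }
    internal sealed class AtomicCtx : ReaderContextBase { }

    internal abstract class ReaderBase
    {
        public abstract ReaderContextBase Context { get; }
    }

    internal class AtomicReaderSketch : ReaderBase
    {
        private readonly AtomicCtx readerContext = new AtomicCtx();

        // The override keeps the base return type...
        public override ReaderContextBase Context { get { return readerContext; } }

        // ...while a separately named member exposes the derived type cast-free.
        public AtomicCtx AtomicContext { get { return readerContext; } }
    }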

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/733dc181/src/core/Index/MultiFields.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/MultiFields.cs b/src/core/Index/MultiFields.cs
index 8f2a961..3ed64aa 100644
--- a/src/core/Index/MultiFields.cs
+++ b/src/core/Index/MultiFields.cs
@@ -23,13 +23,13 @@ namespace Lucene.Net.Index
                     return null;
                 case 1:
                     // already an atomic reader / reader with one leave
-                    return leaves[0].Reader.Fields;
+                    return leaves[0].AtomicReader.Fields;
                 default:
                     IList<Fields> fields = new List<Fields>();
                     IList<ReaderSlice> slices = new List<ReaderSlice>();
                     foreach (AtomicReaderContext ctx in leaves)
                     {
-                        AtomicReader r = ctx.Reader;
+                        AtomicReader r = ctx.AtomicReader;
                         Fields f = r.Fields;
                         if (f != null)
                         {
@@ -61,7 +61,7 @@ namespace Lucene.Net.Index
                 //assert size > 0 : "A reader with deletions must have at least one leave";
                 if (size == 1)
                 {
-                    return leaves[0].Reader.LiveDocs;
+                    return leaves[0].AtomicReader.LiveDocs;
                 }
                 IBits[] liveDocs = new IBits[size];
                 int[] starts = new int[size + 1];
@@ -69,7 +69,7 @@ namespace Lucene.Net.Index
                 {
                     // record all liveDocs, even if they are null
                     AtomicReaderContext ctx = leaves[i];
-                    liveDocs[i] = ctx.Reader.LiveDocs;
+                    liveDocs[i] = ctx.AtomicReader.LiveDocs;
                     starts[i] = ctx.docBase;
                 }
                 starts[size] = reader.MaxDoc;
@@ -199,7 +199,7 @@ namespace Lucene.Net.Index
             FieldInfos.Builder builder = new FieldInfos.Builder();
             foreach (AtomicReaderContext ctx in reader.Leaves)
             {
-                builder.Add(ctx.Reader.FieldInfos);
+                builder.Add(ctx.AtomicReader.FieldInfos);
             }
             return builder.Finish();
         }

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/733dc181/src/core/Index/MultiTermsEnum.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/MultiTermsEnum.cs b/src/core/Index/MultiTermsEnum.cs
index 164b33b..2a9dd35 100644
--- a/src/core/Index/MultiTermsEnum.cs
+++ b/src/core/Index/MultiTermsEnum.cs
@@ -119,7 +119,7 @@ namespace Lucene.Net.Index
                 }
             }
 
-            if (queue.Size() == 0)
+            if (queue.Size == 0)
             {
                 return TermsEnum.EMPTY;
             }

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/733dc181/src/core/Index/NormsConsumer.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/NormsConsumer.cs b/src/core/Index/NormsConsumer.cs
index b166975..afd7fd5 100644
--- a/src/core/Index/NormsConsumer.cs
+++ b/src/core/Index/NormsConsumer.cs
@@ -21,7 +21,7 @@ namespace Lucene.Net.Index
             {
                 if (state.fieldInfos.HasNorms)
                 {
-                    NormsFormat normsFormat = state.segmentInfo.Codec.NormsFormat();
+                    NormsFormat normsFormat = state.segmentInfo.Codec.NormsFormat;
                     //assert normsFormat != null;
                     normsConsumer = normsFormat.NormsConsumer(state);
 

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/733dc181/src/core/Index/NumericDocValuesWriter.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/NumericDocValuesWriter.cs b/src/core/Index/NumericDocValuesWriter.cs
index 35542cd..9de0e54 100644
--- a/src/core/Index/NumericDocValuesWriter.cs
+++ b/src/core/Index/NumericDocValuesWriter.cs
@@ -70,7 +70,7 @@ namespace Lucene.Net.Index
         {
             // .NET Port: using yield return instead of custom iterator type. Much less code.
 
-            AppendingLongBuffer.Iterator iter = pending.GetIterator();
+            AbstractAppendingLongBuffer.Iterator iter = pending.GetIterator();
             int size = (int)pending.Size;
             int upto = 0;
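
The ".NET Port: using yield return" comment (repeated in the
SortedDocValuesWriter and SortedSetDocValuesWriter hunks below) refers to
replacing Java's hand-written iterator inner classes with compiler-generated
iterator methods. A simplified sketch of the shape, not the actual writer:

    using System.Collections.Generic;

    internal class PendingValues
    {
        private readonly long[] pending;

        public PendingValues(long[] pending) { this.pending = pending; }

        // The compiler builds the state machine that the Java version
        // spells out as an explicit Iterator subclass.
        public IEnumerable<long> GetValues(int maxDoc)
        {
            for (int upto = 0; upto < maxDoc; upto++)
                yield return upto < pending.Length ? pending[upto] : 0L;
        }
    }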
 

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/733dc181/src/core/Index/ReadersAndLiveDocs.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/ReadersAndLiveDocs.cs b/src/core/Index/ReadersAndLiveDocs.cs
index 0fb3aa2..28ae366 100644
--- a/src/core/Index/ReadersAndLiveDocs.cs
+++ b/src/core/Index/ReadersAndLiveDocs.cs
@@ -302,7 +302,7 @@ namespace Lucene.Net.Index
                     // SegmentReader sharing the current liveDocs
                     // instance; must now make a private clone so we can
                     // change it:
-                    LiveDocsFormat liveDocsFormat = info.info.Codec.LiveDocsFormat();
+                    LiveDocsFormat liveDocsFormat = info.info.Codec.LiveDocsFormat;
                     if (liveDocs == null)
                     {
                         //System.out.println("create BV seg=" + info);
@@ -388,7 +388,7 @@ namespace Lucene.Net.Index
                 bool success = false;
                 try
                 {
-                    info.info.Codec.LiveDocsFormat().WriteLiveDocs((IMutableBits)liveDocs, trackingDir, info, pendingDeleteCount, IOContext.DEFAULT);
+                    info.info.Codec.LiveDocsFormat.WriteLiveDocs((IMutableBits)liveDocs, trackingDir, info, pendingDeleteCount, IOContext.DEFAULT);
                     success = true;
                 }
                 finally

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/733dc181/src/core/Index/SegmentInfoPerCommit.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/SegmentInfoPerCommit.cs b/src/core/Index/SegmentInfoPerCommit.cs
index 9434936..603446b 100644
--- a/src/core/Index/SegmentInfoPerCommit.cs
+++ b/src/core/Index/SegmentInfoPerCommit.cs
@@ -72,7 +72,7 @@ namespace Lucene.Net.Index
                 ICollection<String> files = new HashSet<String>(info.Files);
 
                 // Must separately add any live docs files:
-                info.Codec.LiveDocsFormat().Files(this, files);
+                info.Codec.LiveDocsFormat.Files(this, files);
 
                 return files;
             }

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/733dc181/src/core/Index/SortedDocValuesWriter.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/SortedDocValuesWriter.cs b/src/core/Index/SortedDocValuesWriter.cs
index f25c457..cc6a23c 100644
--- a/src/core/Index/SortedDocValuesWriter.cs
+++ b/src/core/Index/SortedDocValuesWriter.cs
@@ -135,7 +135,7 @@ namespace Lucene.Net.Index
         {
             // .NET Port: using yield return instead of custom iterator type. Much less code.
 
-            AppendingLongBuffer.Iterator iter = pending.GetIterator();
+            AbstractAppendingLongBuffer.Iterator iter = pending.GetIterator();
             int docUpto = 0;
 
             while (docUpto < maxDoc)

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/733dc181/src/core/Index/SortedSetDocValuesWriter.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/SortedSetDocValuesWriter.cs b/src/core/Index/SortedSetDocValuesWriter.cs
index 23998e4..9cddcef 100644
--- a/src/core/Index/SortedSetDocValuesWriter.cs
+++ b/src/core/Index/SortedSetDocValuesWriter.cs
@@ -178,8 +178,8 @@ namespace Lucene.Net.Index
         {
             // .NET Port: using yield return instead of custom iterator type. Much less code.
 
-            AppendingLongBuffer.Iterator iter = pending.GetIterator();
-            AppendingLongBuffer.Iterator counts = pendingCounts.GetIterator();
+            AbstractAppendingLongBuffer.Iterator iter = pending.GetIterator();
+            AbstractAppendingLongBuffer.Iterator counts = pendingCounts.GetIterator();
             long numOrds = pending.Size;
             long ordUpto = 0L;
 
@@ -212,7 +212,7 @@ namespace Lucene.Net.Index
         {
             // .NET Port: using yield return instead of custom iterator type. Much less code.
 
-            AppendingLongBuffer.Iterator iter = pendingCounts.GetIterator();
+            AbstractAppendingLongBuffer.Iterator iter = pendingCounts.GetIterator();
             int docUpto = 0;
 
             while (docUpto < maxDoc)

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/733dc181/src/core/Index/StandardDirectoryReader.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/StandardDirectoryReader.cs b/src/core/Index/StandardDirectoryReader.cs
index 4ec5e91..55d222e 100644
--- a/src/core/Index/StandardDirectoryReader.cs
+++ b/src/core/Index/StandardDirectoryReader.cs
@@ -28,9 +28,12 @@ namespace Lucene.Net.Index
 
         private sealed class AnonymousOpenFindSegmentsFile : SegmentInfos.FindSegmentsFile
         {
-            public AnonymousOpenFindSegmentsFile(Directory dir)
+            private readonly int termInfosIndexDivisor;
+
+            public AnonymousOpenFindSegmentsFile(Directory dir, int termInfosIndexDivisor)
                 : base(dir)
             {
+                this.termInfosIndexDivisor = termInfosIndexDivisor;
             }
 
             protected override object DoBody(string segmentFileName)
@@ -63,7 +66,7 @@ namespace Lucene.Net.Index
 
         internal static DirectoryReader Open(Directory directory, IndexCommit commit, int termInfosIndexDivisor)
         {
-            return (DirectoryReader)new AnonymousOpenFindSegmentsFile(directory).Run(commit);
+            return (DirectoryReader)new AnonymousOpenFindSegmentsFile(directory, termInfosIndexDivisor).Run(commit);
         }
 
         internal static DirectoryReader Open(IndexWriter writer, SegmentInfos infos, bool applyAllDeletes)
@@ -94,7 +97,7 @@ namespace Lucene.Net.Index
                     try
                     {
                         SegmentReader reader = rld.GetReadOnlyClone(IOContext.READ);
-                        if (reader.NumDocs > 0 || writer.KeepFullyDeletedSegments)
+                        if (reader.NumDocs > 0 || writer.GetKeepFullyDeletedSegments())
                         {
                             // Steal the ref:
                             readers.Add(reader);
@@ -103,7 +106,7 @@ namespace Lucene.Net.Index
                         else
                         {
                             reader.Dispose();
-                            segmentInfos.Remove(infosUpto);
+                            segmentInfos.RemoveAt(infosUpto);
                         }
                     }
                     finally
@@ -155,7 +158,7 @@ namespace Lucene.Net.Index
             for (int i = infos.Count - 1; i >= 0; i--)
             {
                 // find SegmentReader for this segment
-                int oldReaderIndex = segmentReaders[infos.Info(i).info.Name];
+                int oldReaderIndex = segmentReaders[infos.Info(i).info.name];
                 if (oldReaderIndex == null)
                 {
                     // this is a new segment, no old SegmentReader can be reused
@@ -395,7 +398,7 @@ namespace Lucene.Net.Index
             get
             {
                 EnsureOpen();
-                if (writer == null || writer.IsClosed())
+                if (writer == null || writer.IsClosed)
                 {
                     // Fully read the segments file: this ensures that it's
                     // completely written so that if
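
Besides the property renames, this hunk fixes a capture bug: the
termInfosIndexDivisor argument was previously dropped, because a C# nested
class, unlike a Java anonymous class, does not capture enclosing locals
automatically; they must be passed in and stored. In miniature:

    internal sealed class FindSegmentsFileSketch
    {
        private readonly int termInfosIndexDivisor; // explicit capture

        public FindSegmentsFileSketch(int termInfosIndexDivisor)
        {
            this.termInfosIndexDivisor = termInfosIndexDivisor;
        }

        public object DoBody(string segmentFileName)
        {
            // Reads the stored field where the Java anonymous class would
            // have read the enclosing method's local directly.
            return termInfosIndexDivisor;
        }
    }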

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/733dc181/src/core/Index/StoredFieldsProcessor.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/StoredFieldsProcessor.cs b/src/core/Index/StoredFieldsProcessor.cs
index 0f119ac..351a24e 100644
--- a/src/core/Index/StoredFieldsProcessor.cs
+++ b/src/core/Index/StoredFieldsProcessor.cs
@@ -132,7 +132,7 @@ namespace Lucene.Net.Index
 
         public override void AddField(int docID, IIndexableField field, FieldInfo fieldInfo)
         {
-            if (field.FieldType.Stored)
+            if (field.FieldTypeValue.Stored)
             {
                 if (numStoredFields == storedFields.Length)
                 {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/733dc181/src/core/Index/TermContext.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/TermContext.cs b/src/core/Index/TermContext.cs
index 1d1410a..932d295 100644
--- a/src/core/Index/TermContext.cs
+++ b/src/core/Index/TermContext.cs
@@ -47,7 +47,7 @@ namespace Lucene.Net.Index
             foreach (AtomicReaderContext ctx in context.Leaves)
             {
                 //if (DEBUG) System.out.println("  r=" + leaves[i].reader);
-                Fields fields = ctx.Reader.Fields;
+                Fields fields = ctx.AtomicReader.Fields;
                 if (fields != null)
                 {
                     Terms terms = fields.Terms(field);

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/733dc181/src/core/Index/TermVectorsConsumerPerField.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/TermVectorsConsumerPerField.cs b/src/core/Index/TermVectorsConsumerPerField.cs
index adda8ec..1c0a1fd 100644
--- a/src/core/Index/TermVectorsConsumerPerField.cs
+++ b/src/core/Index/TermVectorsConsumerPerField.cs
@@ -70,37 +70,37 @@ namespace Lucene.Net.Index
                     }
                     else
                     {
-                        if (field.FieldType.StoreTermVectorOffsets)
+                        if (field.FieldTypeValue.StoreTermVectorOffsets)
                         {
-                            throw new ArgumentException("cannot index term vector offsets when term vectors are not indexed (field=\"" + field.name());
+                            throw new ArgumentException("cannot index term vector offsets when term vectors are not indexed (field=\"" + field.Name);
                         }
-                        if (field.FieldType.StoreTermVectorPositions)
+                        if (field.FieldTypeValue.StoreTermVectorPositions)
                         {
-                            throw new ArgumentException("cannot index term vector positions when term vectors are not indexed (field=\"" + field.name());
+                            throw new ArgumentException("cannot index term vector positions when term vectors are not indexed (field=\"" + field.Name);
                         }
-                        if (field.FieldType.StoreTermVectorPayloads)
+                        if (field.FieldTypeValue.StoreTermVectorPayloads)
                         {
-                            throw new ArgumentException("cannot index term vector payloads when term vectors are not indexed (field=\"" + field.name());
+                            throw new ArgumentException("cannot index term vector payloads when term vectors are not indexed (field=\"" + field.Name);
                         }
                     }
                 }
                 else
                 {
-                    if (field.FieldType.StoreTermVectors)
+                    if (field.FieldTypeValue.StoreTermVectors)
                     {
-                        throw new ArgumentException("cannot index term vectors when field is not indexed (field=\"" + field.name());
+                        throw new ArgumentException("cannot index term vectors when field is not indexed (field=\"" + field.Name);
                     }
-                    if (field.FieldType.StoreTermVectorOffsets)
+                    if (field.FieldTypeValue.StoreTermVectorOffsets)
                     {
-                        throw new ArgumentException("cannot index term vector offsets when field is not indexed (field=\"" + field.name());
+                        throw new ArgumentException("cannot index term vector offsets when field is not indexed (field=\"" + field.Name);
                     }
-                    if (field.FieldType.StoreTermVectorPositions)
+                    if (field.FieldTypeValue.StoreTermVectorPositions)
                     {
-                        throw new ArgumentException("cannot index term vector positions when field is not indexed (field=\"" + field.name());
+                        throw new ArgumentException("cannot index term vector positions when field is not indexed (field=\"" + field.Name);
                     }
-                    if (field.FieldType.StoreTermVectorPayloads)
+                    if (field.FieldTypeValue.StoreTermVectorPayloads)
                     {
-                        throw new ArgumentException("cannot index term vector payloads when field is not indexed (field=\"" + field.name());
+                        throw new ArgumentException("cannot index term vector payloads when field is not indexed (field=\"" + field.Name);
                     }
                 }
             }

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/733dc181/src/core/Lucene.Net.csproj
----------------------------------------------------------------------
diff --git a/src/core/Lucene.Net.csproj b/src/core/Lucene.Net.csproj
index 4b17cab..a6f3de8 100644
--- a/src/core/Lucene.Net.csproj
+++ b/src/core/Lucene.Net.csproj
@@ -163,9 +163,7 @@
     <Compile Include="Analysis\Tokenattributes\PayloadAttribute.cs" />
     <Compile Include="Analysis\Tokenattributes\IPositionIncrementAttribute.cs" />
     <Compile Include="Analysis\Tokenattributes\PositionIncrementAttribute.cs" />
-    <Compile Include="Analysis\Tokenattributes\ITermAttribute.cs" />
     <Compile Include="Analysis\Tokenattributes\PositionLengthAttribute.cs" />
-    <Compile Include="Analysis\Tokenattributes\TermAttribute.cs" />
     <Compile Include="Analysis\Tokenattributes\ITypeAttribute.cs" />
     <Compile Include="Analysis\Tokenattributes\TypeAttribute.cs" />
     <Compile Include="Analysis\TokenFilter.cs">
@@ -920,6 +918,7 @@
     <Compile Include="Support\IChecksum.cs">
       <SubType>Code</SubType>
     </Compile>
+    <Compile Include="Support\StringBuilderExtensions.cs" />
     <Compile Include="Support\StringTokenizer.cs" />
     <Compile Include="Support\TaskSchedulerCompletionService.cs" />
     <Compile Include="Support\ThreadFactory.cs" />

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/733dc181/src/core/Search/FieldComparator.cs
----------------------------------------------------------------------
diff --git a/src/core/Search/FieldComparator.cs b/src/core/Search/FieldComparator.cs
index 7caead2..6f5083d 100644
--- a/src/core/Search/FieldComparator.cs
+++ b/src/core/Search/FieldComparator.cs
@@ -79,17 +79,6 @@ namespace Lucene.Net.Search
     {
         // .NET Port: this class doesn't line-by-line match up with Java due to use of non-generic casting.
         // see FieldComparator below.
-
-        /// <summary> Set a new Reader. All doc correspond to the current Reader.
-        /// 
-        /// </summary>
-        /// <param name="reader">current reader
-        /// </param>
-        /// <param name="docBase">docBase of this reader 
-        /// </param>
-        /// <throws>  IOException </throws>
-        /// <throws>  IOException </throws>
-        public abstract FieldComparator<T> SetNextReader(AtomicReaderContext context);
         
         /// <summary> Return the actual value in the slot.
         /// 
@@ -129,7 +118,21 @@ namespace Lucene.Net.Search
         }
 
         public abstract int CompareDocToValue(int doc, T value);
+        
+        public abstract override int Compare(int slot1, int slot2);
+
+        public abstract override void SetBottom(int slot);
+
+        public abstract override int CompareBottom(int doc);
 
+        public abstract override void Copy(int slot, int doc);
+
+        public abstract override FieldComparator SetNextReader(AtomicReaderContext context);
+
+        public override int CompareDocToObjectValue(int doc, object value)
+        {
+            return CompareDocToValue(doc, (T)value);
+        }
     }
 
     // .NET Port: Using a non-generic class here so that we avoid having to use the 
@@ -190,6 +193,17 @@ namespace Lucene.Net.Search
         /// <param name="doc">docID relative to current reader
         /// </param>
         public abstract void Copy(int slot, int doc);
+        
+        /// <summary> Set a new Reader. All doc correspond to the current Reader.
+        /// 
+        /// </summary>
+        /// <param name="reader">current reader
+        /// </param>
+        /// <param name="docBase">docBase of this reader 
+        /// </param>
+        /// <throws>  IOException </throws>
+        /// <throws>  IOException </throws>
+        public abstract FieldComparator SetNextReader(AtomicReaderContext context);
 
         /// <summary>Sets the Scorer to use in case a document's score is
         /// needed.
@@ -206,6 +220,32 @@ namespace Lucene.Net.Search
 
         public abstract object Value(int slot);
 
+        public int CompareValues(object first, object second)
+        {
+            if (first == null)
+            {
+                if (second == null)
+                {
+                    return 0;
+                }
+                else
+                {
+                    return -1;
+                }
+            }
+            else if (second == null)
+            {
+                return 1;
+            }
+            else
+            {
+                return ((IComparable)first).CompareTo(second);
+            }
+        }
+
+        // .NET Port: a non-generic version of this method
+        public abstract int CompareDocToObjectValue(int doc, object value);
+
         public abstract class NumericComparator<T> : FieldComparator<T>
             where T : struct
         {
@@ -219,7 +259,7 @@ namespace Lucene.Net.Search
                 this.missingValue = missingValue;
             }
 
-            public override FieldComparator<T> SetNextReader(AtomicReaderContext context)
+            public override FieldComparator SetNextReader(AtomicReaderContext context)
             {
                 if (missingValue != null)
                 {
@@ -285,7 +325,7 @@ namespace Lucene.Net.Search
                 values[slot] = v2;
             }
 
-            public override FieldComparator<sbyte> SetNextReader(AtomicReaderContext context)
+            public override FieldComparator SetNextReader(AtomicReaderContext context)
             {
                 // NOTE: must do this before calling super otherwise
                 // we compute the docsWithField Bits twice!
@@ -365,7 +405,7 @@ namespace Lucene.Net.Search
                 values[slot] = v2;
             }
 
-            public override FieldComparator<double> SetNextReader(AtomicReaderContext context)
+            public override FieldComparator SetNextReader(AtomicReaderContext context)
             {
                 // NOTE: must do this before calling super otherwise
                 // we compute the docsWithField Bits twice!
@@ -446,7 +486,7 @@ namespace Lucene.Net.Search
                 values[slot] = v2;
             }
 
-            public override FieldComparator<float> SetNextReader(AtomicReaderContext context)
+            public override FieldComparator SetNextReader(AtomicReaderContext context)
             {
                 // NOTE: must do this before calling super otherwise
                 // we compute the docsWithField Bits twice!
@@ -526,7 +566,7 @@ namespace Lucene.Net.Search
                 values[slot] = v2;
             }
 
-            public override FieldComparator<short> SetNextReader(AtomicReaderContext context)
+            public override FieldComparator SetNextReader(AtomicReaderContext context)
             {
                 // NOTE: must do this before calling super otherwise
                 // we compute the docsWithField Bits twice!
@@ -638,7 +678,7 @@ namespace Lucene.Net.Search
                 values[slot] = v2;
             }
 
-            public override FieldComparator<int> SetNextReader(AtomicReaderContext context)
+            public override FieldComparator SetNextReader(AtomicReaderContext context)
             {
                 // NOTE: must do this before calling super otherwise
                 // we compute the docsWithField Bits twice!
@@ -757,7 +797,7 @@ namespace Lucene.Net.Search
                 values[slot] = v2;
             }
 
-            public override FieldComparator<long> SetNextReader(AtomicReaderContext context)
+            public override FieldComparator SetNextReader(AtomicReaderContext context)
             {
                 // NOTE: must do this before calling super otherwise
                 // we compute the docsWithField Bits twice!
@@ -836,7 +876,7 @@ namespace Lucene.Net.Search
                 //assert !Float.isNaN(scores[slot]);
             }
 
-            public override FieldComparator<float> SetNextReader(AtomicReaderContext context)
+            public override FieldComparator SetNextReader(AtomicReaderContext context)
             {
                 return this;
             }
@@ -911,7 +951,7 @@ namespace Lucene.Net.Search
                 docIDs[slot] = docBase + doc;
             }
 
-            public override FieldComparator<int> SetNextReader(AtomicReaderContext context)
+            public override FieldComparator SetNextReader(AtomicReaderContext context)
             {
                 // TODO: can we "map" our docIDs to the current
                 // reader? saves having to then subtract on every
@@ -1033,7 +1073,7 @@ namespace Lucene.Net.Search
                     this.parent = parent;
                 }
 
-                public override FieldComparator<BytesRef> SetNextReader(AtomicReaderContext context)
+                public override FieldComparator SetNextReader(AtomicReaderContext context)
                 {
                     return parent.SetNextReader(context);
                 }
@@ -1131,7 +1171,7 @@ namespace Lucene.Net.Search
                 }
             }
 
-            public override FieldComparator<BytesRef> SetNextReader(AtomicReaderContext context)
+            public override FieldComparator SetNextReader(AtomicReaderContext context)
             {
                 int docBase = context.docBase;
                 termsIndex = FieldCache.DEFAULT.GetTermsIndex(context.AtomicReader, field);
@@ -1252,7 +1292,7 @@ namespace Lucene.Net.Search
                 docTerms.Get(doc, values[slot]);
             }
 
-            public override FieldComparator<BytesRef> SetNextReader(AtomicReaderContext context)
+            public override FieldComparator SetNextReader(AtomicReaderContext context)
             {
                 docTerms = FieldCache.DEFAULT.GetTerms(context.AtomicReader, field);
                 return this;

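The hunks above are the heart of this port: a non-generic FieldComparator base keeps the SortField plumbing free of type parameters, the generic FieldComparator<T> keeps comparisons type-safe, and CompareDocToObjectValue bridges the two with a single cast. A minimal sketch of the same pattern, using hypothetical names (Comparator, SimpleIntComparator, docValues) rather than the actual lucenenet types:

    // Non-generic base: callers can hold and drive a comparator without T.
    public abstract class Comparator
    {
        public abstract int Compare(int slot1, int slot2);
        public abstract object Value(int slot);
        public abstract int CompareDocToObjectValue(int doc, object value);
    }

    // Generic subclass: type-safe comparison, bridged via one cast.
    public abstract class Comparator<T> : Comparator
    {
        public abstract int CompareDocToValue(int doc, T value);

        // Cast once here instead of at every non-generic call site.
        public override int CompareDocToObjectValue(int doc, object value)
        {
            return CompareDocToValue(doc, (T)value);
        }
    }

    public class SimpleIntComparator : Comparator<int>
    {
        private readonly int[] values;    // one cached value per competitive slot
        private readonly int[] docValues; // per-document values for the segment

        public SimpleIntComparator(int numSlots, int[] docValues)
        {
            values = new int[numSlots];
            this.docValues = docValues;
        }

        public override int Compare(int slot1, int slot2)
        {
            return values[slot1].CompareTo(values[slot2]);
        }

        public override object Value(int slot) { return values[slot]; }

        public override int CompareDocToValue(int doc, int value)
        {
            return docValues[doc].CompareTo(value);
        }
    }
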
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/733dc181/src/core/Search/MultiPhraseQuery.cs
----------------------------------------------------------------------
diff --git a/src/core/Search/MultiPhraseQuery.cs b/src/core/Search/MultiPhraseQuery.cs
index c525784..57cfe09 100644
--- a/src/core/Search/MultiPhraseQuery.cs
+++ b/src/core/Search/MultiPhraseQuery.cs
@@ -142,6 +142,7 @@ namespace Lucene.Net.Search
             {
                 this.parent = parent;
                 this.similarity = searcher.Similarity;
+                IndexReaderContext context = searcher.TopReaderContext;
 
                 // compute idf
                 var allTermStats = new List<TermStatistics>();
@@ -182,7 +183,7 @@ namespace Lucene.Net.Search
                                           IBits acceptDocs)
             {
                 //assert !termArrays.isEmpty();
-                var reader = context.Reader;
+                var reader = context.AtomicReader;
                 var liveDocs = acceptDocs;
 
                 var postingsFreqs = new PhraseQuery.PostingsAndFreq[parent.termArrays.Count];
@@ -284,7 +285,7 @@ namespace Lucene.Net.Search
 
             public override Explanation Explain(AtomicReaderContext context, int doc)
             {
-                var scorer = Scorer(context, true, false, context.Reader.LiveDocs);
+                var scorer = Scorer(context, true, false, context.AtomicReader.LiveDocs);
                 if (scorer != null)
                 {
                     var newDoc = scorer.Advance(doc);
@@ -316,7 +317,7 @@ namespace Lucene.Net.Search
             if (!termArrays.Any())
             {
                 var bq = new BooleanQuery();
-                bq.Boost = Boost);
+                bq.Boost = Boost;
                 return bq;
             }
             else if (termArrays.Count == 1)
@@ -325,7 +326,7 @@ namespace Lucene.Net.Search
                 var boq = new BooleanQuery(true);
                 foreach (var t in terms)
                 {
-                    boq.Add(new TermQuery(t), BooleanClause.Occur.SHOULD);
+                    boq.Add(new TermQuery(t), Occur.SHOULD);
                 }
                 boq.Boost = Boost;
                 return boq;

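Besides the AtomicReader change, the Rewrite hunk above fixes a stray parenthesis (bq.Boost = Boost);) and drops the BooleanClause qualifier on Occur. A usage sketch of the single-position case, assuming an IndexReader named reader in scope and invented field/term values:

    // A one-position MultiPhraseQuery rewrites to a BooleanQuery of
    // SHOULD TermQuery clauses carrying the original boost.
    var mpq = new MultiPhraseQuery();
    mpq.Add(new[] { new Term("body", "fast"), new Term("body", "quick") });
    Query rewritten = mpq.Rewrite(reader); // effectively: fast OR quick
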
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/733dc181/src/core/Search/MultiTermQueryWrapperFilter.cs
----------------------------------------------------------------------
diff --git a/src/core/Search/MultiTermQueryWrapperFilter.cs b/src/core/Search/MultiTermQueryWrapperFilter.cs
index d657e58..38b933b 100644
--- a/src/core/Search/MultiTermQueryWrapperFilter.cs
+++ b/src/core/Search/MultiTermQueryWrapperFilter.cs
@@ -79,7 +79,7 @@ namespace Lucene.Net.Search
 
         public override DocIdSet GetDocIdSet(AtomicReaderContext context, IBits acceptDocs)
         {
-            var reader = context.Reader;
+            var reader = context.AtomicReader;
             var fields = reader.Fields;
             if (fields == null)
             {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/733dc181/src/core/Search/NRTManager.cs
----------------------------------------------------------------------
diff --git a/src/core/Search/NRTManager.cs b/src/core/Search/NRTManager.cs
index 7eaa5bb..b6f4b27 100644
--- a/src/core/Search/NRTManager.cs
+++ b/src/core/Search/NRTManager.cs
@@ -29,7 +29,7 @@ namespace Lucene.Net.Search
                 searcherFactory = new SearcherFactory();
             }
             this.searcherFactory = searcherFactory;
-            Current = SearcherManager.GetSearcher(searcherFactory, DirectoryReader.Open(writer.IndexWriter, applyAllDeletes));
+            current = SearcherManager.GetSearcher(searcherFactory, DirectoryReader.Open(writer.IndexWriter, applyAllDeletes));
         }
 
         protected override void DecRef(IndexSearcher reference)
@@ -67,25 +67,25 @@ namespace Lucene.Net.Search
                 this.writer = writer;
             }
 
-            public virtual long UpdateDocument(Term t, IIndexableField d, Analyzer a)
+            public virtual long UpdateDocument(Term t, IEnumerable<IIndexableField> d, Analyzer a)
             {
                 writer.UpdateDocument(t, d, a);
                 return Interlocked.Read(ref indexingGen);
             }
 
-            public virtual long UpdateDocument(Term t, IIndexableField d)
+            public virtual long UpdateDocument(Term t, IEnumerable<IIndexableField> d)
             {
                 writer.UpdateDocument(t, d);
                 return Interlocked.Read(ref indexingGen);
             }
 
-            public virtual long UpdateDocuments(Term t, IEnumerable<IIndexableField> docs, Analyzer a)
+            public virtual long UpdateDocuments(Term t, IEnumerable<IEnumerable<IIndexableField>> docs, Analyzer a)
             {
                 writer.UpdateDocuments(t, docs, a);
                 return Interlocked.Read(ref indexingGen);
             }
 
-            public virtual long UpdateDocuments(Term t, IEnumerable<IIndexableField> docs)
+            public virtual long UpdateDocuments(Term t, IEnumerable<IEnumerable<IIndexableField>> docs)
             {
                 writer.UpdateDocuments(t, docs);
                 return Interlocked.Read(ref indexingGen);

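The signature changes above model a document as a sequence of fields rather than a single field. A call-shape sketch, assuming the port mirrors Lucene 4.x's Document/TextField API, with a hypothetical trackingWriter of the tracking type shown above:

    var doc = new Document(); // Document enumerates its IIndexableFields
    doc.Add(new TextField("id", "42", Field.Store.YES));
    doc.Add(new TextField("body", "hello world", Field.Store.NO));
    long gen = trackingWriter.UpdateDocument(new Term("id", "42"), doc);
    // gen can later be handed to the NRTManager to wait until this
    // change is visible to searchers.
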
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/733dc181/src/core/Search/NumericRangeQuery.cs
----------------------------------------------------------------------
diff --git a/src/core/Search/NumericRangeQuery.cs b/src/core/Search/NumericRangeQuery.cs
index 1b4e90c..c1a2acb 100644
--- a/src/core/Search/NumericRangeQuery.cs
+++ b/src/core/Search/NumericRangeQuery.cs
@@ -177,7 +177,7 @@ namespace Lucene.Net.Search
             {
                 return TermsEnum.EMPTY;
             }
-            return new NumericRangeTermsEnum(terms.Iterator(null));
+            return new NumericRangeTermsEnum(this, terms.Iterator(null));
         }
 
 	    /// <summary>Returns the field name for this query </summary>

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/733dc181/src/core/Search/Payloads/PayloadNearQuery.cs
----------------------------------------------------------------------
diff --git a/src/core/Search/Payloads/PayloadNearQuery.cs b/src/core/Search/Payloads/PayloadNearQuery.cs
index ddef9cb..6159fad 100644
--- a/src/core/Search/Payloads/PayloadNearQuery.cs
+++ b/src/core/Search/Payloads/PayloadNearQuery.cs
@@ -112,15 +112,18 @@ namespace Lucene.Net.Search.Payloads
 
         public class PayloadNearSpanScorer : SpanScorer
         {
+            private readonly PayloadNearQuery parent;
+
             private readonly BytesRef scratch = new BytesRef();
-            protected float payloadScore;
+            protected internal float payloadScore;
             internal int payloadsSeen;
             private SpansBase spans;
 
-            protected PayloadNearSpanScorer(SpansBase spans, Weight weight,
+            public PayloadNearSpanScorer(PayloadNearQuery parent, SpansBase spans, Weight weight,
                                             Similarity similarity, Similarity.SloppySimScorer docScorer)
                 : base(spans, weight, docScorer)
             {
+                this.parent = parent;
                 this.spans = spans;
             }
 
@@ -159,7 +162,7 @@ namespace Lucene.Net.Search.Payloads
                     scratch.bytes = thePayload;
                     scratch.offset = 0;
                     scratch.length = thePayload.Length;
-                    payloadScore = function.CurrentScore(doc, fieldName, start, end,
+                    payloadScore = parent.function.CurrentScore(doc, parent.fieldName, start, end,
                                                          payloadsSeen, payloadScore, docScorer.ComputePayloadFactor(doc, spans.Start, spans.End, scratch));
                     ++payloadsSeen;
                 }
@@ -179,7 +182,7 @@ namespace Lucene.Net.Search.Payloads
                 {
                     int matchLength = spans.End - spans.Start;
                     freq += docScorer.ComputeSlopFactor(matchLength);
-                    var spansArr = new Spans[1];
+                    var spansArr = new SpansBase[1];
                     spansArr[0] = spans;
                     GetPayloads(spansArr);
                     more = spans.Next();
@@ -190,21 +193,24 @@ namespace Lucene.Net.Search.Payloads
             public float Score()
             {
                 return base.Score()
-                       * function.DocScore(doc, fieldName, payloadsSeen, payloadScore);
+                       * parent.function.DocScore(doc, parent.fieldName, payloadsSeen, payloadScore);
             }
         }
 
         public class PayloadNearSpanWeight : SpanWeight
         {
-            public PayloadNearSpanWeight(SpanQuery query, IndexSearcher searcher)
+            private readonly PayloadNearQuery parent;
+
+            public PayloadNearSpanWeight(PayloadNearQuery query, IndexSearcher searcher)
                 : base(query, searcher)
             {
+                this.parent = query;
             }
-
+            
             public override Scorer Scorer(AtomicReaderContext context, bool scoreDocsInOrder,
                                           bool topScorer, IBits acceptDocs)
             {
-                return new PayloadNearSpanScorer(query.GetSpans(context, acceptDocs, termContexts), this,
+                return new PayloadNearSpanScorer(parent, query.GetSpans(context, acceptDocs, termContexts), this,
                                                  similarity, similarity.GetSloppySimScorer(stats, context));
             }
 
@@ -226,7 +232,7 @@ namespace Lucene.Net.Search.Payloads
                         expl.Value = scoreExplanation.Value;
                         String field = ((SpanQuery)Query).Field;
                         // now the payloads part
-                        Explanation payloadExpl = function.Explain(doc, field, scorer.payloadsSeen, scorer.payloadScore);
+                        Explanation payloadExpl = parent.function.Explain(doc, field, scorer.payloadsSeen, scorer.payloadScore);
                         // combined
                         var result = new ComplexExplanation();
                         result.AddDetail(expl);

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/733dc181/src/core/Search/Payloads/PayloadSpanUtil.cs
----------------------------------------------------------------------
diff --git a/src/core/Search/Payloads/PayloadSpanUtil.cs b/src/core/Search/Payloads/PayloadSpanUtil.cs
index 11f5249..3834e60 100644
--- a/src/core/Search/Payloads/PayloadSpanUtil.cs
+++ b/src/core/Search/Payloads/PayloadSpanUtil.cs
@@ -1,4 +1,5 @@
 using System.Collections.Generic;
+using System.Linq;
 using Lucene.Net.Index;
 using Lucene.Net.Search;
 using Lucene.Net.Search.Spans;
@@ -115,7 +116,7 @@ public class PayloadSpanUtil
                     IList<Query> disjuncts = disjunctLists[i];
                     if (disjuncts != null)
                     {
-                        clauses[position++] = new SpanOrQuery(disjuncts.ToArray(new SpanQuery[disjuncts.Count]));
+                        clauses[position++] = new SpanOrQuery(disjuncts.OfType<SpanQuery>().ToArray());
                     }
                     else
                     {
@@ -145,7 +146,7 @@ public class PayloadSpanUtil
         }
         foreach (AtomicReaderContext atomicReaderContext in context.Leaves)
         {
-            SpansBase spans = query.GetSpans(atomicReaderContext, atomicReaderContext.Reader.LiveDocs, termContexts);
+            SpansBase spans = query.GetSpans(atomicReaderContext, atomicReaderContext.AtomicReader.LiveDocs, termContexts);
             while (spans.Next())
             {
                 if (spans.IsPayloadAvailable())

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/733dc181/src/core/Search/Payloads/PayloadTermQuery.cs
----------------------------------------------------------------------
diff --git a/src/core/Search/Payloads/PayloadTermQuery.cs b/src/core/Search/Payloads/PayloadTermQuery.cs
index 3426bac..6b9db10 100644
--- a/src/core/Search/Payloads/PayloadTermQuery.cs
+++ b/src/core/Search/Payloads/PayloadTermQuery.cs
@@ -6,185 +6,227 @@ using Lucene.Net.Search.Similarities;
 using Lucene.Net.Search.Spans;
 using Lucene.Net.Util;
 
-public class PayloadTermQuery : SpanTermQuery {
-  protected PayloadFunction function;
-  private bool includeSpanScore;
-
-    public PayloadTermQuery(Term term, PayloadFunction function) : this(term, function, true) {}
-
-    public PayloadTermQuery(Term term, PayloadFunction function,
-      bool includeSpanScore) : base(term) {
-    this.function = function;
-    this.includeSpanScore = includeSpanScore;
-  }
-
-  public override Weight CreateWeight(IndexSearcher searcher)  {
-    return new PayloadTermWeight(this, searcher);
-  }
-
-  protected class PayloadTermWeight : SpanWeight {
-
-    public PayloadTermWeight(PayloadTermQuery query, IndexSearcher searcher) : base(query, searcher) {
-    }
-
-    public override Scorer Scorer(AtomicReaderContext context, bool scoreDocsInOrder,
-        bool topScorer, IBits acceptDocs)  {
-      return new PayloadTermSpanScorer((TermSpans) query.GetSpans(context, acceptDocs, termContexts),
-          this, similarity.GetSloppySimScorer(stats, context));
-    }
-
-    protected class PayloadTermSpanScorer : SpanScorer {
-      protected BytesRef payload;
-      protected float payloadScore;
-      protected int payloadsSeen;
-      private readonly TermSpans termSpans;
-
-      public PayloadTermSpanScorer(TermSpans spans, Weight weight, Similarity.SloppySimScorer docScorer) :base(spans, weight, docScorer) {
-        termSpans = spans;
-      }
+namespace Lucene.Net.Search.Payloads
+{
+    public class PayloadTermQuery : SpanTermQuery
+    {
+        protected PayloadFunction function;
+        private bool includeSpanScore;
+
+        public PayloadTermQuery(Term term, PayloadFunction function) : this(term, function, true) { }
+
+        public PayloadTermQuery(Term term, PayloadFunction function,
+          bool includeSpanScore)
+            : base(term)
+        {
+            this.function = function;
+            this.includeSpanScore = includeSpanScore;
+        }
 
-      protected override bool SetFreqCurrentDoc()  {
-        if (!more) {
-          return false;
+        public override Weight CreateWeight(IndexSearcher searcher)
+        {
+            return new PayloadTermWeight(this, searcher);
         }
-        doc = spans.Doc;
-        freq = 0.0f;
-        numMatches = 0;
-        payloadScore = 0;
-        payloadsSeen = 0;
-        while (more && doc == spans.Doc) {
-          int matchLength = spans.End - spans.Start;
-
-          freq += docScorer.ComputeSlopFactor(matchLength);
-          numMatches++;
-          ProcessPayload(similarity);
-
-          more = spans.Next();// this moves positions to the next match in this
-                              // document
+
+        protected class PayloadTermWeight : SpanWeight
+        {
+            protected readonly PayloadTermQuery parent;
+            
+            public PayloadTermWeight(PayloadTermQuery query, IndexSearcher searcher)
+                : base(query, searcher)
+            {
+                this.parent = query;
+            }
+
+            public override Scorer Scorer(AtomicReaderContext context, bool scoreDocsInOrder,
+                bool topScorer, IBits acceptDocs)
+            {
+                return new PayloadTermSpanScorer(this, (TermSpans)query.GetSpans(context, acceptDocs, termContexts),
+                    this, similarity.GetSloppySimScorer(stats, context));
+            }
+
+            protected class PayloadTermSpanScorer : SpanScorer
+            {
+                private readonly PayloadTermWeight parent;
+
+                protected BytesRef payload;
+                protected internal float payloadScore;
+                protected internal int payloadsSeen;
+                private readonly TermSpans termSpans;
+
+                public PayloadTermSpanScorer(PayloadTermWeight parent, TermSpans spans, Weight weight, Similarity.SloppySimScorer docScorer)
+                    : base(spans, weight, docScorer)
+                {
+                    this.parent = parent;
+                    termSpans = spans;
+                }
+
+                protected override bool SetFreqCurrentDoc()
+                {
+                    if (!more)
+                    {
+                        return false;
+                    }
+                    doc = spans.Doc;
+                    freq = 0.0f;
+                    numMatches = 0;
+                    payloadScore = 0;
+                    payloadsSeen = 0;
+                    while (more && doc == spans.Doc)
+                    {
+                        int matchLength = spans.End - spans.Start;
+
+                        freq += docScorer.ComputeSlopFactor(matchLength);
+                        numMatches++;
+                        ProcessPayload(parent.similarity);
+
+                        more = spans.Next();// this moves positions to the next match in this
+                        // document
+                    }
+                    return more || (freq != 0);
+                }
+
+                protected void ProcessPayload(Similarity similarity)
+                {
+                    if (termSpans.IsPayloadAvailable())
+                    {
+                        DocsAndPositionsEnum postings = termSpans.Postings;
+                        payload = postings.Payload;
+                        if (payload != null)
+                        {
+                            payloadScore = parent.parent.function.CurrentScore(doc, parent.parent.term.Field,
+                                                                 spans.Start, spans.End, payloadsSeen, payloadScore,
+                                                                 docScorer.ComputePayloadFactor(doc, spans.Start, spans.End, payload));
+                        }
+                        else
+                        {
+                            payloadScore = parent.parent.function.CurrentScore(doc, parent.parent.term.Field,
+                                                                 spans.Start, spans.End, payloadsSeen, payloadScore, 1F);
+                        }
+                        payloadsSeen++;
+
+                    }
+                    else
+                    {
+                        // zero out the payload?
+                    }
+                }
+
+                /**
+                 * 
+                 * @return {@link #getSpanScore()} * {@link #getPayloadScore()}
+                 * @throws IOException if there is a low-level I/O error
+                 */
+                public override float Score()
+                {
+
+                    return parent.parent.includeSpanScore ? GetSpanScore() * GetPayloadScore()
+                        : GetPayloadScore();
+                }
+
+                /**
+                 * Returns the SpanScorer score only.
+                 * <p/>
+                 * Should not be overridden without good cause!
+                 * 
+                 * @return the score for just the Span part w/o the payload
+                 * @throws IOException if there is a low-level I/O error
+                 * 
+                 * @see #score()
+                 */
+                protected float GetSpanScore()
+                {
+                    return base.Score();
+                }
+
+                /**
+                 * The score for the payload
+                 * 
+                 * @return The score, as calculated by
+                 *         {@link PayloadFunction#docScore(int, String, int, float)}
+                 */
+                protected internal float GetPayloadScore()
+                {
+                    return parent.parent.function.DocScore(doc, parent.parent.term.Field, payloadsSeen, payloadScore);
+                }
+            }
+
+            public override Explanation Explain(AtomicReaderContext context, int doc)
+            {
+                PayloadTermSpanScorer scorer = (PayloadTermSpanScorer)Scorer(context, true, false, context.AtomicReader.LiveDocs);
+                if (scorer != null)
+                {
+                    int newDoc = scorer.Advance(doc);
+                    if (newDoc == doc)
+                    {
+                        float freq = scorer.SloppyFreq();
+                        Similarity.SloppySimScorer docScorer = similarity.GetSloppySimScorer(stats, context);
+                        Explanation expl = new Explanation();
+                        expl.Description = "weight(" + Query + " in " + doc + ") [" + similarity.GetType().Name + "], result of:";
+                        Explanation scoreExplanation = docScorer.Explain(doc, new Explanation(freq, "phraseFreq=" + freq));
+                        expl.AddDetail(scoreExplanation);
+                        expl.Value = scoreExplanation.Value;
+                        // now the payloads part
+                        // QUESTION: Is there a way to avoid this skipTo call? We need to know
+                        // whether to load the payload or not
+                        // GSI: I suppose we could toString the payload, but I don't think that
+                        // would be a good idea
+                        string field = ((SpanQuery)Query).Field;
+                        Explanation payloadExpl = parent.function.Explain(doc, field, scorer.payloadsSeen, scorer.payloadScore);
+                        payloadExpl.Value = scorer.GetPayloadScore();
+                        // combined
+                        ComplexExplanation result = new ComplexExplanation();
+                        if (parent.includeSpanScore)
+                        {
+                            result.AddDetail(expl);
+                            result.AddDetail(payloadExpl);
+                            result.Value = expl.Value * payloadExpl.Value;
+                            result.Description = "btq, product of:";
+                        }
+                        else
+                        {
+                            result.AddDetail(payloadExpl);
+                            result.Value = payloadExpl.Value;
+                            result.Description = "btq(includeSpanScore=false), result of:";
+                        }
+                        result.Match = true; // LUCENE-1303
+                        return result;
+                    }
+                }
+
+                return new ComplexExplanation(false, 0.0f, "no matching term");
+            }
         }
-        return more || (freq != 0);
-      }
-
-      protected void ProcessPayload(Similarity similarity)  {
-        if (termSpans.IsPayloadAvailable()) {
-          DocsAndPositionsEnum postings = termSpans.Postings;
-          payload = postings.Payload;
-          if (payload != null) {
-            payloadScore = function.CurrentScore(doc, term.field(),
-                                                 spans.Start, spans.End, payloadsSeen, payloadScore,
-                                                 docScorer.ComputePayloadFactor(doc, spans.Start, spans.End, payload));
-          } else {
-            payloadScore = function.CurrentScore(doc, term.field(),
-                                                 spans.Start, spans.End, payloadsSeen, payloadScore, 1F);
-          }
-          payloadsSeen++;
-
-        } else {
-          // zero out the payload?
+
+        public override int GetHashCode()
+        {
+            int prime = 31;
+            int result = base.GetHashCode();
+            result = prime * result + ((function == null) ? 0 : function.GetHashCode());
+            result = prime * result + (includeSpanScore ? 1231 : 1237);
+            return result;
         }
-      }
-
-      /**
-       * 
-       * @return {@link #getSpanScore()} * {@link #getPayloadScore()}
-       * @throws IOException if there is a low-level I/O error
-       */
-      public override float Score()  {
-
-        return includeSpanScore ? GetSpanScore() * GetPayloadScore()
-            : GetPayloadScore();
-      }
-
-      /**
-       * Returns the SpanScorer score only.
-       * <p/>
-       * Should not be overridden without good cause!
-       * 
-       * @return the score for just the Span part w/o the payload
-       * @throws IOException if there is a low-level I/O error
-       * 
-       * @see #score()
-       */
-      protected float GetSpanScore()  {
-        return base.Score();
-      }
-
-      /**
-       * The score for the payload
-       * 
-       * @return The score, as calculated by
-       *         {@link PayloadFunction#docScore(int, String, int, float)}
-       */
-      protected float GetPayloadScore() {
-        return function.DocScore(doc, term.field(), payloadsSeen, payloadScore);
-      }
-    }
-    
-    public override Explanation Explain(AtomicReaderContext context, int doc)  {
-      PayloadTermSpanScorer scorer = (PayloadTermSpanScorer) Scorer(context, true, false, context.Reader.LiveDocs);
-      if (scorer != null) {
-        int newDoc = scorer.Advance(doc);
-        if (newDoc == doc) {
-          float freq = scorer.SloppyFreq();
-          Similarity.SloppySimScorer docScorer = Similarity.SloppySimScorer(stats, context);
-          Explanation expl = new Explanation();
-          expl.Description = "weight("+Query+" in "+doc+") [" + similarity.GetType().Name + "], result of:";
-          Explanation scoreExplanation = docScorer.Explain(doc, new Explanation(freq, "phraseFreq=" + freq));
-          expl.AddDetail(scoreExplanation);
-          expl.Value = scoreExplanation.Value;
-          // now the payloads part
-          // QUESTION: Is there a way to avoid this skipTo call? We need to know
-          // whether to load the payload or not
-          // GSI: I suppose we could toString the payload, but I don't think that
-          // would be a good idea
-          string field = ((SpanQuery)Query).Field;
-          Explanation payloadExpl = function.Explain(doc, field, scorer.payloadsSeen, scorer.payloadScore);
-          payloadExpl.Value = scorer.GetPayloadScore();
-          // combined
-          ComplexExplanation result = new ComplexExplanation();
-          if (includeSpanScore) {
-            result.AddDetail(expl);
-            result.AddDetail(payloadExpl);
-            result.Value = expl.Value * payloadExpl.Value;
-            result.Description = "btq, product of:";
-          } else {
-            result.AddDetail(payloadExpl);
-            result.Value = payloadExpl.Value;
-            result.Description ="btq(includeSpanScore=false), result of:";
-          }
-          result.Match = true; // LUCENE-1303
-          return result;
+
+        public override bool Equals(Object obj)
+        {
+            if (this == obj)
+                return true;
+            if (!base.Equals(obj))
+                return false;
+            if (GetType() != obj.GetType())
+                return false;
+            PayloadTermQuery other = (PayloadTermQuery)obj;
+            if (function == null)
+            {
+                if (other.function != null)
+                    return false;
+            }
+            else if (!function.Equals(other.function))
+                return false;
+            if (includeSpanScore != other.includeSpanScore)
+                return false;
+            return true;
         }
-      }
-      
-      return new ComplexExplanation(false, 0.0f, "no matching term");
-    }
-  }
-
-  public override int GetHashCode() {
-    int prime = 31;
-    int result = base.GetHashCode();
-    result = prime * result + ((function == null) ? 0 : function.GetHashCode());
-    result = prime * result + (includeSpanScore ? 1231 : 1237);
-    return result;
-  }
-
-  public override bool Equals(Object obj) {
-    if (this == obj)
-      return true;
-    if (!base.Equals(obj))
-      return false;
-    if (GetType() != obj.GetType())
-      return false;
-    PayloadTermQuery other = (PayloadTermQuery) obj;
-    if (function == null) {
-      if (other.function != null)
-        return false;
-    } else if (!function.Equals(other.function))
-      return false;
-    if (includeSpanScore != other.includeSpanScore)
-      return false;
-    return true;
-  }
 
+    }
 }
\ No newline at end of file

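A porting note on the parent fields threaded through PayloadTermWeight and PayloadTermSpanScorer above: Java inner classes see the enclosing instance implicitly (Outer.this), while C# nested classes do not, hence the explicit back-references and chains like parent.parent.function. A minimal sketch of the pattern with hypothetical names:

    public class Outer
    {
        internal int state = 7;

        public class Inner
        {
            // Explicit back-reference replaces Java's implicit Outer.this.
            private readonly Outer parent;

            public Inner(Outer parent) { this.parent = parent; }

            public int ReadState() { return parent.state; }
        }

        public Inner NewInner() { return new Inner(this); }
    }
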
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/733dc181/src/core/Search/PhraseQuery.cs
----------------------------------------------------------------------
diff --git a/src/core/Search/PhraseQuery.cs b/src/core/Search/PhraseQuery.cs
index 5e1340f..cc3a63c 100644
--- a/src/core/Search/PhraseQuery.cs
+++ b/src/core/Search/PhraseQuery.cs
@@ -277,7 +277,7 @@ namespace Lucene.Net.Search
             {
                 // assert !terms.isEmpty()
 
-                var reader = context.Reader;
+                var reader = context.AtomicReader;
                 var liveDocs = acceptDocs;
                 var postingsFreqs = new PostingsAndFreq[parent.terms.Count];
 
@@ -318,7 +318,7 @@ namespace Lucene.Net.Search
                 if (parent.slop == 0)
                 {
                     var s = new ExactPhraseScorer(this, postingsFreqs, similarity.GetExactSimScorer(stats, context));
-                    if (s.NoDocs)
+                    if (s.noDocs)
                     {
                         return null;
                     }

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/733dc181/src/core/Search/QueryWrapperFilter.cs
----------------------------------------------------------------------
diff --git a/src/core/Search/QueryWrapperFilter.cs b/src/core/Search/QueryWrapperFilter.cs
index 2eb1a94..3dc72f8 100644
--- a/src/core/Search/QueryWrapperFilter.cs
+++ b/src/core/Search/QueryWrapperFilter.cs
@@ -73,9 +73,9 @@ namespace Lucene.Net.Search
 		
 		public override DocIdSet GetDocIdSet(AtomicReaderContext context, IBits acceptDocs)
 		{
-		    var privateContext = context.Reader.Context;
+		    var privateContext = context.AtomicReader.AtomicContext;
 		    var weight = new IndexSearcher(privateContext).CreateNormalizedWeight(query);
-            return new AnonymousClassDocIdSet(this);
+            return new AnonymousClassDocIdSet(weight, privateContext, acceptDocs);
 		}
 		
 		public override string ToString()

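The old call built the DocIdSet from this alone and dropped the weight and private context it had just computed; since C# has no Java-style anonymous classes that capture locals, that state has to travel through the constructor. A generic sketch of constructor capture, all names hypothetical:

    // Placeholder types; the point is that captured state arrives explicitly,
    // mirroring AnonymousClassDocIdSet(weight, privateContext, acceptDocs).
    public class CapturedState
    {
        private readonly object weight;
        private readonly object context;
        private readonly object acceptDocs;

        public CapturedState(object weight, object context, object acceptDocs)
        {
            this.weight = weight;
            this.context = context;
            this.acceptDocs = acceptDocs;
        }

        public override string ToString()
        {
            return string.Format("{0} / {1} / {2}", weight, context, acceptDocs);
        }
    }
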
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/733dc181/src/core/Search/ReqExclScorer.cs
----------------------------------------------------------------------
diff --git a/src/core/Search/ReqExclScorer.cs b/src/core/Search/ReqExclScorer.cs
index 4c8fe5e..d910514 100644
--- a/src/core/Search/ReqExclScorer.cs
+++ b/src/core/Search/ReqExclScorer.cs
@@ -124,7 +124,7 @@ namespace Lucene.Net.Search
         {
             get
             {
-                return reqScorer.Freq();
+                return reqScorer.Freq;
             }
         }
 

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/733dc181/src/core/Search/ScoreCachingWrappingScorer.cs
----------------------------------------------------------------------
diff --git a/src/core/Search/ScoreCachingWrappingScorer.cs b/src/core/Search/ScoreCachingWrappingScorer.cs
index aecfa92..55898b7 100644
--- a/src/core/Search/ScoreCachingWrappingScorer.cs
+++ b/src/core/Search/ScoreCachingWrappingScorer.cs
@@ -65,7 +65,7 @@ namespace Lucene.Net.Search
         {
             get
             {
-                return scorer.Freq();
+                return scorer.Freq;
             }
         }
 

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/733dc181/src/core/Search/ScoringRewrite.cs
----------------------------------------------------------------------
diff --git a/src/core/Search/ScoringRewrite.cs b/src/core/Search/ScoringRewrite.cs
index a4d2d6b..8ae3ac3 100644
--- a/src/core/Search/ScoringRewrite.cs
+++ b/src/core/Search/ScoringRewrite.cs
@@ -40,9 +40,9 @@ namespace Lucene.Net.Search
         {
             public override Query Rewrite(IndexReader reader, MultiTermQuery query) 
             {
-              var bq = SCORING_BOOLEAN_QUERY_REWRITE.Rewrite(reader, query);
+              var bq = (BooleanQuery)SCORING_BOOLEAN_QUERY_REWRITE.Rewrite(reader, query);
               // TODO: if empty boolean query return NullQuery?
-              if (!bq.Clauses.Any())
+              if (bq.Clauses.Length == 0)
                 return bq;
               // strip the scores off
               var result = new ConstantScoreQuery(bq) {Boost = query.Boost};

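Two details in the hunk above: Rewrite is declared to return Query, so the result must be cast back to BooleanQuery, and Clauses is an array in the port, so the emptiness test uses Length instead of LINQ's Any(). A condensed sketch of the resulting flow (StripScores is a hypothetical name; the member references are assumed from the surrounding class):

    protected Query StripScores(IndexReader reader, MultiTermQuery query)
    {
        var bq = (BooleanQuery)SCORING_BOOLEAN_QUERY_REWRITE.Rewrite(reader, query);
        if (bq.Clauses.Length == 0)
            return bq;                    // empty disjunction: nothing matched
        // Wrap to discard per-clause scoring while keeping the original boost.
        return new ConstantScoreQuery(bq) { Boost = query.Boost };
    }
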
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/733dc181/src/core/Search/Similarities/SimilarityBase.cs
----------------------------------------------------------------------
diff --git a/src/core/Search/Similarities/SimilarityBase.cs b/src/core/Search/Similarities/SimilarityBase.cs
index 7acd358..4166a7b 100644
--- a/src/core/Search/Similarities/SimilarityBase.cs
+++ b/src/core/Search/Similarities/SimilarityBase.cs
@@ -41,7 +41,7 @@ namespace Lucene.Net.Search.Similarities
                                               TermStatistics termStats)
         {
             // assert collectionStats.sumTotalTermFreq() == -1 || collectionStats.sumTotalTermFreq() >= termStats.totalTermFreq();
-            var numberOfDocuments = collectionStats.MaxDocs;
+            var numberOfDocuments = collectionStats.MaxDoc;
 
             var docFreq = termStats.DocFreq;
             var totalTermFreq = termStats.TotalTermFreq;
@@ -104,7 +104,7 @@ namespace Lucene.Net.Search.Similarities
                 for (int i = 0; i < subScorers.Length; i++)
                 {
                     var basicstats = (BasicStats) subStats[i];
-                    subScorers[i] = new BasicExactDocScorer(basicstats, context.Reader.GetNormValues(basicstats.Field),
+                    subScorers[i] = new BasicExactDocScorer(basicstats, context.AtomicReader.GetNormValues(basicstats.Field),
                                                             this);
                 }
                 return new MultiSimilarity.MultiExactDocScorer(subScorers);
@@ -112,7 +112,7 @@ namespace Lucene.Net.Search.Similarities
             else
             {
                 var basicstats = (BasicStats) stats;
-                return new BasicExactDocScorer(basicstats, context.Reader.GetNormValues(basicstats.Field), this);
+                return new BasicExactDocScorer(basicstats, context.AtomicReader.GetNormValues(basicstats.Field), this);
             }
         }
 
@@ -126,7 +126,7 @@ namespace Lucene.Net.Search.Similarities
                 for (int i = 0; i < subScorers.Length; i++)
                 {
                     var basicstats = (BasicStats) subStats[i];
-                    subScorers[i] = new BasicSloppyDocScorer(basicstats, context.Reader.GetNormValues(basicstats.Field),
+                    subScorers[i] = new BasicSloppyDocScorer(basicstats, context.AtomicReader.GetNormValues(basicstats.Field),
                                                              this);
                 }
                 return new MultiSimilarity.MultiSloppyDocScorer(subScorers);
@@ -134,7 +134,7 @@ namespace Lucene.Net.Search.Similarities
             else
             {
                 var basicstats = (BasicStats) stats;
-                return new BasicSloppyDocScorer(basicstats, context.Reader.GetNormValues(basicstats.Field), this);
+                return new BasicSloppyDocScorer(basicstats, context.AtomicReader.GetNormValues(basicstats.Field), this);
             }
         }
 

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/733dc181/src/core/Search/Similarities/TFIDFSimilarity.cs
----------------------------------------------------------------------
diff --git a/src/core/Search/Similarities/TFIDFSimilarity.cs b/src/core/Search/Similarities/TFIDFSimilarity.cs
index 232742a..e20dbfc 100644
--- a/src/core/Search/Similarities/TFIDFSimilarity.cs
+++ b/src/core/Search/Similarities/TFIDFSimilarity.cs
@@ -12,7 +12,7 @@ namespace Lucene.Net.Search.Similarities
         {
             for (int i = 0; i < 256; i++)
             {
-                NORM_TABLE[i] = SmallFloat.Byte315ToFloat((byte) i);
+                NORM_TABLE[i] = SmallFloat.Byte315ToFloat((sbyte) i);
             }
         }
 
@@ -87,13 +87,13 @@ namespace Lucene.Net.Search.Similarities
         public override sealed ExactSimScorer GetExactSimScorer(SimWeight stats, AtomicReaderContext context)
         {
             var idfstats = (IDFStats) stats;
-            return new ExactTFIDFDocScorer(idfstats, context.Reader.GetNormValues(idfstats.Field), this);
+            return new ExactTFIDFDocScorer(idfstats, context.AtomicReader.GetNormValues(idfstats.Field), this);
         }
 
         public override sealed SloppySimScorer GetSloppySimScorer(SimWeight stats, AtomicReaderContext context)
         {
             var idfstats = (IDFStats) stats;
-            return new SloppyTFIDFDocScorer(idfstats, context.Reader.GetNormValues(idfstats.Field), this);
+            return new SloppyTFIDFDocScorer(idfstats, context.AtomicReader.GetNormValues(idfstats.Field), this);
         }
 
         private Explanation ExplainScore(int doc, Explanation freq, IDFStats stats, NumericDocValues norms)

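The (sbyte) cast above is load-bearing: Java's byte is signed, so for table indexes 128..255 the Java original passes negative values into byte315ToFloat, and a port indexing with an unsigned byte would decode the top half of the norm table incorrectly. A standalone sketch of the loop, assuming SmallFloat.Byte315ToFloat(sbyte) as used above:

    var normTable = new float[256];
    for (int i = 0; i < 256; i++)
    {
        // unchecked wrap: 128..255 become -128..-1, matching Java's (byte) i
        sbyte encoded = unchecked((sbyte)i);
        normTable[i] = SmallFloat.Byte315ToFloat(encoded);
    }
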
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/733dc181/src/core/Search/Spans/NearSpansOrdered.cs
----------------------------------------------------------------------
diff --git a/src/core/Search/Spans/NearSpansOrdered.cs b/src/core/Search/Spans/NearSpansOrdered.cs
index a22ff84..43c711d 100644
--- a/src/core/Search/Spans/NearSpansOrdered.cs
+++ b/src/core/Search/Spans/NearSpansOrdered.cs
@@ -181,7 +181,7 @@ namespace Lucene.Net.Search.Spans
             if (firstTime)
             {
                 firstTime = false;
-                foreach (Spans t in subSpans)
+                foreach (SpansBase t in subSpans)
                 {
                     if (!t.Next())
                     {
@@ -356,7 +356,7 @@ namespace Lucene.Net.Search.Spans
             int lastEnd = matchEnd;
             for (int i = subSpans.Length - 2; i >= 0; i--)
             {
-                Spans prevSpans = subSpans[i];
+                SpansBase prevSpans = subSpans[i];
                 if (collectPayloads && prevSpans.IsPayloadAvailable())
                 {
                     ICollection<sbyte[]> payload = prevSpans.GetPayload();

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/733dc181/src/core/Search/Spans/NearSpansUnordered.cs
----------------------------------------------------------------------
diff --git a/src/core/Search/Spans/NearSpansUnordered.cs b/src/core/Search/Spans/NearSpansUnordered.cs
index d472b87..f16c25e 100644
--- a/src/core/Search/Spans/NearSpansUnordered.cs
+++ b/src/core/Search/Spans/NearSpansUnordered.cs
@@ -183,7 +183,7 @@ namespace Lucene.Net.Search.Spans
 
             SpanQuery[] clauses = query.GetClauses();
             queue = new CellQueue(this, clauses.Length);
-            subSpans = new Spans[clauses.Length];
+            subSpans = new SpansBase[clauses.Length];
             for (int i = 0; i < clauses.Length; i++)
             {
                 SpansCell cell = new SpansCell(this, clauses[i].GetSpans(context, acceptDocs, termContexts), i);


[36/50] [abbrv] git commit: Bugfix for NumericRangeQuery

Posted by mh...@apache.org.
Bugfix for NumericRangeQuery


Project: http://git-wip-us.apache.org/repos/asf/lucenenet/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucenenet/commit/d72f5c11
Tree: http://git-wip-us.apache.org/repos/asf/lucenenet/tree/d72f5c11
Diff: http://git-wip-us.apache.org/repos/asf/lucenenet/diff/d72f5c11

Branch: refs/heads/branch_4x
Commit: d72f5c11d57db4e96f15187babbd5c913eade637
Parents: 4cc8ff0
Author: Paul Irwin <pa...@gmail.com>
Authored: Wed Aug 7 13:56:13 2013 -0400
Committer: Paul Irwin <pa...@gmail.com>
Committed: Wed Aug 7 13:56:13 2013 -0400

----------------------------------------------------------------------
 src/core/Search/NumericRangeQuery.cs | 1 +
 1 file changed, 1 insertion(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucenenet/blob/d72f5c11/src/core/Search/NumericRangeQuery.cs
----------------------------------------------------------------------
diff --git a/src/core/Search/NumericRangeQuery.cs b/src/core/Search/NumericRangeQuery.cs
index c1a2acb..8580a7e 100644
--- a/src/core/Search/NumericRangeQuery.cs
+++ b/src/core/Search/NumericRangeQuery.cs
@@ -163,6 +163,7 @@ namespace Lucene.Net.Search
         {
             if (precisionStep < 1)
                 throw new ArgumentException("precisionStep must be >= 1");
+            this.field = field;
             this.precisionStep = precisionStep;
             this.dataType = dataType;
             this.min = min;

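The constructor above validated precisionStep but never stored the field argument, so the query's Field property stayed null until this fix. A regression-style sketch, assuming the port keeps Lucene 4.x's NewIntRange factory (field, precisionStep, min, max, minInclusive, maxInclusive):

    var q = NumericRangeQuery.NewIntRange("price", 4, 10, 100, true, true);
    // Before the fix this came back null and term enumeration targeted no field.
    System.Diagnostics.Debug.Assert(q.Field == "price");
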

[39/50] [abbrv] Implement Standard and Classic Analyzers

Posted by mh...@apache.org.
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7a4b442f/src/contrib/Analyzers/Standard/Std31/StandardTokenizerImpl31.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Analyzers/Standard/Std31/StandardTokenizerImpl31.cs b/src/contrib/Analyzers/Standard/Std31/StandardTokenizerImpl31.cs
new file mode 100644
index 0000000..c369e53
--- /dev/null
+++ b/src/contrib/Analyzers/Standard/Std31/StandardTokenizerImpl31.cs
@@ -0,0 +1,1116 @@
+using Lucene.Net.Analysis.Tokenattributes;
+using System;
+using System.Collections.Generic;
+using System.IO;
+using System.Linq;
+using System.Text;
+
+namespace Lucene.Net.Analysis.Standard.Std31
+{
+    public sealed class StandardTokenizerImpl31 : IStandardTokenizerInterface
+    {
+
+        /** This character denotes the end of file */
+        public const int YYEOF = -1;
+
+        /** initial size of the lookahead buffer */
+        private const int ZZ_BUFFERSIZE = 4096;
+
+        /** lexical states */
+        public const int YYINITIAL = 0;
+
+        /**
+         * ZZ_LEXSTATE[l] is the state in the DFA for the lexical state l
+         * ZZ_LEXSTATE[l+1] is the state in the DFA for the lexical state l
+         *                  at the beginning of a line
+         * l is of the form l = 2*k, k a non negative integer
+         */
+        private static readonly int[] ZZ_LEXSTATE = { 
+     0, 0
+  };
+
+        /** 
+         * Translates characters to character classes
+         */
+        private const String ZZ_CMAP_PACKED =
+          "\u0027\0\u0001\u0060\u0004\0\u0001\u005f\u0001\0\u0001\u0060\u0001\0\u000a\u005c\u0001\u005e\u0001\u005f" +
+          "\u0005\0\u001a\u005a\u0004\0\u0001\u0061\u0001\0\u001a\u005a\u002f\0\u0001\u005a\u0002\0\u0001\u005b" +
+          "\u0007\0\u0001\u005a\u0001\0\u0001\u005e\u0002\0\u0001\u005a\u0005\0\u0017\u005a\u0001\0\u001f\u005a" +
+          "\u0001\0\u01ca\u005a\u0004\0\u000c\u005a\u000e\0\u0005\u005a\u0007\0\u0001\u005a\u0001\0\u0001\u005a" +
+          "\u0011\0\u0070\u005b\u0005\u005a\u0001\0\u0002\u005a\u0002\0\u0004\u005a\u0001\u005f\u0007\0\u0001\u005a" +
+          "\u0001\u005e\u0003\u005a\u0001\0\u0001\u005a\u0001\0\u0014\u005a\u0001\0\u0053\u005a\u0001\0\u008b\u005a" +
+          "\u0001\0\u0007\u005b\u009e\u005a\u0009\0\u0026\u005a\u0002\0\u0001\u005a\u0007\0\u0027\u005a\u0001\0" +
+          "\u0001\u005f\u0007\0\u002d\u005b\u0001\0\u0001\u005b\u0001\0\u0002\u005b\u0001\0\u0002\u005b\u0001\0" +
+          "\u0001\u005b\u0008\0\u001b\u005a\u0005\0\u0004\u005a\u0001\u005e\u000b\0\u0004\u005b\u0008\0\u0002\u005f" +
+          "\u0002\0\u000b\u005b\u0005\0\u002b\u005a\u0015\u005b\u000a\u005c\u0001\0\u0001\u005c\u0001\u005f\u0001\0" +
+          "\u0002\u005a\u0001\u005b\u0063\u005a\u0001\0\u0001\u005a\u0007\u005b\u0001\u005b\u0001\0\u0006\u005b\u0002\u005a" +
+          "\u0002\u005b\u0001\0\u0004\u005b\u0002\u005a\u000a\u005c\u0003\u005a\u0002\0\u0001\u005a\u000f\0\u0001\u005b" +
+          "\u0001\u005a\u0001\u005b\u001e\u005a\u001b\u005b\u0002\0\u0059\u005a\u000b\u005b\u0001\u005a\u000e\0\u000a\u005c" +
+          "\u0021\u005a\u0009\u005b\u0002\u005a\u0002\0\u0001\u005f\u0001\0\u0001\u005a\u0005\0\u0016\u005a\u0004\u005b" +
+          "\u0001\u005a\u0009\u005b\u0001\u005a\u0003\u005b\u0001\u005a\u0005\u005b\u0012\0\u0019\u005a\u0003\u005b\u00a4\0" +
+          "\u0004\u005b\u0036\u005a\u0003\u005b\u0001\u005a\u0012\u005b\u0001\u005a\u0007\u005b\u000a\u005a\u0002\u005b\u0002\0" +
+          "\u000a\u005c\u0001\0\u0007\u005a\u0001\0\u0007\u005a\u0001\0\u0003\u005b\u0001\0\u0008\u005a\u0002\0" +
+          "\u0002\u005a\u0002\0\u0016\u005a\u0001\0\u0007\u005a\u0001\0\u0001\u005a\u0003\0\u0004\u005a\u0002\0" +
+          "\u0001\u005b\u0001\u005a\u0007\u005b\u0002\0\u0002\u005b\u0002\0\u0003\u005b\u0001\u005a\u0008\0\u0001\u005b" +
+          "\u0004\0\u0002\u005a\u0001\0\u0003\u005a\u0002\u005b\u0002\0\u000a\u005c\u0002\u005a\u000f\0\u0003\u005b" +
+          "\u0001\0\u0006\u005a\u0004\0\u0002\u005a\u0002\0\u0016\u005a\u0001\0\u0007\u005a\u0001\0\u0002\u005a" +
+          "\u0001\0\u0002\u005a\u0001\0\u0002\u005a\u0002\0\u0001\u005b\u0001\0\u0005\u005b\u0004\0\u0002\u005b" +
+          "\u0002\0\u0003\u005b\u0003\0\u0001\u005b\u0007\0\u0004\u005a\u0001\0\u0001\u005a\u0007\0\u000a\u005c" +
+          "\u0002\u005b\u0003\u005a\u0001\u005b\u000b\0\u0003\u005b\u0001\0\u0009\u005a\u0001\0\u0003\u005a\u0001\0" +
+          "\u0016\u005a\u0001\0\u0007\u005a\u0001\0\u0002\u005a\u0001\0\u0005\u005a\u0002\0\u0001\u005b\u0001\u005a" +
+          "\u0008\u005b\u0001\0\u0003\u005b\u0001\0\u0003\u005b\u0002\0\u0001\u005a\u000f\0\u0002\u005a\u0002\u005b" +
+          "\u0002\0\u000a\u005c\u0011\0\u0003\u005b\u0001\0\u0008\u005a\u0002\0\u0002\u005a\u0002\0\u0016\u005a" +
+          "\u0001\0\u0007\u005a\u0001\0\u0002\u005a\u0001\0\u0005\u005a\u0002\0\u0001\u005b\u0001\u005a\u0007\u005b" +
+          "\u0002\0\u0002\u005b\u0002\0\u0003\u005b\u0008\0\u0002\u005b\u0004\0\u0002\u005a\u0001\0\u0003\u005a" +
+          "\u0002\u005b\u0002\0\u000a\u005c\u0001\0\u0001\u005a\u0010\0\u0001\u005b\u0001\u005a\u0001\0\u0006\u005a" +
+          "\u0003\0\u0003\u005a\u0001\0\u0004\u005a\u0003\0\u0002\u005a\u0001\0\u0001\u005a\u0001\0\u0002\u005a" +
+          "\u0003\0\u0002\u005a\u0003\0\u0003\u005a\u0003\0\u000c\u005a\u0004\0\u0005\u005b\u0003\0\u0003\u005b" +
+          "\u0001\0\u0004\u005b\u0002\0\u0001\u005a\u0006\0\u0001\u005b\u000e\0\u000a\u005c\u0011\0\u0003\u005b" +
+          "\u0001\0\u0008\u005a\u0001\0\u0003\u005a\u0001\0\u0017\u005a\u0001\0\u000a\u005a\u0001\0\u0005\u005a" +
+          "\u0003\0\u0001\u005a\u0007\u005b\u0001\0\u0003\u005b\u0001\0\u0004\u005b\u0007\0\u0002\u005b\u0001\0" +
+          "\u0002\u005a\u0006\0\u0002\u005a\u0002\u005b\u0002\0\u000a\u005c\u0012\0\u0002\u005b\u0001\0\u0008\u005a" +
+          "\u0001\0\u0003\u005a\u0001\0\u0017\u005a\u0001\0\u000a\u005a\u0001\0\u0005\u005a\u0002\0\u0001\u005b" +
+          "\u0001\u005a\u0007\u005b\u0001\0\u0003\u005b\u0001\0\u0004\u005b\u0007\0\u0002\u005b\u0007\0\u0001\u005a" +
+          "\u0001\0\u0002\u005a\u0002\u005b\u0002\0\u000a\u005c\u0001\0\u0002\u005a\u000f\0\u0002\u005b\u0001\0" +
+          "\u0008\u005a\u0001\0\u0003\u005a\u0001\0\u0029\u005a\u0002\0\u0001\u005a\u0007\u005b\u0001\0\u0003\u005b" +
+          "\u0001\0\u0004\u005b\u0001\u005a\u0008\0\u0001\u005b\u0008\0\u0002\u005a\u0002\u005b\u0002\0\u000a\u005c" +
+          "\u000a\0\u0006\u005a\u0002\0\u0002\u005b\u0001\0\u0012\u005a\u0003\0\u0018\u005a\u0001\0\u0009\u005a" +
+          "\u0001\0\u0001\u005a\u0002\0\u0007\u005a\u0003\0\u0001\u005b\u0004\0\u0006\u005b\u0001\0\u0001\u005b" +
+          "\u0001\0\u0008\u005b\u0012\0\u0002\u005b\u000d\0\u0030\u0062\u0001\u0063\u0002\u0062\u0007\u0063\u0005\0" +
+          "\u0007\u0062\u0008\u0063\u0001\0\u000a\u005c\u0027\0\u0002\u0062\u0001\0\u0001\u0062\u0002\0\u0002\u0062" +
+          "\u0001\0\u0001\u0062\u0002\0\u0001\u0062\u0006\0\u0004\u0062\u0001\0\u0007\u0062\u0001\0\u0003\u0062" +
+          "\u0001\0\u0001\u0062\u0001\0\u0001\u0062\u0002\0\u0002\u0062\u0001\0\u0004\u0062\u0001\u0063\u0002\u0062" +
+          "\u0006\u0063\u0001\0\u0002\u0063\u0001\u0062\u0002\0\u0005\u0062\u0001\0\u0001\u0062\u0001\0\u0006\u0063" +
+          "\u0002\0\u000a\u005c\u0002\0\u0002\u0062\u0022\0\u0001\u005a\u0017\0\u0002\u005b\u0006\0\u000a\u005c" +
+          "\u000b\0\u0001\u005b\u0001\0\u0001\u005b\u0001\0\u0001\u005b\u0004\0\u0002\u005b\u0008\u005a\u0001\0" +
+          "\u0024\u005a\u0004\0\u0014\u005b\u0001\0\u0002\u005b\u0005\u005a\u000b\u005b\u0001\0\u0024\u005b\u0009\0" +
+          "\u0001\u005b\u0039\0\u002b\u0062\u0014\u0063\u0001\u0062\u000a\u005c\u0006\0\u0006\u0062\u0004\u0063\u0004\u0062" +
+          "\u0003\u0063\u0001\u0062\u0003\u0063\u0002\u0062\u0007\u0063\u0003\u0062\u0004\u0063\u000d\u0062\u000c\u0063\u0001\u0062" +
+          "\u0001\u0063\u000a\u005c\u0004\u0063\u0002\u0062\u0026\u005a\u000a\0\u002b\u005a\u0001\0\u0001\u005a\u0003\0" +
+          "\u0100\u0066\u0049\u005a\u0001\0\u0004\u005a\u0002\0\u0007\u005a\u0001\0\u0001\u005a\u0001\0\u0004\u005a" +
+          "\u0002\0\u0029\u005a\u0001\0\u0004\u005a\u0002\0\u0021\u005a\u0001\0\u0004\u005a\u0002\0\u0007\u005a" +
+          "\u0001\0\u0001\u005a\u0001\0\u0004\u005a\u0002\0\u000f\u005a\u0001\0\u0039\u005a\u0001\0\u0004\u005a" +
+          "\u0002\0\u0043\u005a\u0002\0\u0003\u005b\u0020\0\u0010\u005a\u0010\0\u0055\u005a\u000c\0\u026c\u005a" +
+          "\u0002\0\u0011\u005a\u0001\0\u001a\u005a\u0005\0\u004b\u005a\u0003\0\u0003\u005a\u000f\0\u000d\u005a" +
+          "\u0001\0\u0004\u005a\u0003\u005b\u000b\0\u0012\u005a\u0003\u005b\u000b\0\u0012\u005a\u0002\u005b\u000c\0" +
+          "\u000d\u005a\u0001\0\u0003\u005a\u0001\0\u0002\u005b\u000c\0\u0034\u0062\u0002\u0063\u001e\u0063\u0003\0" +
+          "\u0001\u0062\u0004\0\u0001\u0062\u0001\u0063\u0002\0\u000a\u005c\u0021\0\u0003\u005b\u0002\0\u000a\u005c" +
+          "\u0006\0\u0058\u005a\u0008\0\u0029\u005a\u0001\u005b\u0001\u005a\u0005\0\u0046\u005a\u000a\0\u001d\u005a" +
+          "\u0003\0\u000c\u005b\u0004\0\u000c\u005b\u000a\0\u000a\u005c\u001e\u0062\u0002\0\u0005\u0062\u000b\0" +
+          "\u002c\u0062\u0004\0\u0011\u0063\u0007\u0062\u0002\u0063\u0006\0\u000a\u005c\u0001\u0062\u0003\0\u0002\u0062" +
+          "\u0020\0\u0017\u005a\u0005\u005b\u0004\0\u0035\u0062\u000a\u0063\u0001\0\u001d\u0063\u0002\0\u0001\u005b" +
+          "\u000a\u005c\u0006\0\u000a\u005c\u0006\0\u000e\u0062\u0052\0\u0005\u005b\u002f\u005a\u0011\u005b\u0007\u005a" +
+          "\u0004\0\u000a\u005c\u0011\0\u0009\u005b\u000c\0\u0003\u005b\u001e\u005a\u000a\u005b\u0003\0\u0002\u005a" +
+          "\u000a\u005c\u0006\0\u0026\u005a\u000e\u005b\u000c\0\u0024\u005a\u0014\u005b\u0008\0\u000a\u005c\u0003\0" +
+          "\u0003\u005a\u000a\u005c\u0024\u005a\u0052\0\u0003\u005b\u0001\0\u0015\u005b\u0004\u005a\u0001\u005b\u0004\u005a" +
+          "\u0001\u005b\u000d\0\u00c0\u005a\u0027\u005b\u0015\0\u0004\u005b\u0116\u005a\u0002\0\u0006\u005a\u0002\0" +
+          "\u0026\u005a\u0002\0\u0006\u005a\u0002\0\u0008\u005a\u0001\0\u0001\u005a\u0001\0\u0001\u005a\u0001\0" +
+          "\u0001\u005a\u0001\0\u001f\u005a\u0002\0\u0035\u005a\u0001\0\u0007\u005a\u0001\0\u0001\u005a\u0003\0" +
+          "\u0003\u005a\u0001\0\u0007\u005a\u0003\0\u0004\u005a\u0002\0\u0006\u005a\u0004\0\u000d\u005a\u0005\0" +
+          "\u0003\u005a\u0001\0\u0007\u005a\u000f\0\u0002\u005b\u0002\u005b\u0008\0\u0002\u0060\u000a\0\u0001\u0060" +
+          "\u0002\0\u0001\u005e\u0002\0\u0005\u005b\u0010\0\u0002\u0061\u0003\0\u0001\u005f\u000f\0\u0001\u0061" +
+          "\u000b\0\u0005\u005b\u0005\0\u0006\u005b\u0001\0\u0001\u005a\u000d\0\u0001\u005a\u0010\0\u000d\u005a" +
+          "\u0033\0\u0021\u005b\u0011\0\u0001\u005a\u0004\0\u0001\u005a\u0002\0\u000a\u005a\u0001\0\u0001\u005a" +
+          "\u0003\0\u0005\u005a\u0006\0\u0001\u005a\u0001\0\u0001\u005a\u0001\0\u0001\u005a\u0001\0\u0004\u005a" +
+          "\u0001\0\u000b\u005a\u0002\0\u0004\u005a\u0005\0\u0005\u005a\u0004\0\u0001\u005a\u0011\0\u0029\u005a" +
+          "\u032d\0\u0034\u005a\u0716\0\u002f\u005a\u0001\0\u002f\u005a\u0001\0\u0085\u005a\u0006\0\u0004\u005a" +
+          "\u0003\u005b\u000e\0\u0026\u005a\u000a\0\u0036\u005a\u0009\0\u0001\u005a\u000f\0\u0001\u005b\u0017\u005a" +
+          "\u0009\0\u0007\u005a\u0001\0\u0007\u005a\u0001\0\u0007\u005a\u0001\0\u0007\u005a\u0001\0\u0007\u005a" +
+          "\u0001\0\u0007\u005a\u0001\0\u0007\u005a\u0001\0\u0007\u005a\u0001\0\u0020\u005b\u002f\0\u0001\u005a" +
+          "\u0050\0\u001a\u0064\u0001\0\u0059\u0064\u000c\0\u00d6\u0064\u002f\0\u0001\u005a\u0001\0\u0001\u0064" +
+          "\u0019\0\u0009\u0064\u0004\u005b\u0002\u005b\u0001\0\u0005\u005d\u0002\0\u0003\u0064\u0001\u005a\u0001\u005a" +
+          "\u0004\0\u0056\u0065\u0002\0\u0002\u005b\u0002\u005d\u0003\u0065\u005b\u005d\u0001\0\u0004\u005d\u0005\0" +
+          "\u0029\u005a\u0003\0\u005e\u0066\u0011\0\u001b\u005a\u0035\0\u0010\u005d\u001f\0\u0041\0\u001f\0" +
+          "\u0051\0\u002f\u005d\u0001\0\u0058\u005d\u00a8\0\u19b6\u0064\u004a\0\u51cc\u0064\u0034\0\u048d\u005a" +
+          "\u0043\0\u002e\u005a\u0002\0\u010d\u005a\u0003\0\u0010\u005a\u000a\u005c\u0002\u005a\u0014\0\u002f\u005a" +
+          "\u0004\u005b\u0009\0\u0002\u005b\u0001\0\u0019\u005a\u0008\0\u0050\u005a\u0002\u005b\u0025\0\u0009\u005a" +
+          "\u0002\0\u0067\u005a\u0002\0\u0004\u005a\u0001\0\u0002\u005a\u000e\0\u000a\u005a\u0050\0\u0008\u005a" +
+          "\u0001\u005b\u0003\u005a\u0001\u005b\u0004\u005a\u0001\u005b\u0017\u005a\u0005\u005b\u0018\0\u0034\u005a\u000c\0" +
+          "\u0002\u005b\u0032\u005a\u0011\u005b\u000b\0\u000a\u005c\u0006\0\u0012\u005b\u0006\u005a\u0003\0\u0001\u005a" +
+          "\u0004\0\u000a\u005c\u001c\u005a\u0008\u005b\u0002\0\u0017\u005a\u000d\u005b\u000c\0\u001d\u0066\u0003\0" +
+          "\u0004\u005b\u002f\u005a\u000e\u005b\u000e\0\u0001\u005a\u000a\u005c\u0026\0\u0029\u005a\u000e\u005b\u0009\0" +
+          "\u0003\u005a\u0001\u005b\u0008\u005a\u0002\u005b\u0002\0\u000a\u005c\u0006\0\u001b\u0062\u0001\u0063\u0004\0" +
+          "\u0030\u0062\u0001\u0063\u0001\u0062\u0003\u0063\u0002\u0062\u0002\u0063\u0005\u0062\u0002\u0063\u0001\u0062\u0001\u0063" +
+          "\u0001\u0062\u0018\0\u0005\u0062\u0021\0\u0006\u005a\u0002\0\u0006\u005a\u0002\0\u0006\u005a\u0009\0" +
+          "\u0007\u005a\u0001\0\u0007\u005a\u0091\0\u0023\u005a\u0008\u005b\u0001\0\u0002\u005b\u0002\0\u000a\u005c" +
+          "\u0006\0\u2ba4\u0066\u000c\0\u0017\u0066\u0004\0\u0031\u0066\u0004\0\u0001\u0019\u0001\u0015\u0001\u0026" +
+          "\u0001\u0023\u0001\u000b\u0003\0\u0001\u0007\u0001\u0005\u0002\0\u0001\u0003\u0001\u0001\u000c\0\u0001\u0009" +
+          "\u0011\0\u0001\u004a\u0007\0\u0001\u0035\u0001\u000f\u0006\0\u0001\u0058\u0003\0\u0001\u0050\u0001\u0050" +
+          "\u0001\u0050\u0001\u0050\u0001\u0050\u0001\u0050\u0001\u0050\u0001\u0050\u0001\u0050\u0001\u0050\u0001\u0050\u0001\u0050" +
+          "\u0001\u0050\u0001\u0050\u0001\u0050\u0001\u0050\u0001\u0050\u0001\u0050\u0001\u0050\u0001\u0050\u0001\u0050\u0001\u0050" +
+          "\u0001\u0050\u0001\u0050\u0001\u0050\u0001\u0050\u0001\u0050\u0001\u0050\u0001\u0050\u0001\u0050\u0001\u0050\u0001\u0050" +
+          "\u0001\u0050\u0001\u0050\u0001\u0050\u0001\u0050\u0001\u0050\u0001\u0050\u0001\u0050\u0001\u0050\u0001\u0050\u0001\u0051" +
+          "\u0001\u0050\u0001\u0050\u0001\u0050\u0001\u0055\u0001\u0053\u000f\0\u0001\u004c\u02c1\0\u0001\u0038\u00bf\0" +
+          "\u0001\u004b\u0001\u0039\u0001\u0002\u0003\u0054\u0002\u001d\u0001\u0054\u0001\u001d\u0002\u0054\u0001\u000c\u0011\u0054" +
+          "\u0002\u0030\u0007\u003b\u0001\u003a\u0007\u003b\u0007\u002a\u0001\u000d\u0001\u002a\u0001\u003d\u0002\u0025\u0001\u0024" +
+          "\u0001\u003d\u0001\u0025\u0001\u0024\u0008\u003d\u0002\u0033\u0005\u0031\u0002\u002c\u0005\u0031\u0001\u0006\u0008\u001f" +
+          "\u0005\u0011\u0003\u0017\u000a\u0046\u0010\u0017\u0003\u0022\u001a\u0018\u0001\u0016\u0002\u0014\u0002\u0048\u0001\u0049" +
+          "\u0002\u0048\u0002\u0049\u0002\u0048\u0001\u0049\u0003\u0014\u0001\u000e\u0002\u0014\u000a\u0034\u0001\u003c\u0001\u0021" +
+          "\u0001\u001c\u0001\u0034\u0006\u0021\u0001\u001c\u0036\u0021\u0005\u004d\u0006\u0043\u0001\u0029\u0004\u0043\u0002\u0029" +
+          "\u0008\u0043\u0001\u0029\u0007\u0040\u0001\u000a\u0002\u0040\u001a\u0043\u0001\u000a\u0004\u0040\u0001\u000a\u0005\u0042" +
+          "\u0001\u0041\u0001\u0042\u0003\u0041\u0007\u0042\u0001\u0041\u0013\u0042\u0005\u0037\u0003\u0042\u0006\u0037\u0002\u0037" +
+          "\u0006\u0036\u0008\u0036\u0002\u0040\u0007\u0036\u001e\u0040\u0004\u0036\u0042\u0040\u000d\u004d\u0001\u003f\u0002\u004d" +
+          "\u0001\u0059\u0003\u004f\u0001\u004d\u0002\u004f\u0005\u004d\u0004\u004f\u0004\u004e\u0001\u004d\u0003\u004e\u0001\u004d" +
+          "\u0005\u004e\u0016\u002e\u0004\u0013\u0001\u0045\u0002\u0044\u0004\u0052\u0001\u0044\u0002\u0052\u0003\u003e\u001b\u0052" +
+          "\u001d\u002d\u0003\u0052\u001d\u0056\u0003\u0052\u0006\u0056\u0002\u001b\u0019\u0056\u0001\u001b\u000f\u0056\u0006\u0052" +
+          "\u0004\u0012\u0001\u0008\u001f\u0012\u0001\u0008\u0004\u0012\u0015\u0032\u0001\u0057\u0009\u0032\u0011\u002d\u0005\u0032" +
+          "\u0001\u002f\u000a\u0020\u000b\u0032\u0004\u002d\u0001\u0028\u0006\u002d\u000a\u0052\u000f\u002d\u0001\u0027\u0003\u002b" +
+          "\u000d\u0010\u0009\u001e\u0001\u001a\u0014\u001e\u0002\u0010\u0009\u001e\u0001\u001a\u0019\u001e\u0001\u001a\u0004\u0010" +
+          "\u0004\u001e\u0002\u001a\u0002\u0047\u0001\u0004\u0005\u0047\u002a\u0004\u1900\0\u012e\u0064\u0002\0\u003e\u0064" +
+          "\u0002\0\u006a\u0064\u0026\0\u0007\u005a\u000c\0\u0005\u005a\u0005\0\u0001\u005a\u0001\u005b\u000a\u005a" +
+          "\u0001\0\u000d\u005a\u0001\0\u0005\u005a\u0001\0\u0001\u005a\u0001\0\u0002\u005a\u0001\0\u0002\u005a" +
+          "\u0001\0\u006c\u005a\u0021\0\u016b\u005a\u0012\0\u0040\u005a\u0002\0\u0036\u005a\u0028\0\u000c\u005a" +
+          "\u0004\0\u0010\u005b\u0001\u005f\u0002\0\u0001\u005e\u0001\u005f\u000b\0\u0007\u005b\u000c\0\u0002\u0061" +
+          "\u0018\0\u0003\u0061\u0001\u005f\u0001\0\u0001\u0060\u0001\0\u0001\u005f\u0001\u005e\u001a\0\u0005\u005a" +
+          "\u0001\0\u0087\u005a\u0002\0\u0001\u005b\u0007\0\u0001\u0060\u0004\0\u0001\u005f\u0001\0\u0001\u0060" +
+          "\u0001\0\u000a\u005c\u0001\u005e\u0001\u005f\u0005\0\u001a\u005a\u0004\0\u0001\u0061\u0001\0\u001a\u005a" +
+          "\u000b\0\u0038\u005d\u0002\u005b\u001f\u0066\u0003\0\u0006\u0066\u0002\0\u0006\u0066\u0002\0\u0006\u0066" +
+          "\u0002\0\u0003\u0066\u001c\0\u0003\u005b\u0004\0";
+
+        /** 
+         * Translates characters to character classes
+         */
+        private static readonly char[] ZZ_CMAP = zzUnpackCMap(ZZ_CMAP_PACKED);
+
+        /** 
+         * Translates DFA states to action switch labels.
+         */
+        private static readonly int[] ZZ_ACTION = zzUnpackAction();
+
+        private const String ZZ_ACTION_PACKED_0 =
+          "\u0001\0\u0013\u0001\u0001\u0002\u0001\u0003\u0001\u0004\u0001\u0001\u0001\u0005\u0001\u0006" +
+          "\u0001\u0007\u0001\u0008\u000d\0\u0001\u0002\u0001\0\u0001\u0002\u0008\0\u0001\u0003" +
+          "\u000d\0\u0001\u0002\u002f\0";
+
+        private static int[] zzUnpackAction()
+        {
+            int[] result = new int[114];
+            int offset = 0;
+            offset = zzUnpackAction(ZZ_ACTION_PACKED_0, offset, result);
+            return result;
+        }
+
+        private static int zzUnpackAction(String packed, int offset, int[] result)
+        {
+            int i = 0;       /* index in packed string  */
+            int j = offset;  /* index in unpacked array */
+            int l = packed.Length;
+            while (i < l)
+            {
+                int count = packed[i++];
+                int value = packed[i++];
+                do result[j++] = value; while (--count > 0);
+            }
+            return j;
+        }
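+
+        /* Illustrative note (editorial, not generated code): the packed
+           strings above use run-length encoding, each pair of chars being
+           (count, value). A minimal sketch of what the loop above computes:
+
+             int[] result = new int[3];
+             zzUnpackAction("\u0003\u0005", 0, result);   // count 3, value 5
+             // result is now { 5, 5, 5 }
+        */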
+
+
+        /** 
+         * Translates a state to a row index in the transition table
+         */
+        private static readonly int[] ZZ_ROWMAP = zzUnpackRowMap();
+
+        private const String ZZ_ROWMAP_PACKED_0 =
+          "\0\0\0\u0067\0\u00ce\0\u0135\0\u019c\0\u0203\0\u026a\0\u02d1" +
+          "\0\u0338\0\u039f\0\u0406\0\u046d\0\u04d4\0\u053b\0\u05a2\0\u0609" +
+          "\0\u0670\0\u06d7\0\u073e\0\u07a5\0\u080c\0\u0873\0\u08da\0\u0941" +
+          "\0\u09a8\0\u0067\0\u0067\0\u0a0f\0\u00ce\0\u0135\0\u019c\0\u0203" +
+          "\0\u026a\0\u0a76\0\u0add\0\u0b44\0\u0bab\0\u046d\0\u0c12\0\u0c79" +
+          "\0\u0ce0\0\u0d47\0\u0dae\0\u0e15\0\u0e7c\0\u0338\0\u039f\0\u0ee3" +
+          "\0\u0f4a\0\u0fb1\0\u1018\0\u107f\0\u10e6\0\u114d\0\u11b4\0\u121b" +
+          "\0\u1282\0\u12e9\0\u1350\0\u13b7\0\u141e\0\u1485\0\u14ec\0\u1553" +
+          "\0\u15ba\0\u0941\0\u1621\0\u1688\0\u16ef\0\u1756\0\u17bd\0\u1824" +
+          "\0\u188b\0\u18f2\0\u1959\0\u19c0\0\u1a27\0\u1a8e\0\u1af5\0\u1b5c" +
+          "\0\u1bc3\0\u1c2a\0\u1c91\0\u1cf8\0\u1d5f\0\u1dc6\0\u1e2d\0\u1e94" +
+          "\0\u1efb\0\u1f62\0\u1fc9\0\u2030\0\u2097\0\u20fe\0\u2165\0\u21cc" +
+          "\0\u2233\0\u229a\0\u2301\0\u2368\0\u23cf\0\u2436\0\u249d\0\u2504" +
+          "\0\u256b\0\u25d2\0\u2639\0\u26a0\0\u2707\0\u276e\0\u27d5\0\u283c" +
+          "\0\u28a3\0\u290a";
+
+        private static int[] zzUnpackRowMap()
+        {
+            int[] result = new int[114];
+            int offset = 0;
+            offset = zzUnpackRowMap(ZZ_ROWMAP_PACKED_0, offset, result);
+            return result;
+        }
+
+        private static int zzUnpackRowMap(String packed, int offset, int[] result)
+        {
+            int i = 0;  /* index in packed string  */
+            int j = offset;  /* index in unpacked array */
+            int l = packed.Length;
+            while (i < l)
+            {
+                int high = packed[i++] << 16;
+                result[j++] = high | packed[i++];
+            }
+            return j;
+        }
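+
+        /* Sketch (editorial, not part of the generated scanner): the row
+           map packs one int per pair of chars, high half first, so the
+           packed pair "\u0001\u0002" unpacks as follows:
+
+             int[] rows = new int[1];
+             zzUnpackRowMap("\u0001\u0002", 0, rows);
+             // rows[0] == (1 << 16) | 2 == 0x10002
+        */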
+
+        /** 
+         * The transition table of the DFA
+         */
+        private static readonly int[] ZZ_TRANS = zzUnpackTrans();
+
+        private const String ZZ_TRANS_PACKED_0 =
+          "\u0001\u0002\u0001\u0003\u0001\u0002\u0001\u0004\u0001\u0002\u0001\u0005\u0001\u0002\u0001\u0006" +
+          "\u0001\u0002\u0001\u0007\u0001\u0002\u0001\u0008\u0003\u0002\u0001\u0009\u0005\u0002\u0001\u000a" +
+          "\u0003\u0002\u0001\u000b\u0009\u0002\u0001\u000c\u0002\u0002\u0001\u000d\u0023\u0002\u0001\u000e" +
+          "\u0001\u0002\u0001\u000f\u0003\u0002\u0001\u0010\u0001\u0011\u0001\u0002\u0001\u0012\u0001\u0002" +
+          "\u0001\u0013\u0002\u0002\u0001\u0014\u0001\u0002\u0001\u0015\u0001\u0002\u0001\u0016\u0001\u0017" +
+          "\u0003\u0002\u0001\u0018\u0002\u0019\u0001\u001a\u0001\u001b\u0001\u001c\u0069\0\u0001\u0015" +
+          "\u0009\0\u0001\u0015\u0010\0\u0001\u0015\u0012\0\u0001\u0015\u0008\0\u0003\u0015" +
+          "\u000f\0\u0001\u0015\u0008\0\u0001\u0015\u0014\0\u0001\u0015\u0001\0\u0001\u0015" +
+          "\u0001\0\u0001\u0015\u0001\0\u0001\u0015\u0001\0\u0001\u0015\u0001\0\u0003\u0015" +
+          "\u0001\0\u0005\u0015\u0001\0\u0003\u0015\u0001\0\u0009\u0015\u0001\0\u0002\u0015" +
+          "\u0001\0\u000e\u0015\u0001\0\u0002\u0015\u0001\0\u0011\u0015\u0001\0\u0001\u0015" +
+          "\u0001\0\u0003\u0015\u0002\0\u0001\u0015\u0001\0\u0001\u0015\u0001\0\u0002\u0015" +
+          "\u0001\0\u0001\u0015\u000f\0\u0001\u0015\u0003\0\u0001\u0015\u0005\0\u0002\u0015" +
+          "\u0003\0\u0001\u0015\u000b\0\u0001\u0015\u0001\0\u0001\u0015\u0004\0\u0002\u0015" +
+          "\u0004\0\u0001\u0015\u0001\0\u0001\u0015\u0003\0\u0002\u0015\u0001\0\u0001\u0015" +
+          "\u0005\0\u0003\u0015\u0001\0\u0001\u0015\u000d\0\u0001\u0015\u0008\0\u0001\u0015" +
+          "\u0014\0\u0001\u0015\u0003\0\u0001\u0015\u0001\0\u0001\u0015\u0001\0\u0001\u0015" +
+          "\u0001\0\u0003\u0015\u0002\0\u0004\u0015\u0001\0\u0003\u0015\u0002\0\u0003\u0015" +
+          "\u0001\0\u0004\u0015\u0001\0\u0002\u0015\u0002\0\u0003\u0015\u0001\0\u0009\u0015" +
+          "\u0001\0\u0002\u0015\u0001\0\u000e\u0015\u0001\0\u0002\u0015\u0001\0\u0001\u0015" +
+          "\u0001\0\u0003\u0015\u0002\0\u0001\u0015\u0001\0\u0001\u0015\u0001\0\u0002\u0015" +
+          "\u0001\0\u0001\u0015\u000f\0\u0001\u0015\u0003\0\u0001\u0015\u0003\0\u0001\u0015" +
+          "\u0001\0\u0003\u0015\u0002\0\u0001\u0015\u0001\0\u0002\u0015\u0001\0\u0003\u0015" +
+          "\u0003\0\u0002\u0015\u0001\0\u0001\u0015\u0001\0\u0002\u0015\u0001\0\u0002\u0015" +
+          "\u0003\0\u0002\u0015\u0001\0\u0001\u0015\u0001\0\u0001\u0015\u0001\0\u0002\u0015" +
+          "\u0001\0\u0002\u0015\u0001\0\u0002\u0015\u0001\0\u0005\u0015\u0001\0\u0005\u0015" +
+          "\u0001\0\u0002\u0015\u0001\0\u0002\u0015\u0001\0\u0001\u0015\u0001\0\u0003\u0015" +
+          "\u0004\0\u0001\u0015\u0004\0\u0001\u0015\u0019\0\u0003\u0015\u0005\0\u0001\u0015" +
+          "\u0001\0\u0001\u0015\u0001\0\u0001\u0015\u0004\0\u0001\u0015\u000c\0\u0001\u0015" +
+          "\u0005\0\u0001\u0015\u0009\0\u0002\u0015\u000a\0\u0001\u0016\u0001\0\u0002\u0015" +
+          "\u000a\0\u0001\u0015\u0014\0\u0001\u0015\u0001\0\u0001\u0016\u0007\0\u0002\u0015" +
+          "\u0002\0\u0005\u0015\u0002\0\u0002\u0015\u0004\0\u0006\u0015\u0001\0\u0002\u0015" +
+          "\u0004\0\u0005\u0015\u0001\0\u0005\u0015\u0001\0\u0002\u0015\u0001\0\u0003\u0015" +
+          "\u0001\0\u0004\u0015\u0001\0\u0005\u0015\u0001\u0016\u0001\0\u0001\u0015\u0001\0" +
+          "\u0001\u0015\u0001\0\u0003\u0015\u0002\0\u0001\u0015\u0001\0\u0001\u0015\u0001\0" +
+          "\u0001\u0015\u0002\0\u0001\u0015\u000f\0\u0001\u0015\u0003\0\u0001\u0015\u0005\0" +
+          "\u0002\u0015\u0003\0\u0001\u0015\u0004\0\u0003\u0015\u0004\0\u0001\u0015\u0001\0" +
+          "\u0001\u0015\u0002\0\u0001\u0015\u0001\0\u0002\u0015\u0004\0\u0001\u0015\u0001\0" +
+          "\u0001\u0015\u0003\0\u0002\u0015\u0001\0\u0001\u0015\u0005\0\u0003\u0015\u0001\0" +
+          "\u0001\u0015\u0008\0\u0001\u0015\u0001\0\u0002\u0016\u0001\0\u0001\u0015\u0008\0" +
+          "\u0001\u0015\u0014\0\u0001\u0015\u0003\0\u0001\u0015\u0006\0\u0002\u0015\u0005\0" +
+          "\u0001\u0015\u0001\0\u0001\u0015\u0001\0\u0001\u0015\u0001\0\u0009\u0015\u0002\0" +
+          "\u0001\u0015\u0004\0\u0001\u0015\u0004\0\u0006\u0015\u0002\0\u0001\u0015\u0001\0" +
+          "\u0001\u0015\u0001\0\u0003\u0015\u0003\0\u0002\u0015\u0004\0\u0003\u0015\u0001\0" +
+          "\u0001\u0015\u0008\0\u0001\u0015\u0001\0\u0002\u0015\u0011\0\u0001\u0015\u0009\0" +
+          "\u0002\u0015\u000f\0\u0001\u0015\u0006\0\u0002\u0015\u0004\0\u0001\u0015\u0005\0" +
+          "\u0001\u0015\u0002\0\u0001\u0015\u0005\0\u0003\u0015\u0001\0\u0001\u0015\u000d\0" +
+          "\u0001\u0015\u0008\0\u0001\u0015\u0014\0\u0001\u0015\u0003\0\u0001\u0015\u0005\0" +
+          "\u0001\u0015\u001a\0\u000d\u0015\u0005\0\u0003\u0015\u0001\0\u0001\u0015\u0005\0" +
+          "\u0001\u0015\u0007\0\u0001\u0015\u0002\0\u0001\u0015\u0005\0\u0001\u0015\u0002\0" +
+          "\u0001\u0015\u0001\0\u0001\u0015\u0046\0\u0001\u001b\u0011\0\u0001\u0017\u001d\0" +
+          "\u0001\u001a\u0003\0\u0001\u001a\u0003\0\u0001\u001a\u0001\0\u0003\u001a\u0002\0" +
+          "\u0001\u001a\u0002\0\u0001\u001a\u0001\0\u0003\u001a\u0003\0\u0002\u001a\u0001\0" +
+          "\u0001\u001a\u0001\0\u0002\u001a\u0001\0\u0002\u001a\u0003\0\u0002\u001a\u0001\0" +
+          "\u0001\u001a\u0003\0\u0002\u001a\u0001\0\u0002\u001a\u0001\0\u0002\u001a\u0001\0" +
+          "\u0005\u001a\u0001\0\u0005\u001a\u0002\0\u0001\u001a\u0001\0\u0002\u001a\u0001\0" +
+          "\u0001\u001a\u0001\0\u0003\u001a\u0004\0\u0001\u001a\u0004\0\u0001\u001a\u000f\0" +
+          "\u0001\u001a\u0001\0\u0001\u001a\u0001\0\u0001\u001a\u0001\0\u0001\u001a\u0001\0" +
+          "\u0001\u001a\u0001\0\u0003\u001a\u0001\0\u0005\u001a\u0001\0\u0003\u001a\u0001\0" +
+          "\u0009\u001a\u0001\0\u0002\u001a\u0001\0\u000e\u001a\u0001\0\u0002\u001a\u0001\0" +
+          "\u0011\u001a\u0001\0\u0001\u001a\u0001\0\u0003\u001a\u0002\0\u0001\u001a\u0001\0" +
+          "\u0001\u001a\u0001\0\u0002\u001a\u0001\0\u0001\u001a\u000f\0\u0001\u001a\u0001\0" +
+          "\u0001\u001a\u0001\0\u0001\u001a\u0003\0\u0001\u001a\u0001\0\u0003\u001a\u0001\0" +
+          "\u0002\u001a\u0001\0\u0002\u001a\u0001\0\u0003\u001a\u0001\0\u0009\u001a\u0001\0" +
+          "\u0002\u001a\u0001\0\u000e\u001a\u0001\0\u0002\u001a\u0001\0\u0011\u001a\u0001\0" +
+          "\u0001\u001a\u0001\0\u0003\u001a\u0002\0\u0001\u001a\u0001\0\u0001\u001a\u0001\0" +
+          "\u0002\u001a\u0001\0\u0001\u001a\u000f\0\u0001\u001a\u0009\0\u0001\u001a\u0010\0" +
+          "\u0001\u001a\u001b\0\u0001\u001a\u0011\0\u0001\u001a\u0008\0\u0001\u001a\u0014\0" +
+          "\u0001\u001a\u0001\0\u0001\u001a\u0001\0\u0001\u001a\u0001\0\u0001\u001a\u0001\0" +
+          "\u0001\u001a\u0001\0\u0003\u001a\u0001\0\u0005\u001a\u0001\0\u0003\u001a\u0001\0" +
+          "\u0006\u001a\u0001\0\u0002\u001a\u0001\0\u0002\u001a\u0001\0\u0008\u001a\u0001\0" +
+          "\u0005\u001a\u0001\0\u0002\u001a\u0001\0\u0011\u001a\u0001\0\u0001\u001a\u0001\0" +
+          "\u0003\u001a\u0002\0\u0001\u001a\u0001\0\u0001\u001a\u0001\0\u0002\u001a\u0001\0" +
+          "\u0001\u001a\u0066\0\u0001\u001b\u000e\0\u0001\u001d\u0001\0\u0001\u001e\u0001\0" +
+          "\u0001\u001f\u0001\0\u0001\u0020\u0001\0\u0001\u0021\u0001\0\u0001\u0022\u0003\0" +
+          "\u0001\u0023\u0005\0\u0001\u0024\u0003\0\u0001\u0025\u0009\0\u0001\u0026\u0002\0" +
+          "\u0001\u0027\u000e\0\u0001\u0028\u0002\0\u0001\u0029\u0021\0\u0002\u0015\u0001\u002a" +
+          "\u0001\0\u0001\u002b\u0001\0\u0001\u002b\u0001\u002c\u0001\0\u0001\u0015\u0002\0" +
+          "\u0001\u0015\u0001\0\u0001\u001d\u0001\0\u0001\u001e\u0001\0\u0001\u001f\u0001\0" +
+          "\u0001\u0020\u0001\0\u0001\u0021\u0001\0\u0001\u002d\u0003\0\u0001\u002e\u0005\0" +
+          "\u0001\u002f\u0003\0\u0001\u0030\u0009\0\u0001\u0026\u0002\0\u0001\u0031\u000e\0" +
+          "\u0001\u0032\u0002\0\u0001\u0033\u0021\0\u0001\u0015\u0002\u0016\u0002\0\u0002\u0034" +
+          "\u0001\u0035\u0001\0\u0001\u0016\u0002\0\u0001\u0015\u000b\0\u0001\u0036\u000d\0" +
+          "\u0001\u0037\u000c\0\u0001\u0038\u000e\0\u0001\u0039\u0002\0\u0001\u003a\u0011\0" +
+          "\u0001\u003b\u0010\0\u0001\u0017\u0001\0\u0001\u0017\u0003\0\u0001\u002c\u0001\0" +
+          "\u0001\u0017\u0004\0\u0001\u001d\u0001\0\u0001\u001e\u0001\0\u0001\u001f\u0001\0" +
+          "\u0001\u0020\u0001\0\u0001\u0021\u0001\0\u0001\u003c\u0003\0\u0001\u002e\u0005\0" +
+          "\u0001\u002f\u0003\0\u0001\u003d\u0009\0\u0001\u0026\u0002\0\u0001\u003e\u000e\0" +
+          "\u0001\u003f\u0002\0\u0001\u0040\u0011\0\u0001\u0041\u000f\0\u0001\u0015\u0001\u0042" +
+          "\u0001\u0016\u0001\u0043\u0003\0\u0001\u0042\u0001\0\u0001\u0042\u0002\0\u0001\u0015" +
+          "\u0062\0\u0002\u0019\u0004\0\u0001\u001d\u0001\0\u0001\u001e\u0001\0\u0001\u001f" +
+          "\u0001\0\u0001\u0020\u0001\0\u0001\u0021\u0001\0\u0001\u0044\u0003\0\u0001\u0023" +
+          "\u0005\0\u0001\u0024\u0003\0\u0001\u0045\u0009\0\u0001\u0026\u0002\0\u0001\u0046" +
+          "\u000e\0\u0001\u0047\u0002\0\u0001\u0048\u0021\0\u0001\u0015\u0001\u001c\u0001\u002a" +
+          "\u0001\0\u0001\u002b\u0001\0\u0001\u002b\u0001\u002c\u0001\0\u0001\u001c\u0002\0" +
+          "\u0001\u001c\u0002\0\u0001\u0015\u0009\0\u0003\u0015\u0005\0\u0001\u0015\u0001\0" +
+          "\u0001\u0015\u0001\0\u0001\u0015\u0004\0\u0001\u0015\u0004\0\u0001\u0015\u0001\0" +
+          "\u0002\u0015\u0004\0\u0001\u0015\u0005\0\u0001\u0015\u0003\0\u0001\u0015\u0004\0" +
+          "\u0005\u0015\u0008\0\u0001\u002a\u0001\0\u0002\u0015\u0001\0\u0001\u0015\u0008\0" +
+          "\u0001\u0015\u0014\0\u0001\u0015\u0001\0\u0001\u002a\u0007\0\u0002\u0015\u0002\0" +
+          "\u0005\u0015\u0002\0\u0002\u0015\u0004\0\u0006\u0015\u0001\0\u0002\u0015\u0004\0" +
+          "\u0005\u0015\u0001\0\u0005\u0015\u0001\0\u0002\u0015\u0001\0\u0003\u0015\u0001\0" +
+          "\u0004\u0015\u0001\0\u0005\u0015\u0001\u002a\u0001\0\u0001\u0015\u0001\0\u0001\u0015" +
+          "\u0001\0\u0003\u0015\u0002\0\u0001\u0015\u0001\0\u0001\u0015\u0001\0\u0001\u0015" +
+          "\u0002\0\u0001\u0015\u000f\0\u0001\u0015\u0003\0\u0001\u0015\u0005\0\u0002\u0015" +
+          "\u0003\0\u0001\u0015\u0004\0\u0003\u0015\u0004\0\u0001\u0015\u0001\0\u0001\u0015" +
+          "\u0002\0\u0001\u0015\u0001\0\u0002\u0015\u0004\0\u0001\u0015\u0001\0\u0001\u0015" +
+          "\u0003\0\u0002\u0015\u0001\0\u0001\u0015\u0005\0\u0003\u0015\u0001\0\u0001\u0015" +
+          "\u0008\0\u0001\u0015\u0001\0\u0002\u002a\u0001\0\u0001\u0015\u0008\0\u0001\u0015" +
+          "\u0014\0\u0001\u0015\u0003\0\u0001\u0015\u0006\0\u0002\u0015\u0005\0\u0001\u0015" +
+          "\u0001\0\u0001\u0015\u0001\0\u0001\u0015\u0001\0\u0009\u0015\u0002\0\u0001\u0015" +
+          "\u0004\0\u0001\u0015\u0004\0\u0006\u0015\u0002\0\u0001\u0015\u0001\0\u0001\u0015" +
+          "\u0001\0\u0003\u0015\u0001\0\u0001\u0015\u0001\0\u0002\u0015\u0004\0\u0003\u0015" +
+          "\u0001\0\u0001\u0015\u0008\0\u0001\u0015\u0001\0\u0002\u0015\u0011\0\u0001\u0015" +
+          "\u0003\0\u0001\u0015\u0005\0\u0001\u0015\u001a\0\u000d\u0015\u0005\0\u0003\u0015" +
+          "\u0001\0\u0001\u0015\u0005\0\u0003\u0015\u0005\0\u0001\u0015\u0002\0\u0002\u0015" +
+          "\u0004\0\u0001\u0015\u0002\0\u0001\u0015\u0001\0\u0001\u0015\u0043\0\u0002\u0015" +
+          "\u0006\0\u0001\u0015\u002e\0\u0001\u0015\u0003\0\u0001\u0015\u0002\0\u0001\u0015" +
+          "\u0003\0\u0001\u0015\u0005\0\u0001\u0015\u0007\0\u0001\u0015\u0004\0\u0002\u0015" +
+          "\u0003\0\u0002\u0015\u0001\0\u0001\u0015\u0004\0\u0001\u0015\u0001\0\u0001\u0015" +
+          "\u0002\0\u0002\u0015\u0001\0\u0003\u0015\u0001\0\u0001\u0015\u0002\0\u0004\u0015" +
+          "\u0002\0\u0001\u0015\u0021\0\u0001\u001d\u0001\0\u0001\u001e\u0001\0\u0001\u001f" +
+          "\u0001\0\u0001\u0020\u0001\0\u0001\u0021\u0001\0\u0001\u0049\u0003\0\u0001\u0023" +
+          "\u0005\0\u0001\u0024\u0003\0\u0001\u004a\u0009\0\u0001\u0026\u0002\0\u0001\u004b" +
+          "\u000e\0\u0001\u004c\u0002\0\u0001\u004d\u0021\0\u0001\u0015\u0002\u002a\u0002\0" +
+          "\u0002\u004e\u0001\u002c\u0001\0\u0001\u002a\u0002\0\u0001\u0015\u0001\0\u0001\u001d" +
+          "\u0001\0\u0001\u001e\u0001\0\u0001\u001f\u0001\0\u0001\u0020\u0001\0\u0001\u0021" +
+          "\u0001\0\u0001\u004f\u0003\0\u0001\u0050\u0005\0\u0001\u0051\u0003\0\u0001\u0052" +
+          "\u0009\0\u0001\u0026\u0002\0\u0001\u0053\u000e\0\u0001\u0054\u0002\0\u0001\u0055" +
+          "\u0021\0\u0001\u0015\u0001\u002b\u0007\0\u0001\u002b\u0002\0\u0001\u0015\u0001\0" +
+          "\u0001\u001d\u0001\0\u0001\u001e\u0001\0\u0001\u001f\u0001\0\u0001\u0020\u0001\0" +
+          "\u0001\u0021\u0001\0\u0001\u0056\u0003\0\u0001\u0023\u0005\0\u0001\u0024\u0003\0" +
+          "\u0001\u0057\u0009\0\u0001\u0026\u0002\0\u0001\u0058\u000e\0\u0001\u0059\u0002\0" +
+          "\u0001\u005a\u0011\0\u0001\u0041\u000f\0\u0001\u0015\u0001\u002c\u0001\u002a\u0001\u0043" +
+          "\u0003\0\u0001\u002c\u0001\0\u0001\u002c\u0002\0\u0001\u0015\u0002\0\u0001\u0016" +
+          "\u0009\0\u0003\u0015\u0005\0\u0001\u0015\u0001\0\u0001\u0015\u0001\0\u0001\u0015" +
+          "\u0004\0\u0001\u0015\u0004\0\u0001\u0016\u0001\0\u0002\u0016\u0004\0\u0001\u0015" +
+          "\u0005\0\u0001\u0015\u0003\0\u0001\u0016\u0004\0\u0001\u0016\u0002\u0015\u0002\u0016" +
+          "\u0008\0\u0001\u0016\u0001\0\u0002\u0015\u0001\0\u0001\u0016\u0008\0\u0001\u0015" +
+          "\u0014\0\u0001\u0015\u0003\0\u0001\u0015\u0006\0\u0002\u0015\u0005\0\u0001\u0015" +
+          "\u0001\0\u0001\u0015\u0001\0\u0001\u0015\u0001\0\u0009\u0015\u0002\0\u0001\u0015" +
+          "\u0004\0\u0001\u0015\u0004\0\u0006\u0015\u0002\0\u0001\u0015\u0001\0\u0001\u0015" +
+          "\u0001\0\u0003\u0015\u0001\0\u0001\u0016\u0001\0\u0002\u0015\u0004\0\u0003\u0015" +
+          "\u0001\0\u0001\u0015\u0008\0\u0001\u0015\u0001\0\u0002\u0015\u0011\0\u0001\u0015" +
+          "\u0003\0\u0001\u0015\u0005\0\u0001\u0015\u001a\0\u000d\u0015\u0005\0\u0003\u0015" +
+          "\u0001\0\u0001\u0015\u0005\0\u0001\u0015\u0002\u0016\u0005\0\u0001\u0015\u0002\0" +
+          "\u0001\u0015\u0001\u0016\u0004\0\u0001\u0015\u0002\0\u0001\u0015\u0001\0\u0001\u0015" +
+          "\u0043\0\u0002\u0016\u0006\0\u0001\u0016\u002e\0\u0001\u0016\u0003\0\u0001\u0016" +
+          "\u0002\0\u0001\u0016\u0003\0\u0001\u0016\u0005\0\u0001\u0016\u0007\0\u0001\u0016" +
+          "\u0004\0\u0002\u0016\u0003\0\u0002\u0016\u0001\0\u0001\u0016\u0004\0\u0001\u0016" +
+          "\u0001\0\u0001\u0016\u0002\0\u0002\u0016\u0001\0\u0003\u0016\u0001\0\u0001\u0016" +
+          "\u0002\0\u0004\u0016\u0002\0\u0001\u0016\u002b\0\u0001\u005b\u0003\0\u0001\u005c" +
+          "\u0005\0\u0001\u005d\u0003\0\u0001\u005e\u000c\0\u0001\u005f\u000e\0\u0001\u0060" +
+          "\u0002\0\u0001\u0061\u0022\0\u0001\u0034\u0001\u0016\u0006\0\u0001\u0034\u0004\0" +
+          "\u0001\u001d\u0001\0\u0001\u001e\u0001\0\u0001\u001f\u0001\0\u0001\u0020\u0001\0" +
+          "\u0001\u0021\u0001\0\u0001\u0062\u0003\0\u0001\u002e\u0005\0\u0001\u002f\u0003\0" +
+          "\u0001\u0063\u0009\0\u0001\u0026\u0002\0\u0001\u0064\u000e\0\u0001\u0065\u0002\0" +
+          "\u0001\u0066\u0011\0\u0001\u0041\u000f\0\u0001\u0015\u0001\u0035\u0001\u0016\u0001\u0043" +
+          "\u0003\0\u0001\u0035\u0001\0\u0001\u0035\u0002\0\u0001\u0015\u0002\0\u0001\u0017" +
+          "\u001f\0\u0001\u0017\u0001\0\u0002\u0017\u000e\0\u0001\u0017\u0004\0\u0001\u0017" +
+          "\u0002\0\u0002\u0017\u000d\0\u0001\u0017\u005a\0\u0001\u0017\u006b\0\u0002\u0017" +
+          "\u0009\0\u0001\u0017\u004d\0\u0002\u0017\u0006\0\u0001\u0017\u002e\0\u0001\u0017" +
+          "\u0003\0\u0001\u0017\u0002\0\u0001\u0017\u0003\0\u0001\u0017\u0005\0\u0001\u0017" +
+          "\u0007\0\u0001\u0017\u0004\0\u0002\u0017\u0003\0\u0002\u0017\u0001\0\u0001\u0017" +
+          "\u0004\0\u0001\u0017\u0001\0\u0001\u0017\u0002\0\u0002\u0017\u0001\0\u0003\u0017" +
+          "\u0001\0\u0001\u0017\u0002\0\u0004\u0017\u0002\0\u0001\u0017\u006b\0\u0001\u0017" +
+          "\u001d\0\u0001\u0042\u0009\0\u0003\u0015\u0005\0\u0001\u0015\u0001\0\u0001\u0015" +
+          "\u0001\0\u0001\u0015\u0004\0\u0001\u0015\u0004\0\u0001\u0042\u0001\0\u0002\u0042" +
+          "\u0004\0\u0001\u0015\u0005\0\u0001\u0015\u0003\0\u0001\u0042\u0004\0\u0001\u0042" +
+          "\u0002\u0015\u0002\u0042\u0008\0\u0001\u0016\u0001\0\u0002\u0015\u0001\0\u0001\u0042" +
+          "\u0008\0\u0001\u0015\u0014\0\u0001\u0015\u0003\0\u0001\u0015\u0006\0\u0002\u0015" +
+          "\u0005\0\u0001\u0015\u0001\0\u0001\u0015\u0001\0\u0001\u0015\u0001\0\u0009\u0015" +
+          "\u0002\0\u0001\u0015\u0004\0\u0001\u0015\u0004\0\u0006\u0015\u0002\0\u0001\u0015" +
+          "\u0001\0\u0001\u0015\u0001\0\u0003\u0015\u0001\0\u0001\u0042\u0001\0\u0002\u0015" +
+          "\u0004\0\u0003\u0015\u0001\0\u0001\u0015\u0008\0\u0001\u0015\u0001\0\u0002\u0015" +
+          "\u0011\0\u0001\u0015\u0003\0\u0001\u0015\u0005\0\u0001\u0015\u001a\0\u000d\u0015" +
+          "\u0005\0\u0003\u0015\u0001\0\u0001\u0015\u0005\0\u0001\u0015\u0002\u0042\u0005\0" +
+          "\u0001\u0015\u0002\0\u0001\u0015\u0001\u0042\u0004\0\u0001\u0015\u0002\0\u0001\u0015" +
+          "\u0001\0\u0001\u0015\u0043\0\u0002\u0042\u0006\0\u0001\u0042\u002e\0\u0001\u0042" +
+          "\u0003\0\u0001\u0042\u0002\0\u0001\u0042\u0003\0\u0001\u0042\u0005\0\u0001\u0042" +
+          "\u0007\0\u0001\u0042\u0004\0\u0002\u0042\u0003\0\u0002\u0042\u0001\0\u0001\u0042" +
+          "\u0004\0\u0001\u0042\u0001\0\u0001\u0042\u0002\0\u0002\u0042\u0001\0\u0003\u0042" +
+          "\u0001\0\u0001\u0042\u0002\0\u0004\u0042\u0002\0\u0001\u0042\u006b\0\u0001\u0043" +
+          "\u0026\0\u0001\u0067\u000d\0\u0001\u0068\u000c\0\u0001\u0069\u000e\0\u0001\u006a" +
+          "\u0002\0\u0001\u006b\u0011\0\u0001\u0041\u0010\0\u0001\u0043\u0001\0\u0001\u0043" +
+          "\u0003\0\u0001\u002c\u0001\0\u0001\u0043\u0005\0\u0001\u001c\u0009\0\u0003\u0015" +
+          "\u0005\0\u0001\u0015\u0001\0\u0001\u0015\u0001\0\u0001\u0015\u0004\0\u0001\u0015" +
+          "\u0004\0\u0001\u001c\u0001\0\u0002\u001c\u0004\0\u0001\u0015\u0005\0\u0001\u0015" +
+          "\u0003\0\u0001\u001c\u0004\0\u0001\u001c\u0002\u0015\u0002\u001c\u0008\0\u0001\u002a" +
+          "\u0001\0\u0002\u0015\u0001\0\u0001\u001c\u0008\0\u0001\u0015\u0014\0\u0001\u0015" +
+          "\u0003\0\u0001\u0015\u0006\0\u0002\u0015\u0005\0\u0001\u0015\u0001\0\u0001\u0015" +
+          "\u0001\0\u0001\u0015\u0001\0\u0009\u0015\u0002\0\u0001\u0015\u0004\0\u0001\u0015" +
+          "\u0004\0\u0006\u0015\u0002\0\u0001\u0015\u0001\0\u0001\u0015\u0001\0\u0003\u0015" +
+          "\u0001\0\u0001\u001c\u0001\0\u0002\u0015\u0004\0\u0003\u0015\u0001\0\u0001\u0015" +
+          "\u0008\0\u0001\u0015\u0001\0\u0002\u0015\u0011\0\u0001\u0015\u0003\0\u0001\u0015" +
+          "\u0005\0\u0001\u0015\u001a\0\u000d\u0015\u0005\0\u0003\u0015\u0001\0\u0001\u0015" +
+          "\u0005\0\u0001\u0015\u0002\u001c\u0005\0\u0001\u0015\u0002\0\u0001\u0015\u0001\u001c" +
+          "\u0004\0\u0001\u0015\u0002\0\u0001\u0015\u0001\0\u0001\u0015\u0043\0\u0002\u001c" +
+          "\u0006\0\u0001\u001c\u002e\0\u0001\u001c\u0003\0\u0001\u001c\u0002\0\u0001\u001c" +
+          "\u0003\0\u0001\u001c\u0005\0\u0001\u001c\u0007\0\u0001\u001c\u0004\0\u0002\u001c" +
+          "\u0003\0\u0002\u001c\u0001\0\u0001\u001c\u0004\0\u0001\u001c\u0001\0\u0001\u001c" +
+          "\u0002\0\u0002\u001c\u0001\0\u0003\u001c\u0001\0\u0001\u001c\u0002\0\u0004\u001c" +
+          "\u0002\0\u0001\u001c\u0022\0\u0001\u002a\u0009\0\u0003\u0015\u0005\0\u0001\u0015" +
+          "\u0001\0\u0001\u0015\u0001\0\u0001\u0015\u0004\0\u0001\u0015\u0004\0\u0001\u002a" +
+          "\u0001\0\u0002\u002a\u0004\0\u0001\u0015\u0005\0\u0001\u0015\u0003\0\u0001\u002a" +
+          "\u0004\0\u0001\u002a\u0002\u0015\u0002\u002a\u0008\0\u0001\u002a\u0001\0\u0002\u0015" +
+          "\u0001\0\u0001\u002a\u0008\0\u0001\u0015\u0014\0\u0001\u0015\u0003\0\u0001\u0015" +
+          "\u0006\0\u0002\u0015\u0005\0\u0001\u0015\u0001\0\u0001\u0015\u0001\0\u0001\u0015" +
+          "\u0001\0\u0009\u0015\u0002\0\u0001\u0015\u0004\0\u0001\u0015\u0004\0\u0006\u0015" +
+          "\u0002\0\u0001\u0015\u0001\0\u0001\u0015\u0001\0\u0003\u0015\u0001\0\u0001\u002a" +
+          "\u0001\0\u0002\u0015\u0004\0\u0003\u0015\u0001\0\u0001\u0015\u0008\0\u0001\u0015" +
+          "\u0001\0\u0002\u0015\u0011\0\u0001\u0015\u0003\0\u0001\u0015\u0005\0\u0001\u0015" +
+          "\u001a\0\u000d\u0015\u0005\0\u0003\u0015\u0001\0\u0001\u0015\u0005\0\u0001\u0015" +
+          "\u0002\u002a\u0005\0\u0001\u0015\u0002\0\u0001\u0015\u0001\u002a\u0004\0\u0001\u0015" +
+          "\u0002\0\u0001\u0015\u0001\0\u0001\u0015\u0043\0\u0002\u002a\u0006\0\u0001\u002a" +
+          "\u002e\0\u0001\u002a\u0003\0\u0001\u002a\u0002\0\u0001\u002a\u0003\0\u0001\u002a" +
+          "\u0005\0\u0001\u002a\u0007\0\u0001\u002a\u0004\0\u0002\u002a\u0003\0\u0002\u002a" +
+          "\u0001\0\u0001\u002a\u0004\0\u0001\u002a\u0001\0\u0001\u002a\u0002\0\u0002\u002a" +
+          "\u0001\0\u0003\u002a\u0001\0\u0001\u002a\u0002\0\u0004\u002a\u0002\0\u0001\u002a" +
+          "\u002b\0\u0001\u006c\u0003\0\u0001\u006d\u0005\0\u0001\u006e\u0003\0\u0001\u006f" +
+          "\u000c\0\u0001\u0070\u000e\0\u0001\u0071\u0002\0\u0001\u0072\u0022\0\u0001\u004e" +
+          "\u0001\u002a\u0006\0\u0001\u004e\u0005\0\u0001\u002b\u0009\0\u0003\u0015\u0005\0" +
+          "\u0001\u0015\u0001\0\u0001\u0015\u0001\0\u0001\u0015\u0004\0\u0001\u0015\u0004\0" +
+          "\u0001\u002b\u0001\0\u0002\u002b\u0004\0\u0001\u0015\u0005\0\u0001\u0015\u0003\0" +
+          "\u0001\u002b\u0004\0\u0001\u002b\u0002\u0015\u0002\u002b\u000a\0\u0002\u0015\u0001\0" +
+          "\u0001\u002b\u0008\0\u0001\u0015\u0014\0\u0001\u0015\u0009\0\u0002\u0015\u0002\0" +
+          "\u0005\u0015\u0002\0\u0002\u0015\u0004\0\u0006\u0015\u0001\0\u0002\u0015\u0004\0" +
+          "\u0005\u0015\u0001\0\u0005\u0015\u0001\0\u0002\u0015\u0001\0\u0003\u0015\u0001\0" +
+          "\u0004\u0015\u0001\0\u0005\u0015\u0002\0\u0001\u0015\u0001\0\u0001\u0015\u0001\0" +
+          "\u0003\u0015\u0002\0\u0001\u0015\u0001\0\u0001\u0015\u0001\0\u0001\u0015\u0002\0" +
+          "\u0001\u0015\u000f\0\u0001\u0015\u0003\0\u0001\u0015\u0005\0\u0002\u0015\u0003\0" +
+          "\u0001\u0015\u0004\0\u0003\u0015\u0004\0\u0001\u0015\u0001\0\u0001\u0015\u0002\0" +
+          "\u0001\u0015\u0001\0\u0002\u0015\u0004\0\u0001\u0015\u0001\0\u0001\u0015\u0003\0" +
+          "\u0002\u0015\u0001\0\u0001\u0015\u0005\0\u0003\u0015\u0001\0\u0001\u0015\u0008\0" +
+          "\u0001\u0015\u0004\0\u0001\u0015\u0008\0\u0001\u0015\u0014\0\u0001\u0015\u0003\0" +
+          "\u0001\u0015\u0006\0\u0002\u0015\u0005\0\u0001\u0015\u0001\0\u0001\u0015\u0001\0" +
+          "\u0001\u0015\u0001\0\u0009\u0015\u0002\0\u0001\u0015\u0004\0\u0001\u0015\u0004\0" +
+          "\u0006\u0015\u0002\0\u0001\u0015\u0001\0\u0001\u0015\u0001\0\u0003\u0015\u0001\0" +
+          "\u0001\u002b\u0001\0\u0002\u0015\u0004\0\u0003\u0015\u0001\0\u0001\u0015\u0008\0" +
+          "\u0001\u0015\u0001\0\u0002\u0015\u0011\0\u0001\u0015\u0003\0\u0001\u0015\u0005\0" +
+          "\u0001\u0015\u001a\0\u000d\u0015\u0005\0\u0003\u0015\u0001\0\u0001\u0015\u0005\0" +
+          "\u0001\u0015\u0002\u002b\u0005\0\u0001\u0015\u0002\0\u0001\u0015\u0001\u002b\u0004\0" +
+          "\u0001\u0015\u0002\0\u0001\u0015\u0001\0\u0001\u0015\u0043\0\u0002\u002b\u0006\0" +
+          "\u0001\u002b\u002e\0\u0001\u002b\u0003\0\u0001\u002b\u0002\0\u0001\u002b\u0003\0" +
+          "\u0001\u002b\u0005\0\u0001\u002b\u0007\0\u0001\u002b\u0004\0\u0002\u002b\u0003\0" +
+          "\u0002\u002b\u0001\0\u0001\u002b\u0004\0\u0001\u002b\u0001\0\u0001\u002b\u0002\0" +
+          "\u0002\u002b\u0001\0\u0003\u002b\u0001\0\u0001\u002b\u0002\0\u0004\u002b\u0002\0" +
+          "\u0001\u002b\u0022\0\u0001\u002c\u0009\0\u0003\u0015\u0005\0\u0001\u0015\u0001\0" +
+          "\u0001\u0015\u0001\0\u0001\u0015\u0004\0\u0001\u0015\u0004\0\u0001\u002c\u0001\0" +
+          "\u0002\u002c\u0004\0\u0001\u0015\u0005\0\u0001\u0015\u0003\0\u0001\u002c\u0004\0" +
+          "\u0001\u002c\u0002\u0015\u0002\u002c\u0008\0\u0001\u002a\u0001\0\u0002\u0015\u0001\0" +
+          "\u0001\u002c\u0008\0\u0001\u0015\u0014\0\u0001\u0015\u0003\0\u0001\u0015\u0006\0" +
+          "\u0002\u0015\u0005\0\u0001\u0015\u0001\0\u0001\u0015\u0001\0\u0001\u0015\u0001\0" +
+          "\u0009\u0015\u0002\0\u0001\u0015\u0004\0\u0001\u0015\u0004\0\u0006\u0015\u0002\0" +
+          "\u0001\u0015\u0001\0\u0001\u0015\u0001\0\u0003\u0015\u0001\0\u0001\u002c\u0001\0" +
+          "\u0002\u0015\u0004\0\u0003\u0015\u0001\0\u0001\u0015\u0008\0\u0001\u0015\u0001\0" +
+          "\u0002\u0015\u0011\0\u0001\u0015\u0003\0\u0001\u0015\u0005\0\u0001\u0015\u001a\0" +
+          "\u000d\u0015\u0005\0\u0003\u0015\u0001\0\u0001\u0015\u0005\0\u0001\u0015\u0002\u002c" +
+          "\u0005\0\u0001\u0015\u0002\0\u0001\u0015\u0001\u002c\u0004\0\u0001\u0015\u0002\0" +
+          "\u0001\u0015\u0001\0\u0001\u0015\u0043\0\u0002\u002c\u0006\0\u0001\u002c\u002e\0" +
+          "\u0001\u002c\u0003\0\u0001\u002c\u0002\0\u0001\u002c\u0003\0\u0001\u002c\u0005\0" +
+          "\u0001\u002c\u0007\0\u0001\u002c\u0004\0\u0002\u002c\u0003\0\u0002\u002c\u0001\0" +
+          "\u0001\u002c\u0004\0\u0001\u002c\u0001\0\u0001\u002c\u0002\0\u0002\u002c\u0001\0" +
+          "\u0003\u002c\u0001\0\u0001\u002c\u0002\0\u0004\u002c\u0002\0\u0001\u002c\u0022\0" +
+          "\u0001\u0034\u001f\0\u0001\u0034\u0001\0\u0002\u0034\u000e\0\u0001\u0034\u0004\0" +
+          "\u0001\u0034\u0002\0\u0002\u0034\u0008\0\u0001\u0016\u0004\0\u0001\u0034\u001f\0" +
+          "\u0001\u0016\u0042\0\u0001\u0016\u0067\0\u0002\u0016\u005c\0\u0001\u0034\u006b\0" +
+          "\u0002\u0034\u0009\0\u0001\u0034\u004d\0\u0002\u0034\u0006\0\u0001\u0034\u002e\0" +
+          "\u0001\u0034\u0003\0\u0001\u0034\u0002\0\u0001\u0034\u0003\0\u0001\u0034\u0005\0" +
+          "\u0001\u0034\u0007\0\u0001\u0034\u0004\0\u0002\u0034\u0003\0\u0002\u0034\u0001\0" +
+          "\u0001\u0034\u0004\0\u0001\u0034\u0001\0\u0001\u0034\u0002\0\u0002\u0034\u0001\0" +
+          "\u0003\u0034\u0001\0\u0001\u0034\u0002\0\u0004\u0034\u0002\0\u0001\u0034\u0022\0" +
+          "\u0001\u0035\u0009\0\u0003\u0015\u0005\0\u0001\u0015\u0001\0\u0001\u0015\u0001\0" +
+          "\u0001\u0015\u0004\0\u0001\u0015\u0004\0\u0001\u0035\u0001\0\u0002\u0035\u0004\0" +
+          "\u0001\u0015\u0005\0\u0001\u0015\u0003\0\u0001\u0035\u0004\0\u0001\u0035\u0002\u0015" +
+          "\u0002\u0035\u0008\0\u0001\u0016\u0001\0\u0002\u0015\u0001\0\u0001\u0035\u0008\0" +
+          "\u0001\u0015\u0014\0\u0001\u0015\u0003\0\u0001\u0015\u0006\0\u0002\u0015\u0005\0" +
+          "\u0001\u0015\u0001\0\u0001\u0015\u0001\0\u0001\u0015\u0001\0\u0009\u0015\u0002\0" +
+          "\u0001\u0015\u0004\0\u0001\u0015\u0004\0\u0006\u0015\u0002\0\u0001\u0015\u0001\0" +
+          "\u0001\u0015\u0001\0\u0003\u0015\u0001\0\u0001\u0035\u0001\0\u0002\u0015\u0004\0" +
+          "\u0003\u0015\u0001\0\u0001\u0015\u0008\0\u0001\u0015\u0001\0\u0002\u0015\u0011\0" +
+          "\u0001\u0015\u0003\0\u0001\u0015\u0005\0\u0001\u0015\u001a\0\u000d\u0015\u0005\0" +
+          "\u0003\u0015\u0001\0\u0001\u0015\u0005\0\u0001\u0015\u0002\u0035\u0005\0\u0001\u0015" +
+          "\u0002\0\u0001\u0015\u0001\u0035\u0004\0\u0001\u0015\u0002\0\u0001\u0015\u0001\0" +
+          "\u0001\u0015\u0043\0\u0002\u0035\u0006\0\u0001\u0035\u002e\0\u0001\u0035\u0003\0" +
+          "\u0001\u0035\u0002\0\u0001\u0035\u0003\0\u0001\u0035\u0005\0\u0001\u0035\u0007\0" +
+          "\u0001\u0035\u0004\0\u0002\u0035\u0003\0\u0002\u0035\u0001\0\u0001\u0035\u0004\0" +
+          "\u0001\u0035\u0001\0\u0001\u0035\u0002\0\u0002\u0035\u0001\0\u0003\u0035\u0001\0" +
+          "\u0001\u0035\u0002\0\u0004\u0035\u0002\0\u0001\u0035\u0022\0\u0001\u0043\u001f\0" +
+          "\u0001\u0043\u0001\0\u0002\u0043\u000e\0\u0001\u0043\u0004\0\u0001\u0043\u0002\0" +
+          "\u0002\u0043\u000d\0\u0001\u0043\u005a\0\u0001\u0043\u006b\0\u0002\u0043\u0009\0" +
+          "\u0001\u0043\u004d\0\u0002\u0043\u0006\0\u0001\u0043\u002e\0\u0001\u0043\u0003\0" +
+          "\u0001\u0043\u0002\0\u0001\u0043\u0003\0\u0001\u0043\u0005\0\u0001\u0043\u0007\0" +
+          "\u0001\u0043\u0004\0\u0002\u0043\u0003\0\u0002\u0043\u0001\0\u0001\u0043\u0004\0" +
+          "\u0001\u0043\u0001\0\u0001\u0043\u0002\0\u0002\u0043\u0001\0\u0003\u0043\u0001\0" +
+          "\u0001\u0043\u0002\0\u0004\u0043\u0002\0\u0001\u0043\u0022\0\u0001\u004e\u001f\0" +
+          "\u0001\u004e\u0001\0\u0002\u004e\u000e\0\u0001\u004e\u0004\0\u0001\u004e\u0002\0" +
+          "\u0002\u004e\u0008\0\u0001\u002a\u0004\0\u0001\u004e\u001f\0\u0001\u002a\u0042\0" +
+          "\u0001\u002a\u0067\0\u0002\u002a\u005c\0\u0001\u004e\u006b\0\u0002\u004e\u0009\0" +
+          "\u0001\u004e\u004d\0\u0002\u004e\u0006\0\u0001\u004e\u002e\0\u0001\u004e\u0003\0" +
+          "\u0001\u004e\u0002\0\u0001\u004e\u0003\0\u0001\u004e\u0005\0\u0001\u004e\u0007\0" +
+          "\u0001\u004e\u0004\0\u0002\u004e\u0003\0\u0002\u004e\u0001\0\u0001\u004e\u0004\0" +
+          "\u0001\u004e\u0001\0\u0001\u004e\u0002\0\u0002\u004e\u0001\0\u0003\u004e\u0001\0" +
+          "\u0001\u004e\u0002\0\u0004\u004e\u0002\0\u0001\u004e\u0020\0";
+
+        private static int[] zzUnpackTrans()
+        {
+            int[] result = new int[10609];
+            int offset = 0;
+            offset = zzUnpackTrans(ZZ_TRANS_PACKED_0, offset, result);
+            return result;
+        }
+
+        private static int zzUnpackTrans(String packed, int offset, int[] result)
+        {
+            int i = 0;       /* index in packed string  */
+            int j = offset;  /* index in unpacked array */
+            int l = packed.Length;
+            while (i < l)
+            {
+                int count = packed[i++];
+                int value = packed[i++];
+                value--;
+                do result[j++] = value; while (--count > 0);
+            }
+            return j;
+        }
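+
+        /* Worked example (illustrative): transition values are stored
+           shifted by +1 so that state 0 survives string packing; the
+           value-- above restores them, and a stored 0 becomes -1, the
+           "no transition" marker consumed by GetNextToken:
+
+             int[] trans = new int[2];
+             zzUnpackTrans("\u0002\u0000", 0, trans);   // count 2, stored 0
+             // trans == { -1, -1 }
+        */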
+
+
+        /* error codes */
+        private const int ZZ_UNKNOWN_ERROR = 0;
+        private const int ZZ_NO_MATCH = 1;
+        private const int ZZ_PUSHBACK_2BIG = 2;
+
+        /* error messages for the codes above */
+        private static readonly String[] ZZ_ERROR_MSG = {
+    "Unkown internal scanner error",
+    "Error: could not match input",
+    "Error: pushback value was too large"
+  };
+
+        /**
+         * ZZ_ATTRIBUTE[aState] contains the attributes of state <code>aState</code>
+         */
+        private static readonly int[] ZZ_ATTRIBUTE = zzUnpackAttribute();
+
+        private const String ZZ_ATTRIBUTE_PACKED_0 =
+          "\u0001\0\u0001\u0009\u0017\u0001\u0002\u0009\u0001\u0001\u000d\0\u0001\u0001\u0001\0" +
+          "\u0001\u0001\u0008\0\u0001\u0001\u000d\0\u0001\u0001\u002f\0";
+
+        private static int[] zzUnpackAttribute()
+        {
+            int[] result = new int[114];
+            int offset = 0;
+            offset = zzUnpackAttribute(ZZ_ATTRIBUTE_PACKED_0, offset, result);
+            return result;
+        }
+
+        private static int zzUnpackAttribute(String packed, int offset, int[] result)
+        {
+            int i = 0;       /* index in packed string  */
+            int j = offset;  /* index in unpacked array */
+            int l = packed.Length;
+            while (i < l)
+            {
+                int count = packed[i++];
+                int value = packed[i++];
+                do result[j++] = value; while (--count > 0);
+            }
+            return j;
+        }
+
+        /** the input device */
+        private TextReader zzReader;
+
+        /** the current state of the DFA */
+        private int zzState;
+
+        /** the current lexical state */
+        private int zzLexicalState = YYINITIAL;
+
+        /** this buffer contains the current text to be matched and is
+            the source of the yytext() string */
+        private char[] zzBuffer = new char[ZZ_BUFFERSIZE];
+
+        /** the text position at the last accepting state */
+        private int zzMarkedPos;
+
+        /** the current text position in the buffer */
+        private int zzCurrentPos;
+
+        /** startRead marks the beginning of the yytext() string in the buffer */
+        private int zzStartRead;
+
+        /** endRead marks the last character in the buffer that has been read
+            from input */
+        private int zzEndRead;
+
+        /** number of newlines encountered up to the start of the matched text */
+        private int yyline;
+
+        /** the number of characters up to the start of the matched text */
+        private int yychar;
+
+        /**
+         * the number of characters from the last newline up to the start of the 
+         * matched text
+         */
+        private int yycolumn;
+
+        /** 
+         * zzAtBOL == true <=> the scanner is currently at the beginning of a line
+         */
+        private bool zzAtBOL = true;
+
+        /** zzAtEOF == true <=> the scanner is at the EOF */
+        private bool zzAtEOF;
+
+        /** denotes if the user-EOF-code has already been executed */
+        private bool zzEOFDone;
+
+        /* user code: */
+        /** Alphanumeric sequences */
+        public const int WORD_TYPE = StandardTokenizer.ALPHANUM;
+
+        /** Numbers */
+        public const int NUMERIC_TYPE = StandardTokenizer.NUM;
+
+        /**
+         * Chars in class \p{Line_Break = Complex_Context} are from South East Asian
+         * scripts (Thai, Lao, Myanmar, Khmer, etc.).  Sequences of these are kept 
+         * together as a single token rather than broken up, because the logic
+         * required to break them at word boundaries is too complex for UAX#29.
+         * <p>
+         * See Unicode Line Breaking Algorithm: http://www.unicode.org/reports/tr14/#SA
+         */
+        public const int SOUTH_EAST_ASIAN_TYPE = StandardTokenizer.SOUTHEAST_ASIAN;
+
+        public const int IDEOGRAPHIC_TYPE = StandardTokenizer.IDEOGRAPHIC;
+
+        public const int HIRAGANA_TYPE = StandardTokenizer.HIRAGANA;
+
+        public const int KATAKANA_TYPE = StandardTokenizer.KATAKANA;
+
+        public const int HANGUL_TYPE = StandardTokenizer.HANGUL;
+
+        public int YYChar
+        {
+            get
+            {
+                return yychar;
+            }
+        }
+
+        /**
+         * Fills CharTermAttribute with the current token text.
+         */
+        public void GetText(ICharTermAttribute t)
+        {
+            t.CopyBuffer(zzBuffer, zzStartRead, zzMarkedPos - zzStartRead);
+        }
+
+
+        /**
+         * Creates a new scanner.
+         *
+         * @param   input  the TextReader to read input from.
+         */
+        public StandardTokenizerImpl31(TextReader input)
+        {
+            this.zzReader = input;
+        }
+
+
+
+        /** 
+         * Unpacks the compressed character translation table.
+         *
+         * @param packed   the packed character translation table
+         * @return         the unpacked character translation table
+         */
+        private static char[] zzUnpackCMap(String packed)
+        {
+            char[] map = new char[0x10000];
+            int i = 0;  /* index in packed string  */
+            int j = 0;  /* index in unpacked array */
+            while (i < 2650)
+            {
+                int count = packed[i++];
+                char value = packed[i++];
+                do map[j++] = value; while (--count > 0);
+            }
+            return map;
+        }
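+
+        /* How the unpacked tables cooperate (editorial summary): for an
+           input char c in state s, ZZ_CMAP[c] gives the character class,
+           ZZ_ROWMAP[s] gives that state's row offset, and
+           ZZ_TRANS[ZZ_ROWMAP[s] + ZZ_CMAP[c]] is the next DFA state, or
+           -1 if no transition exists -- exactly the lookup performed in
+           GetNextToken below. */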
+
+
+        /**
+         * Refills the input buffer.
+         *
+         * @return      <code>false</code>, iff there was new input.
+         * 
+         * @exception   System.IO.IOException  if any I/O error occurs
+         */
+        private bool zzRefill()
+        {
+
+            /* first: make room (if you can) */
+            if (zzStartRead > 0)
+            {
+                Array.Copy(zzBuffer, zzStartRead,
+                                 zzBuffer, 0,
+                                 zzEndRead - zzStartRead);
+
+                /* translate stored positions */
+                zzEndRead -= zzStartRead;
+                zzCurrentPos -= zzStartRead;
+                zzMarkedPos -= zzStartRead;
+                zzStartRead = 0;
+            }
+
+            /* is the buffer big enough? */
+            if (zzCurrentPos >= zzBuffer.Length)
+            {
+                /* if not: blow it up */
+                char[] newBuffer = new char[zzCurrentPos * 2];
+                Array.Copy(zzBuffer, 0, newBuffer, 0, zzBuffer.Length);
+                zzBuffer = newBuffer;
+            }
+
+            /* finally: fill the buffer with new input */
+            int numRead = zzReader.Read(zzBuffer, zzEndRead,
+                                                    zzBuffer.Length - zzEndRead);
+
+            if (numRead > 0)
+            {
+                zzEndRead += numRead;
+                return false;
+            }
+            // unlikely but not impossible: read 0 characters, but not at end of stream    
+            if (numRead == 0)
+            {
+                int c = zzReader.Read();
+                if (c <= 0)
+                {
+                    return true;
+                }
+                else
+                {
+                    zzBuffer[zzEndRead++] = (char)c;
+                    return false;
+                }
+            }
+
+            // numRead < 0
+            return true;
+        }
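+
+        /* Buffer-management sketch (editorial): zzRefill first slides any
+           unread chars to the front of the buffer, doubles the buffer only
+           when a single token has filled it completely, and returns true
+           exactly at end of input. A token longer than the current buffer
+           therefore triggers repeated doublings until it fits; YYReset
+           later shrinks the buffer back to ZZ_BUFFERSIZE. */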
+
+
+        /**
+         * Closes the input stream.
+         */
+        public void yyclose()
+        {
+            zzAtEOF = true;            /* indicate end of file */
+            zzEndRead = zzStartRead;  /* invalidate buffer    */
+
+            if (zzReader != null)
+                zzReader.Close();
+        }
+
+
+        /**
+         * Resets the scanner to read from a new input stream.
+         * Does not close the old reader.
+         *
+         * All internal variables are reset, the old input stream 
+         * <b>cannot</b> be reused (internal buffer is discarded and lost).
+         * Lexical state is set to <tt>YYINITIAL</tt>.
+         *
+         * Internal scan buffer is resized down to its initial length, if it has grown.
+         *
+         * @param reader   the new input stream 
+         */
+        public void YYReset(TextReader reader)
+        {
+            zzReader = reader;
+            zzAtBOL = true;
+            zzAtEOF = false;
+            zzEOFDone = false;
+            zzEndRead = zzStartRead = 0;
+            zzCurrentPos = zzMarkedPos = 0;
+            yyline = yychar = yycolumn = 0;
+            zzLexicalState = YYINITIAL;
+            if (zzBuffer.Length > ZZ_BUFFERSIZE)
+                zzBuffer = new char[ZZ_BUFFERSIZE];
+        }
+
+
+        /**
+         * Returns the current lexical state.
+         */
+        public int yystate()
+        {
+            return zzLexicalState;
+        }
+
+
+        /**
+         * Enters a new lexical state
+         *
+         * @param newState the new lexical state
+         */
+        public void yybegin(int newState)
+        {
+            zzLexicalState = newState;
+        }
+
+
+        /**
+         * Returns the text matched by the current regular expression.
+         */
+        public String yytext()
+        {
+            return new String(zzBuffer, zzStartRead, zzMarkedPos - zzStartRead);
+        }
+
+
+        /**
+         * Returns the character at position <tt>pos</tt> from the 
+         * matched text. 
+         * 
+         * It is equivalent to yytext()[pos], but faster
+         *
+         * @param pos the position of the character to fetch. 
+         *            A value from 0 to YYLength-1.
+         *
+         * @return the character at position pos
+         */
+        public char yycharat(int pos)
+        {
+            return zzBuffer[zzStartRead + pos];
+        }
+
+
+        /**
+         * Returns the length of the matched text region.
+         */
+        public int YYLength
+        {
+            get
+            {
+                return zzMarkedPos - zzStartRead;
+            }
+        }
+
+
+        /**
+         * Reports an error that occurred while scanning.
+         *
+         * In a well-formed scanner (no or only correct usage of
+         * yypushback(int) and a match-all fallback rule) this method 
+         * will only be called with things that "Can't Possibly Happen".
+         * If this method is called, something is seriously wrong
+         * (e.g. a JFlex bug producing a faulty scanner etc.).
+         *
+         * Usual syntax/scanner level error handling should be done
+         * in error fallback rules.
+         *
+         * @param   errorCode  the code of the error message to display
+         */
+        private void zzScanError(int errorCode)
+        {
+            String message;
+            try
+            {
+                message = ZZ_ERROR_MSG[errorCode];
+            }
+            catch (IndexOutOfRangeException)
+            {
+                message = ZZ_ERROR_MSG[ZZ_UNKNOWN_ERROR];
+            }
+
+            throw new Exception(message);
+        }
+
+
+        /**
+         * Pushes the specified amount of characters back into the input stream.
+         *
+         * They will be read again by the next call of the scanning method.
+         *
+         * @param number  the number of characters to be read again.
+         *                This number must not be greater than YYLength!
+         */
+        public void yypushback(int number)
+        {
+            if (number > YYLength)
+                zzScanError(ZZ_PUSHBACK_2BIG);
+
+            zzMarkedPos -= number;
+        }
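+
+        /* Usage sketch (hypothetical): a rule that matches one character
+           too many can return it to the input, e.g.
+
+             // matched "3." while scanning a number:
+             yypushback(1);   // the '.' is re-scanned on the next call
+        */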
+
+
+        /**
+         * Resumes scanning until the next regular expression is matched,
+         * the end of input is encountered or an I/O-Error occurs.
+         *
+         * @return      the next token
+         * @exception   System.IO.IOException  if any I/O error occurs
+         */
+        public int GetNextToken()
+        {
+            int zzInput;
+            int zzAction;
+
+            // cached fields:
+            int zzCurrentPosL;
+            int zzMarkedPosL;
+            int zzEndReadL = zzEndRead;
+            char[] zzBufferL = zzBuffer;
+            char[] zzCMapL = ZZ_CMAP;
+
+            int[] zzTransL = ZZ_TRANS;
+            int[] zzRowMapL = ZZ_ROWMAP;
+            int[] zzAttrL = ZZ_ATTRIBUTE;
+
+            while (true)
+            {
+                zzMarkedPosL = zzMarkedPos;
+
+                yychar += zzMarkedPosL - zzStartRead;
+
+                zzAction = -1;
+
+                zzCurrentPosL = zzCurrentPos = zzStartRead = zzMarkedPosL;
+
+                zzState = ZZ_LEXSTATE[zzLexicalState];
+
+                // set up zzAction for empty match case:
+                int zzAttributes = zzAttrL[zzState];
+                if ((zzAttributes & 1) == 1)
+                {
+                    zzAction = zzState;
+                }
+
+
+                //zzForAction: 
+                {
+                    while (true)
+                    {
+
+                        if (zzCurrentPosL < zzEndReadL)
+                            zzInput = zzBufferL[zzCurrentPosL++];
+                        else if (zzAtEOF)
+                        {
+                            zzInput = YYEOF;
+                            break;
+                        }
+                        else
+                        {
+                            // store back cached positions
+                            zzCurrentPos = zzCurrentPosL;
+                            zzMarkedPos = zzMarkedPosL;
+                            bool eof = zzRefill();
+                            // get translated positions and possibly new buffer
+                            zzCurrentPosL = zzCurrentPos;
+                            zzMarkedPosL = zzMarkedPos;
+                            zzBufferL = zzBuffer;
+                            zzEndReadL = zzEndRead;
+                            if (eof)
+                            {
+                                zzInput = YYEOF;
+                                break;
+                            }
+                            else
+                            {
+                                zzInput = zzBufferL[zzCurrentPosL++];
+                            }
+                        }
+                        int zzNext = zzTransL[zzRowMapL[zzState] + zzCMapL[zzInput]];
+                        if (zzNext == -1) break;
+                        zzState = zzNext;
+
+                        zzAttributes = zzAttrL[zzState];
+                        if ((zzAttributes & 1) == 1)
+                        {
+                            zzAction = zzState;
+                            zzMarkedPosL = zzCurrentPosL;
+                            if ((zzAttributes & 8) == 8) break;
+                        }
+
+                    }
+                }
+
+                // store back cached position
+                zzMarkedPos = zzMarkedPosL;
+
+                switch (zzAction < 0 ? zzAction : ZZ_ACTION[zzAction])
+                {
+                    case 1:
+                        { /* Break so we don't hit fall-through warning: */
+                            break; /* Not numeric, word, ideographic, hiragana, or SE Asian -- ignore it. */
+                        }
+                    case 9: break;
+                    case 2:
+                        {
+                            return WORD_TYPE;
+                        }
+                    case 10: break;
+                    case 3:
+                        {
+                            return NUMERIC_TYPE;
+                        }
+                    case 11: break;
+                    case 4:
+                        {
+                            return KATAKANA_TYPE;
+                        }
+                    case 12: break;
+                    case 5:
+                        {
+                            return SOUTH_EAST_ASIAN_TYPE;
+                        }
+                    case 13: break;
+                    case 6:
+                        {
+                            return IDEOGRAPHIC_TYPE;
+                        }
+                    case 14: break;
+                    case 7:
+                        {
+                            return HIRAGANA_TYPE;
+                        }
+                    case 15: break;
+                    case 8:
+                        {
+                            return HANGUL_TYPE;
+                        }
+                    case 16: break;
+                    default:
+                        if (zzInput == YYEOF && zzStartRead == zzCurrentPos)
+                        {
+                            zzAtEOF = true;
+                            {
+                                return StandardTokenizerInterface.YYEOF;
+                            }
+                        }
+                        else
+                        {
+                            zzScanError(ZZ_NO_MATCH);
+                        }
+                        break;
+                }
+            }
+        }
+
+
+    }
+}
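
The generated loop above is a table-driven DFA walk: each input char is mapped
to a character class (ZZ_CMAP), which selects an entry in the current state's
transition row (ZZ_ROWMAP/ZZ_TRANS), and states whose attribute bit 0 is set
are accepting. A minimal standalone sketch of the same longest-match logic,
with the generated tables passed in as parameters (names are illustrative,
not the generated API):

    internal static class DfaSketch
    {
        // Returns the end offset (exclusive) of the longest match starting at
        // 'start', or -1 if no accepting state was ever reached.
        public static int LongestMatchEnd(char[] buf, int start, int end, int startState,
                                          char[] cmap, int[] rowMap, int[] trans, int[] attr)
        {
            int state = startState;
            int lastAcceptEnd = -1;
            for (int pos = start; pos < end; pos++)
            {
                // char -> character class -> transition-table entry
                int next = trans[rowMap[state] + cmap[buf[pos]]];
                if (next == -1) break;          // dead state: no longer match is possible
                state = next;
                if ((attr[state] & 1) == 1)     // accepting state
                    lastAcceptEnd = pos + 1;    // longest match so far ends here
            }
            return lastAcceptEnd;
        }
    }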


[50/50] [abbrv] git commit: Merge remote-tracking branch 'origin/branch_4x' into tmp

Posted by mh...@apache.org.
Merge remote-tracking branch 'origin/branch_4x' into tmp


Project: http://git-wip-us.apache.org/repos/asf/lucenenet/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucenenet/commit/a8ad4786
Tree: http://git-wip-us.apache.org/repos/asf/lucenenet/tree/a8ad4786
Diff: http://git-wip-us.apache.org/repos/asf/lucenenet/diff/a8ad4786

Branch: refs/heads/branch_4x
Commit: a8ad47860833367c71d0e95de384d78b77a9903a
Parents: 07f83ff ea1dc3c
Author: michael herndon <mh...@michaelherndon.com>
Authored: Tue Sep 24 14:29:29 2013 -0400
Committer: michael herndon <mh...@michaelherndon.com>
Committed: Tue Sep 24 14:29:29 2013 -0400

----------------------------------------------------------------------

----------------------------------------------------------------------



[08/50] [abbrv] git commit: Port: more util test classes

Posted by mh...@apache.org.
Port: more util test classes


Project: http://git-wip-us.apache.org/repos/asf/lucenenet/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucenenet/commit/f1544d66
Tree: http://git-wip-us.apache.org/repos/asf/lucenenet/tree/f1544d66
Diff: http://git-wip-us.apache.org/repos/asf/lucenenet/diff/f1544d66

Branch: refs/heads/branch_4x
Commit: f1544d6656045a482173cb1ff189e9fcde7050b5
Parents: 27c7d0d
Author: James Blair <jm...@gmail.com>
Authored: Thu Jul 11 18:37:06 2013 -0400
Committer: James Blair <jm...@gmail.com>
Committed: Thu Jul 11 18:37:06 2013 -0400

----------------------------------------------------------------------
 test/core/Util/TestSmallFloat.cs        | 262 ++++++++++++++++-----------
 test/core/Util/TestSortedVIntList.cs    | 244 -------------------------
 test/core/Util/TestSorterTemplate.cs    | 198 ++++++++++++++++++++
 test/core/Util/TestStringHelper.cs      |  48 -----
 test/core/Util/TestStringIntern.cs      | 137 --------------
 test/core/Util/TestUnicodeUtil.cs       | 173 ++++++++++++++++++
 test/core/Util/TestVersion.cs           |  45 ++++-
 test/core/Util/TestVersionComparator.cs |  38 ++++
 test/core/Util/TestVirtualMethod.cs     |  88 +++++++++
 9 files changed, 688 insertions(+), 545 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucenenet/blob/f1544d66/test/core/Util/TestSmallFloat.cs
----------------------------------------------------------------------
diff --git a/test/core/Util/TestSmallFloat.cs b/test/core/Util/TestSmallFloat.cs
index de1c613..99ed773 100644
--- a/test/core/Util/TestSmallFloat.cs
+++ b/test/core/Util/TestSmallFloat.cs
@@ -21,114 +21,156 @@ using NUnit.Framework;
 
 namespace Lucene.Net.Util
 {
-	
-	/// <version>  $Id$
-	/// </version>
-	[TestFixture]
-	public class TestSmallFloat:LuceneTestCase
-	{
-		
-		// original lucene byteToFloat
-		internal static float Orig_byteToFloat(byte b)
-		{
-			if (b == 0)
-			// zero is a special case
-				return 0.0f;
-			int mantissa = b & 7;
-			int exponent = (b >> 3) & 31;
-			int bits = ((exponent + (63 - 15)) << 24) | (mantissa << 21);
-			return BitConverter.ToSingle(BitConverter.GetBytes(bits), 0);
-		}
-		
-		// original lucene floatToByte
-		internal static sbyte Orig_floatToByte(float f)
-		{
-			if (f < 0.0f)
-			// round negatives up to zero
-				f = 0.0f;
-			
-			if (f == 0.0f)
-			// zero is a special case
-				return 0;
-			
-			int bits = BitConverter.ToInt32(BitConverter.GetBytes(f), 0);
-			int mantissa = (bits & 0xffffff) >> 21;
-			int exponent = (((bits >> 24) & 0x7f) - 63) + 15;
-			
-			if (exponent > 31)
-			{
-				// overflow: use max value
-				exponent = 31;
-				mantissa = 7;
-			}
-			
-			if (exponent < 0)
-			{
-				// underflow: use min value
-				exponent = 0;
-				mantissa = 1;
-			}
-			
-			return (sbyte) ((exponent << 3) | mantissa); // pack into a byte
-		}
-		
-		[Test]
-		public virtual void  TestByteToFloat()
-		{
-			for (int i = 0; i < 256; i++)
-			{
-				float f1 = Orig_byteToFloat((byte) i);
-				float f2 = SmallFloat.ByteToFloat((byte) i, 3, 15);
-				float f3 = SmallFloat.Byte315ToFloat((byte) i);
-				Assert.AreEqual(f1, f2, 0.0);
-				Assert.AreEqual(f2, f3, 0.0);
-				
-				float f4 = SmallFloat.ByteToFloat((byte) i, 5, 2);
-				float f5 = SmallFloat.Byte52ToFloat((byte) i);
-				Assert.AreEqual(f4, f5, 0.0);
-			}
-		}
-		
-		[Test]
-		public virtual void  TestFloatToByte()
-		{
-			System.Random rand = NewRandom();
-			// up iterations for more exhaustive test after changing something
-			for (int i = 0; i < 100000; i++)
-			{
-				float f = BitConverter.ToSingle(BitConverter.GetBytes(rand.Next()), 0);
-				if (f != f)
-					continue; // skip NaN
-				sbyte b1 = Orig_floatToByte(f);
-				sbyte b2 = SmallFloat.FloatToByte(f, 3, 15);
-				sbyte b3 = SmallFloat.FloatToByte315(f);
-				Assert.AreEqual(b1, b2);
-				Assert.AreEqual(b2, b3);
-				
-				sbyte b4 = SmallFloat.FloatToByte(f, 5, 2);
-				sbyte b5 = SmallFloat.FloatToByte52(f);
-				Assert.AreEqual(b4, b5);
-			}
-		}
-		
-		/// <summary> 
-		/// // Do an exhaustive test of all possible floating point values
-		/// // for the 315 float against the original norm encoding in Similarity.
-		/// // Takes 75 seconds on my Pentium4 3GHz, with Java5 -server
-		/// public void testAllFloats() {
-		/// for(int i = Integer.MIN_VALUE;;i++) {
-		/// float f = Float.intBitsToFloat(i);
-		/// if (f==f) { // skip non-numbers
-		/// byte b1 = orig_floatToByte(f);
-		/// byte b2 = SmallFloat.floatToByte315(f);
-		/// if (b1!=b2) {
-		/// TestCase.fail("Failed floatToByte315 for float " + f);
-		/// }
-		/// }
-		/// if (i==Integer.MAX_VALUE) break;
-		/// }
-		/// }
-		/// *
-		/// </summary>
-	}
+
+    /// <version>  $Id$
+    /// </version>
+    [TestFixture]
+    public class TestSmallFloat : LuceneTestCase
+    {
+
+        // original lucene byteToFloat
+        internal static float Orig_byteToFloat(byte b)
+        {
+            if (b == 0)
+                // zero is a special case
+                return 0.0f;
+            int mantissa = b & 7;
+            int exponent = (b >> 3) & 31;
+            int bits = ((exponent + (63 - 15)) << 24) | (mantissa << 21);
+            return BitConverter.ToSingle(BitConverter.GetBytes(bits), 0);
+        }
+
+        internal static sbyte Orig_floatToByte_v13(float f)
+        {
+            if (f < 0.0f)                                 // round negatives up to zero
+                f = 0.0f;
+
+            if (f == 0.0f)                                // zero is a special case
+                return 0;
+
+            int bits = BitConverter.ToInt32(BitConverter.GetBytes(f), 0);           // parse float into parts
+            int mantissa = (bits & 0xffffff) >> 21;
+            int exponent = (((bits >> 24) & 0x7f) - 63) + 15;
+
+            if (exponent > 31)
+            {                          // overflow: use max value
+                exponent = 31;
+                mantissa = 7;
+            }
+
+            if (exponent < 0)
+            {                           // underflow: use min value
+                exponent = 0;
+                mantissa = 1;
+            }
+
+            return (sbyte)((exponent << 3) | mantissa);    // pack into a byte
+        }
+
+        // original lucene floatToByte
+        internal static sbyte Orig_floatToByte(float f)
+        {
+            if (f < 0.0f)
+                // round negatives up to zero
+                f = 0.0f;
+
+            if (f == 0.0f)
+                // zero is a special case
+                return 0;
+
+            int bits = BitConverter.ToInt32(BitConverter.GetBytes(f), 0);
+            int mantissa = (bits & 0xffffff) >> 21;
+            int exponent = (((bits >> 24) & 0x7f) - 63) + 15;
+
+            if (exponent > 31)
+            {
+                // overflow: use max value
+                exponent = 31;
+                mantissa = 7;
+            }
+
+            if (exponent < 0 || (exponent == 0 && mantissa == 0))
+            {
+                // underflow: use min value; the extra mantissa check is the
+                // LUCENE-2937 fix that the v13 version above deliberately lacks
+                exponent = 0;
+                mantissa = 1;
+            }
+
+            return (sbyte)((exponent << 3) | mantissa); // pack into a byte
+        }
+
+        [Test]
+        public virtual void TestByteToFloat()
+        {
+            for (int i = 0; i < 256; i++)
+            {
+                float f1 = Orig_byteToFloat((byte)i);
+                float f2 = SmallFloat.ByteToFloat((byte)i, 3, 15);
+                float f3 = SmallFloat.Byte315ToFloat((sbyte)i);
+                Assert.AreEqual(f1, f2, 0.0);
+                Assert.AreEqual(f2, f3, 0.0);
+
+                float f4 = SmallFloat.ByteToFloat((byte)i, 5, 2);
+                float f5 = SmallFloat.Byte52ToFloat((sbyte)i);
+                Assert.AreEqual(f4, f5, 0.0);
+            }
+        }
+
+        [Test]
+        public virtual void TestFloatToByte()
+        {
+            Assert.AreEqual(0, Orig_floatToByte_v13(5.8123817E-10f));       // verify the old bug (see LUCENE-2937)
+            Assert.AreEqual(1, Orig_floatToByte(5.8123817E-10f));           // verify it's fixed in this test code
+            Assert.AreEqual(1, SmallFloat.FloatToByte315(5.8123817E-10f));  // verify it's fixed
+
+            // test some constants
+            Assert.AreEqual(0, SmallFloat.FloatToByte315(0));
+            Assert.AreEqual(1, SmallFloat.FloatToByte315(float.Epsilon));            // smallest positive (Java's Float.MIN_VALUE); underflow rounds up
+            Assert.AreEqual(255, SmallFloat.FloatToByte315(float.MaxValue) & 0xff);  // overflow rounds down to largest positive
+            Assert.AreEqual(255, SmallFloat.FloatToByte315(float.PositiveInfinity) & 0xff);
+
+            // all negatives map to 0
+            Assert.AreEqual(0, SmallFloat.FloatToByte315(-float.Epsilon));
+            Assert.AreEqual(0, SmallFloat.FloatToByte315(-float.MaxValue));
+            Assert.AreEqual(0, SmallFloat.FloatToByte315(float.NegativeInfinity));
+
+
+            // up iterations for more exhaustive test after changing something
+            int num = AtLeast(100000);
+            var rand = new Random();    // hoisted: a fresh time-seeded Random per iteration would repeat values
+            for (int i = 0; i < num; i++)
+            {
+                float f = BitConverter.ToSingle(BitConverter.GetBytes(rand.Next()), 0);
+                if (float.IsNaN(f)) continue;    // skip NaN
+                sbyte b1 = Orig_floatToByte(f);
+                sbyte b2 = SmallFloat.FloatToByte(f, 3, 15);
+                sbyte b3 = SmallFloat.FloatToByte315(f);
+                Assert.AreEqual(b1, b2);
+                Assert.AreEqual(b2, b3);
+
+                sbyte b4 = SmallFloat.FloatToByte(f, 5, 2);
+                sbyte b5 = SmallFloat.FloatToByte52(f);
+                Assert.AreEqual(b4, b5);
+            }
+        }
+
+        /// <summary> 
+        /// // Do an exhaustive test of all possible floating point values
+        /// // for the 315 float against the original norm encoding in Similarity.
+        /// // Takes 75 seconds on my Pentium4 3GHz, with Java5 -server
+        /// public void testAllFloats() {
+        /// for(int i = Integer.MinValue;;i++) {
+        /// float f = Float.intBitsToFloat(i);
+        /// if (f==f) { // skip non-numbers
+        /// byte b1 = orig_floatToByte(f);
+        /// byte b2 = SmallFloat.floatToByte315(f);
+        /// if (b1!=b2) {
+        /// TestCase.fail("Failed floatToByte315 for float " + f);
+        /// }
+        /// }
+        /// if (i==Integer.MaxValue) break;
+        /// }
+        /// }
+        /// *
+        /// </summary>
+    }
 }
\ No newline at end of file
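
For context, the "byte315" format exercised above packs a float into a single
byte: 3 mantissa bits, 5 exponent bits, exponent zero-point 15. A compact
sketch of the encode/decode pair (a restatement for illustration, not the
Lucene.Net source):

    using System;

    internal static class SmallFloatSketch
    {
        public static byte FloatToByte315(float f)
        {
            int bits = BitConverter.ToInt32(BitConverter.GetBytes(f), 0);
            int smallfloat = bits >> (24 - 3);            // keep sign, exponent, top 3 mantissa bits
            if (smallfloat <= ((63 - 15) << 3))
                return (byte)((bits <= 0) ? 0 : 1);       // negatives -> 0, tiny positives -> 1
            if (smallfloat >= ((63 - 15) << 3) + 0x100)
                return 0xFF;                              // overflow -> largest representable value
            return (byte)(smallfloat - ((63 - 15) << 3)); // re-bias the exponent
        }

        public static float Byte315ToFloat(byte b)
        {
            if (b == 0) return 0.0f;                      // zero is a special case
            int bits = (b << (24 - 3)) + ((63 - 15) << 24);
            return BitConverter.ToSingle(BitConverter.GetBytes(bits), 0);
        }
    }

Round-tripping is lossy by design: Byte315ToFloat(FloatToByte315(f)) returns a
nearby representable value at or below a positive f, which is all a norm needs.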

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/f1544d66/test/core/Util/TestSortedVIntList.cs
----------------------------------------------------------------------
diff --git a/test/core/Util/TestSortedVIntList.cs b/test/core/Util/TestSortedVIntList.cs
deleted file mode 100644
index be63f10..0000000
--- a/test/core/Util/TestSortedVIntList.cs
+++ /dev/null
@@ -1,244 +0,0 @@
-/* 
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- * 
- * http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-using System;
-
-using NUnit.Framework;
-
-using DocIdSetIterator = Lucene.Net.Search.DocIdSetIterator;
-
-namespace Lucene.Net.Util
-{
-	
-	[TestFixture]
-	public class TestSortedVIntList:LuceneTestCase
-	{
-		
-		internal virtual void  TstIterator(SortedVIntList vintList, int[] ints)
-		{
-			for (int i = 0; i < ints.Length; i++)
-			{
-				if ((i > 0) && (ints[i - 1] == ints[i]))
-				{
-					return ; // DocNrSkipper should not skip to same document.
-				}
-			}
-			DocIdSetIterator m = vintList.Iterator();
-			for (int i = 0; i < ints.Length; i++)
-			{
-				Assert.IsTrue(m.NextDoc() != DocIdSetIterator.NO_MORE_DOCS, "No end of Matcher at: " + i);
-				Assert.AreEqual(ints[i], m.DocID());
-			}
-			Assert.IsTrue(m.NextDoc() == DocIdSetIterator.NO_MORE_DOCS, "End of Matcher");
-		}
-		
-		internal virtual void  TstVIntList(SortedVIntList vintList, int[] ints, int expectedByteSize)
-		{
-			Assert.AreEqual(ints.Length, vintList.Size, "Size");
-			Assert.AreEqual(expectedByteSize, vintList.ByteSize, "Byte size");
-			TstIterator(vintList, ints);
-		}
-		
-		public virtual void  TstViaBitSet(int[] ints, int expectedByteSize)
-		{
-			int MAX_INT_FOR_BITSET = 1024 * 1024;
-			//mgarski - BitArray cannot grow, so make as large as we would need it to be
-			System.Collections.BitArray bs = new System.Collections.BitArray(MAX_INT_FOR_BITSET);
-			for (int i = 0; i < ints.Length; i++)
-			{
-				if (ints[i] > MAX_INT_FOR_BITSET)
-				{
-					return ; // BitSet takes too much memory
-				}
-				if ((i > 0) && (ints[i - 1] == ints[i]))
-				{
-					return ; // BitSet cannot store duplicate.
-				}
-				bs.Set(ints[i], true);
-			}
-			SortedVIntList svil = new SortedVIntList(bs);
-			TstVIntList(svil, ints, expectedByteSize);
-			TstVIntList(new SortedVIntList(svil.Iterator()), ints, expectedByteSize);
-		}
-		
-		private const int VB1 = 0x7F;
-		private const int BIT_SHIFT = 7;
-		private static readonly int VB2 = (VB1 << BIT_SHIFT) | VB1;
-		private static readonly int VB3 = (VB2 << BIT_SHIFT) | VB1;
-		private static readonly int VB4 = (VB3 << BIT_SHIFT) | VB1;
-		
-		private int VIntByteSize(int i)
-		{
-			System.Diagnostics.Debug.Assert(i >= 0);
-			if (i <= VB1)
-				return 1;
-			if (i <= VB2)
-				return 2;
-			if (i <= VB3)
-				return 3;
-			if (i <= VB4)
-				return 4;
-			return 5;
-		}
-		
-		private int VIntListByteSize(int[] ints)
-		{
-			int byteSize = 0;
-			int last = 0;
-			for (int i = 0; i < ints.Length; i++)
-			{
-				byteSize += VIntByteSize(ints[i] - last);
-				last = ints[i];
-			}
-			return byteSize;
-		}
-		
-		public virtual void  TstInts(int[] ints)
-		{
-			int expectedByteSize = VIntListByteSize(ints);
-			try
-			{
-				TstVIntList(new SortedVIntList(ints), ints, expectedByteSize);
-				TstViaBitSet(ints, expectedByteSize);
-			}
-			catch (System.IO.IOException ioe)
-			{
-				throw new System.SystemException("", ioe);
-			}
-		}
-		
-		public virtual void  TstIllegalArgExc(int[] ints)
-		{
-            Assert.Throws<ArgumentException>(() => new SortedVIntList(ints), "Expected ArgumentException");
-		}
-		
-		private int[] FibArray(int a, int b, int size)
-		{
-			int[] fib = new int[size];
-			fib[0] = a;
-			fib[1] = b;
-			for (int i = 2; i < size; i++)
-			{
-				fib[i] = fib[i - 1] + fib[i - 2];
-			}
-			return fib;
-		}
-		
-		private int[] ReverseDiffs(int[] ints)
-		{
-			// reverse the order of the successive differences
-			int[] res = new int[ints.Length];
-			for (int i = 0; i < ints.Length; i++)
-			{
-				res[i] = ints[ints.Length - 1] + (ints[0] - ints[ints.Length - 1 - i]);
-			}
-			return res;
-		}
-		
-		[Test]
-		public virtual void  Test01()
-		{
-			TstInts(new int[]{});
-		}
-		[Test]
-		public virtual void  Test02()
-		{
-			TstInts(new int[]{0});
-		}
-		[Test]
-		public virtual void  Test04a()
-		{
-			TstInts(new int[]{0, VB2 - 1});
-		}
-		[Test]
-		public virtual void  Test04b()
-		{
-			TstInts(new int[]{0, VB2});
-		}
-		[Test]
-		public virtual void  Test04c()
-		{
-			TstInts(new int[]{0, VB2 + 1});
-		}
-		[Test]
-		public virtual void  Test05()
-		{
-			TstInts(FibArray(0, 1, 7)); // includes duplicate value 1
-		}
-		[Test]
-		public virtual void  Test05b()
-		{
-			TstInts(ReverseDiffs(FibArray(0, 1, 7)));
-		}
-		[Test]
-		public virtual void  Test06()
-		{
-			TstInts(FibArray(1, 2, 45)); // no duplicates, size 46 exceeds max int.
-		}
-		[Test]
-		public virtual void  Test06b()
-		{
-			TstInts(ReverseDiffs(FibArray(1, 2, 45)));
-		}
-		[Test]
-		public virtual void  Test07a()
-		{
-			TstInts(new int[]{0, VB3});
-		}
-		[Test]
-		public virtual void  Test07b()
-		{
-			TstInts(new int[]{1, VB3 + 2});
-		}
-		[Test]
-		public virtual void  Test07c()
-		{
-			TstInts(new int[]{2, VB3 + 4});
-		}
-		[Test]
-		public virtual void  Test08a()
-		{
-			TstInts(new int[]{0, VB4 + 1});
-		}
-		[Test]
-		public virtual void  Test08b()
-		{
-			TstInts(new int[]{1, VB4 + 1});
-		}
-		[Test]
-		public virtual void  Test08c()
-		{
-			TstInts(new int[]{2, VB4 + 1});
-		}
-		
-		[Test]
-		public virtual void  Test10()
-		{
-			TstIllegalArgExc(new int[]{- 1});
-		}
-		[Test]
-		public virtual void  Test11()
-		{
-			TstIllegalArgExc(new int[]{1, 0});
-		}
-		[Test]
-		public virtual void  Test12()
-		{
-			TstIllegalArgExc(new int[]{0, 1, 1, 2, 3, 5, 8, 0});
-		}
-	}
-}
\ No newline at end of file
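
The byte sizes the deleted test computed are plain VInt arithmetic: 7 payload
bits per byte with the high bit as a continuation flag, applied to the deltas
of the sorted list. A minimal sketch for non-negative values:

    internal static class VIntSketch
    {
        // One byte per 7 significant bits, minimum one byte.
        public static int VIntByteSize(int i)
        {
            int size = 1;
            while ((i & ~0x7F) != 0)   // more than 7 bits remaining?
            {
                i >>= 7;               // callers pass non-negative values
                size++;
            }
            return size;
        }

        // A sorted list is stored as VInts of successive differences, which
        // is why the expected size above sums over deltas.
        public static int SortedListByteSize(int[] sortedInts)
        {
            int byteSize = 0, last = 0;
            foreach (int v in sortedInts)
            {
                byteSize += VIntByteSize(v - last);
                last = v;
            }
            return byteSize;
        }
    }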

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/f1544d66/test/core/Util/TestSorterTemplate.cs
----------------------------------------------------------------------
diff --git a/test/core/Util/TestSorterTemplate.cs b/test/core/Util/TestSorterTemplate.cs
new file mode 100644
index 0000000..9f60d38
--- /dev/null
+++ b/test/core/Util/TestSorterTemplate.cs
@@ -0,0 +1,198 @@
+using System;
+using Lucene.Net.Support;
+using Lucene.Net.Test.Support;
+using Lucene.Net.Util;
+using NUnit.Framework;
+
+namespace Lucene.Net.Test.Util
+{
+    [TestFixture]
+    public class TestSorterTemplate : LuceneTestCase
+    {
+        private Random random = new Random();
+
+        private static readonly int SLOW_SORT_THRESHOLD = 1000;
+
+        // A sorter template that compares only the last 32 bits
+        internal class Last32BitsSorterTemplate : SorterTemplate
+        {
+
+            long[] arr;
+            long pivot;
+
+            internal Last32BitsSorterTemplate(long[] arr)
+            {
+                this.arr = arr;
+            }
+
+            protected internal override void Swap(int i, int j)
+            {
+                long tmp = arr[i];
+                arr[i] = arr[j];
+                arr[j] = tmp;
+            }
+
+            private int CompareValues(long i, long j)
+            {
+                // only compare the last 32 bits
+                long a = i & 0xFFFFFFFFL;
+                long b = j & 0xFFFFFFFFL;
+                return a < b ? -1 : a == b ? 0 : 1;
+            }
+
+            protected internal override int Compare(int i, int j)
+            {
+                return CompareValues(arr[i], arr[j]);
+            }
+
+            protected internal override void SetPivot(int i)
+            {
+                pivot = arr[i];
+            }
+
+            protected internal override int ComparePivot(int j)
+            {
+                return CompareValues(pivot, arr[j]);
+            }
+
+            protected override void Merge(int lo, int pivot, int hi, int len1, int len2)
+            {
+                // TimSort and MergeSort should call runMerge to sort out trivial cases
+                Assert.IsTrue(len1 >= 1);
+                Assert.IsTrue(len2 >= 1);
+                Assert.IsTrue(len1 + len2 >= 3);
+                Assert.IsTrue(Compare(lo, pivot) > 0);
+                Assert.IsTrue(Compare(pivot - 1, hi - 1) > 0);
+                Assert.IsFalse(Compare(pivot - 1, pivot) <= 0);
+                base.Merge(lo, pivot, hi, len1, len2);
+            }
+
+        }
+
+        void TestSort(int[] intArr)
+        {
+            // we modify the array as a long[] and store the original ord in the first 32 bits
+            // to be able to check stability
+            long[] arr = ToLongsAndOrds(intArr);
+
+            // use MergeSort as a reference
+            // Assert.AreEqual checks for sorting + stability
+            // Assert.AreEqual(ToInts) checks for sorting only
+            long[] mergeSorted = Arrays.CopyOf(arr, arr.Length);
+            new Last32BitsSorterTemplate(mergeSorted).MergeSort(0, arr.Length - 1);
+
+            if (arr.Length < SLOW_SORT_THRESHOLD)
+            {
+                long[] insertionSorted = Arrays.CopyOf(arr, arr.Length);
+                new Last32BitsSorterTemplate(insertionSorted).InsertionSort(0, arr.Length - 1);
+                Assert.AreEqual(mergeSorted, insertionSorted);
+
+                long[] binarySorted = Arrays.CopyOf(arr, arr.Length);
+                new Last32BitsSorterTemplate(binarySorted).BinarySort(0, arr.Length - 1);
+                Assert.AreEqual(mergeSorted, binarySorted);
+            }
+
+            long[] quickSorted = Arrays.CopyOf(arr, arr.Length);
+            new Last32BitsSorterTemplate(quickSorted).QuickSort(0, arr.Length - 1);
+            Assert.AreEqual(ToInts(mergeSorted), ToInts(quickSorted));
+
+            long[] timSorted = Arrays.CopyOf(arr, arr.Length);
+            new Last32BitsSorterTemplate(timSorted).TimSort(0, arr.Length - 1);
+            Assert.AreEqual(mergeSorted, timSorted);
+        }
+
+        private int[] ToInts(long[] longArr)
+        {
+            int[] arr = new int[longArr.Length];
+            for (int i = 0; i < longArr.Length; ++i)
+            {
+                arr[i] = (int)longArr[i];
+            }
+            return arr;
+        }
+
+        private long[] ToLongsAndOrds(int[] intArr)
+        {
+            long[] arr = new long[intArr.Length];
+            for (int i = 0; i < intArr.Length; ++i)
+            {
+                arr[i] = (((long)i) << 32) | (intArr[i] & 0xFFFFFFFFL);
+            }
+            return arr;
+        }
+
+        int RandomLength()
+        {
+            return _TestUtil.NextInt(random, 1, random.NextBool() ? SLOW_SORT_THRESHOLD : 100000);
+        }
+
+        [Test]
+        public void TestEmpty()
+        {
+            TestSort(new int[0]);
+        }
+
+        [Test]
+        public void TestAscending()
+        {
+            int Length = RandomLength();
+            int[] arr = new int[Length];
+            arr[0] = random.Next(10);
+            for (int i = 1; i < arr.Length; ++i)
+            {
+                arr[i] = arr[i - 1] + _TestUtil.NextInt(random, 0, 10);
+            }
+            TestSort(arr);
+        }
+
+        [Test]
+        public void TestDescending()
+        {
+            int Length = RandomLength();
+            int[] arr = new int[Length];
+            arr[0] = random.Next(10);
+            for (int i = 1; i < arr.Length; ++i)
+            {
+                arr[i] = arr[i - 1] - _TestUtil.NextInt(random, 0, 10);
+            }
+            TestSort(arr);
+        }
+
+        [Test]
+        public void TestStrictlyDescending()
+        {
+            int Length = RandomLength();
+            int[] arr = new int[Length];
+            arr[0] = random.Next(10);
+            for (int i = 1; i < arr.Length; ++i)
+            {
+                arr[i] = arr[i - 1] - _TestUtil.NextInt(random, 1, 10);
+            }
+            TestSort(arr);
+        }
+
+        [Test]
+        public void TestRandom1()
+        {
+            int Length = RandomLength();
+            int[] arr = new int[Length];
+            for (int i = 0; i < arr.Length; ++i)   // fill every slot, including arr[0]
+            {
+                arr[i] = random.Next();
+            }
+            TestSort(arr);
+        }
+
+        [Test]
+        public void TestRandom2()
+        {
+            int Length = RandomLength();
+            int[] arr = new int[Length];
+            for (int i = 0; i < arr.Length; ++i)   // fill every slot, including arr[0]
+            {
+                arr[i] = random.Next(10);
+            }
+            TestSort(arr);
+        }
+    }
+}
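
The long-packing trick above deserves a note: every int is widened to a long
carrying its original position in the high 32 bits, so a sorter that compares
only the low 32 bits (as unsigned values) can be checked for stability after
the fact. A minimal sketch of that check (illustrative, not part of the port):

    internal static class StabilitySketch
    {
        public static bool IsStablySorted(long[] sorted)
        {
            for (int i = 1; i < sorted.Length; i++)
            {
                uint prev = (uint)sorted[i - 1], cur = (uint)sorted[i];
                if (prev > cur) return false;       // low halves out of order
                if (prev == cur && (sorted[i - 1] >> 32) > (sorted[i] >> 32))
                    return false;                   // equal values reordered
            }
            return true;
        }
    }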

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/f1544d66/test/core/Util/TestStringHelper.cs
----------------------------------------------------------------------
diff --git a/test/core/Util/TestStringHelper.cs b/test/core/Util/TestStringHelper.cs
deleted file mode 100644
index b9eb290..0000000
--- a/test/core/Util/TestStringHelper.cs
+++ /dev/null
@@ -1,48 +0,0 @@
-/* 
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- * 
- * http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-using System;
-
-using NUnit.Framework;
-
-namespace Lucene.Net.Util
-{
-	
-	[TestFixture]
-	public class TestStringHelper:LuceneTestCase
-	{
-		
-		
-		[Test]
-		public virtual void  TestStringDifference()
-		{
-			System.String test1 = "test";
-			System.String test2 = "testing";
-			
-			int result = StringHelper.StringDifference(test1, test2);
-			Assert.IsTrue(result == 4);
-			
-			test2 = "foo";
-			result = StringHelper.StringDifference(test1, test2);
-			Assert.IsTrue(result == 0);
-			
-			test2 = "test";
-			result = StringHelper.StringDifference(test1, test2);
-			Assert.IsTrue(result == 4);
-		}
-	}
-}
\ No newline at end of file
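
For the record, the invariant the deleted test covered is just the length of
the common prefix: 4 for "test"/"testing", 0 for "test"/"foo". A one-method
sketch of that semantics:

    using System;

    internal static class StringDifferenceSketch
    {
        // Index of the first differing char, i.e. the common-prefix length.
        public static int StringDifference(string s1, string s2)
        {
            int len = Math.Min(s1.Length, s2.Length);
            for (int i = 0; i < len; i++)
                if (s1[i] != s2[i]) return i;
            return len;
        }
    }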

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/f1544d66/test/core/Util/TestStringIntern.cs
----------------------------------------------------------------------
diff --git a/test/core/Util/TestStringIntern.cs b/test/core/Util/TestStringIntern.cs
deleted file mode 100644
index 52b902d..0000000
--- a/test/core/Util/TestStringIntern.cs
+++ /dev/null
@@ -1,137 +0,0 @@
-/* 
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- * 
- * http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-using System;
-using System.Threading;
-
-using NUnit.Framework;
-
-namespace Lucene.Net.Util
-{
-	
-    [TestFixture]
-	public class TestStringIntern:LuceneTestCase
-	{
-		public TestStringIntern()
-		{
-			InitBlock();
-		}
-		private class TestRun
-		{
-			public TestRun(Int32 seed, int iter, bool newStrings, TestStringIntern enclosingInstance)
-			{
-				this.seed = seed;
-				this.iter = iter;
-				this.newStrings = newStrings;
-				this.enclosingInstance = enclosingInstance;
-                this.Reset = new ManualResetEvent(false);
-			}
-			private System.Int32 seed;
-			private int iter;
-			private bool newStrings;
-			private TestStringIntern enclosingInstance;
-			public TestStringIntern Enclosing_Instance
-			{
-				get
-				{
-					return enclosingInstance;
-				}
-				
-			}
-
-            public ManualResetEvent Reset;
-
-			public void Run(System.Object state)
-			{
-				System.Random rand = new Random(seed);
-				System.String[] myInterned = new System.String[Enclosing_Instance.testStrings.Length];
-				for (int j = 0; j < iter; j++)
-				{
-					int idx = rand.Next(Enclosing_Instance.testStrings.Length);
-					System.String s = Enclosing_Instance.testStrings[idx];
-					if (newStrings == true && (new System.Random().NextDouble()) > 0.5)
-						s = new System.Text.StringBuilder(s).ToString(); // make a copy half of the time
-					System.String interned = StringHelper.Intern(s);
-					System.String prevInterned = myInterned[idx];
-					System.String otherInterned = Enclosing_Instance.internedStrings[idx];
-					
-					// test against other threads
-                    Assert.IsFalse(otherInterned != null && otherInterned != interned);
-					Enclosing_Instance.internedStrings[idx] = interned;
-					
-					// test against local copy
-                    Assert.IsFalse(prevInterned != null && prevInterned != interned);
-					myInterned[idx] = interned;
-				}
-                this.Reset.Set();
-			}
-		}
-		private void  InitBlock()
-		{
-			r = NewRandom();
-		}
-		internal System.String[] testStrings;
-		internal System.String[] internedStrings;
-		internal System.Random r;
-		
-		private System.String RandStr(int len)
-		{
-			char[] arr = new char[len];
-			for (int i = 0; i < len; i++)
-			{
-				arr[i] = (char) ('a' + r.Next(26));
-			}
-			return new System.String(arr);
-		}
-		
-		private void  MakeStrings(int sz)
-		{
-			testStrings = new System.String[sz];
-			internedStrings = new System.String[sz];
-			for (int i = 0; i < sz; i++)
-			{
-				testStrings[i] = RandStr(r.Next(8) + 3);
-			}
-		}
-		
-        [Test]
-		public virtual void  TestStringIntern_Renamed()
-		{
-			MakeStrings(1024 * 10); // something greater than the capacity of the default cache size
-			// makeStrings(100);  // realistic for perf testing
-			int nThreads = 20;
-			// final int iter=100000;
-			int iter = 1000000;
-			bool newStrings = true;
-			
-			// try native intern
-			// StringHelper.interner = new StringInterner();
-			
-			TestRun[] threads = new TestRun[nThreads];
-            ManualResetEvent[] resets = new ManualResetEvent[nThreads];
-			for (int i = 0; i < nThreads; i++)
-			{
-				int seed = i;
-				threads[i] = new TestRun(seed, iter, newStrings, this);
-                resets[i] = threads[i].Reset;
-                ThreadPool.QueueUserWorkItem(new WaitCallback(threads[i].Run));
-			}
-
-            WaitHandle.WaitAll(resets);
-		}
-	}
-}
\ No newline at end of file
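
The property the deleted multi-threaded test hammered is reference identity
after interning: equal strings, even freshly built copies, must intern to the
same instance. A single-threaded sketch using the BCL's string.Intern as a
stand-in for StringHelper.Intern:

    using System.Text;

    internal static class InternSketch
    {
        public static bool InternedCopiesShareReference()
        {
            string a = "abc";
            string b = new StringBuilder("abc").ToString();  // a distinct instance
            return ReferenceEquals(string.Intern(a), string.Intern(b));  // always true
        }
    }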

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/f1544d66/test/core/Util/TestUnicodeUtil.cs
----------------------------------------------------------------------
diff --git a/test/core/Util/TestUnicodeUtil.cs b/test/core/Util/TestUnicodeUtil.cs
new file mode 100644
index 0000000..6a733a7
--- /dev/null
+++ b/test/core/Util/TestUnicodeUtil.cs
@@ -0,0 +1,173 @@
+using System;
+using Lucene.Net.Support;
+using Lucene.Net.Util;
+using NUnit.Framework;
+
+namespace Lucene.Net.Test.Util
+{
+    [TestFixture]
+    public class TestUnicodeUtil : LuceneTestCase
+    {
+        private Random random = new Random();
+        
+        [Test]
+        public void TestCodePointCount()
+        {
+            // Check invalid codepoints.
+            AssertcodePointCountThrowsAssertionOn(AsByteArray('z', 0x80, 'z', 'z', 'z'));
+            AssertcodePointCountThrowsAssertionOn(AsByteArray('z', 0xc0 - 1, 'z', 'z', 'z'));
+            // Check 5-byte and longer sequences.
+            AssertcodePointCountThrowsAssertionOn(AsByteArray('z', 0xf8, 'z', 'z', 'z'));
+            AssertcodePointCountThrowsAssertionOn(AsByteArray('z', 0xfc, 'z', 'z', 'z'));
+            // Check improperly terminated codepoints.
+            AssertcodePointCountThrowsAssertionOn(AsByteArray('z', 0xc2));
+            AssertcodePointCountThrowsAssertionOn(AsByteArray('z', 0xe2));
+            AssertcodePointCountThrowsAssertionOn(AsByteArray('z', 0xe2, 0x82));
+            AssertcodePointCountThrowsAssertionOn(AsByteArray('z', 0xf0));
+            AssertcodePointCountThrowsAssertionOn(AsByteArray('z', 0xf0, 0xa4));
+            AssertcodePointCountThrowsAssertionOn(AsByteArray('z', 0xf0, 0xa4, 0xad));
+
+            // Check some typical examples (multibyte).
+            Assert.AreEqual(0, UnicodeUtil.CodePointCount(new BytesRef(AsByteArray())));
+            Assert.AreEqual(3, UnicodeUtil.CodePointCount(new BytesRef(AsByteArray('z', 'z', 'z'))));
+            Assert.AreEqual(2, UnicodeUtil.CodePointCount(new BytesRef(AsByteArray('z', 0xc2, 0xa2))));
+            Assert.AreEqual(2, UnicodeUtil.CodePointCount(new BytesRef(AsByteArray('z', 0xe2, 0x82, 0xac))));
+            Assert.AreEqual(2, UnicodeUtil.CodePointCount(new BytesRef(AsByteArray('z', 0xf0, 0xa4, 0xad, 0xa2))));
+
+            // And do some random stuff.
+            var utf8 = new BytesRef(20);
+            int num = AtLeast(50000);
+            for (var i = 0; i < num; i++)
+            {
+                string s = _TestUtil.RandomUnicodeString(random);
+                UnicodeUtil.UTF16toUTF8(s, 0, s.Length, utf8);
+                Assert.AreEqual(s.CodePointCount(0, s.Length),
+                             UnicodeUtil.CodePointCount(utf8));
+            }
+        }
+
+        private sbyte[] AsByteArray(params int[] ints)
+        {
+            var asByteArray = new sbyte[ints.Length];
+            for (var i = 0; i < ints.Length; i++)
+            {
+                asByteArray[i] = (sbyte)ints[i];
+            }
+            return asByteArray;
+        }
+
+        private void AssertcodePointCountThrowsAssertionOn(params sbyte[] bytes)
+        {
+            var threwAssertion = false;
+            try
+            {
+                UnicodeUtil.CodePointCount(new BytesRef(bytes));
+            }
+            catch (ArgumentException)   // invalid UTF-8 is rejected with an ArgumentException
+            {
+                threwAssertion = true;
+            }
+            Assert.IsTrue(threwAssertion);
+        }
+
+        [Test]
+        public void TestUTF8toUTF32()
+        {
+            var utf8 = new BytesRef(20);
+            var utf32 = new IntsRef(20);
+            var codePoints = new int[20];
+            int num = AtLeast(50000);
+            for (var i = 0; i < num; i++)
+            {
+                string s = _TestUtil.RandomUnicodeString(random);
+                UnicodeUtil.UTF16toUTF8(s, 0, s.Length, utf8);
+                UnicodeUtil.UTF8toUTF32(utf8, utf32);
+
+                var charUpto = 0;
+                var intUpto = 0;
+                while (charUpto < s.Length)
+                {
+                    int cp = s.CodePointAt(charUpto);
+                    codePoints[intUpto++] = cp;
+                    charUpto += Character.CharCount(cp);
+                }
+                if (!ArrayUtil.Equals(codePoints, 0, utf32.ints, utf32.offset, intUpto))
+                {
+                    Console.WriteLine("FAILED");
+                    for (int j = 0; j < s.Length; j++)
+                    {
+                        Console.WriteLine("  char[" + j + "]=" + int.ToHexString(s.CharAt(j)));
+                    }
+                    Console.WriteLine();
+                    Assert.AreEqual(intUpto, utf32.length);
+                    for (int j = 0; j < intUpto; j++)
+                    {
+                        Console.WriteLine("  " + int.ToHexString(utf32.ints[j]) + " vs " + int.ToHexString(codePoints[j]));
+                    }
+                    Assert.Fail("mismatch");
+                }
+            }
+        }
+
+        [Test]
+        public void TestNewString()
+        {
+            int[] codePoints = {
+                Character.ToCodePoint(Character.MIN_HIGH_SURROGATE,
+                    Character.MAX_LOW_SURROGATE),
+                Character.ToCodePoint(Character.MAX_HIGH_SURROGATE,
+                    Character.MIN_LOW_SURROGATE), Character.MAX_HIGH_SURROGATE, 'A',
+                -1,};
+
+            string cpString = "" + Character.MIN_HIGH_SURROGATE
+                + Character.MAX_LOW_SURROGATE + Character.MAX_HIGH_SURROGATE
+                + Character.MIN_LOW_SURROGATE + Character.MAX_HIGH_SURROGATE + 'A';
+
+            // jagged rather than rectangular so each row can be pulled out as an int[]
+            var tests = new[] { new[] {0, 1, 0, 2}, new[] {0, 2, 0, 4}, new[] {1, 1, 2, 2},
+                new[] {1, 2, 2, 3}, new[] {1, 3, 2, 4}, new[] {2, 2, 4, 2}, new[] {2, 3, 0, -1},
+                new[] {4, 5, 0, -1}, new[] {3, -1, 0, -1} };
+
+            for (int i = 0; i < tests.Length; ++i)
+            {
+                int[] t = tests[i];
+                var s = t[0];
+                var c = t[1];
+                var rs = t[2];
+                var rc = t[3];
+
+                try
+                {
+                    string str = UnicodeUtil.NewString(codePoints, s, c);
+                    Assert.IsFalse(rc == -1);
+                    Assert.AreEqual(cpString.Substring(rs, rc), str);   // Substring takes (start, length), not (begin, end)
+                    continue;
+                }
+                catch (IndexOutOfRangeException)
+                {
+                    // Ignored.
+                }
+                catch (ArgumentException)
+                {
+                    // Ignored.
+                }
+                Assert.IsTrue(rc == -1);
+            }
+        }
+
+        [Test]
+        public void TestUTF8UTF16CharsRef()
+        {
+            int num = AtLeast(3989);
+            for (int i = 0; i < num; i++)
+            {
+                string unicode = _TestUtil.RandomRealisticUnicodeString(random);
+                var bytesRef = new BytesRef(unicode);
+                var arr = new char[1 + random.Next(100)];
+                var offset = random.Next(arr.Length);
+                var len = random.Next(arr.Length - offset);
+                var cRef = new CharsRef(arr, offset, len);
+                UnicodeUtil.UTF8toUTF16(bytesRef, cRef);
+                Assert.AreEqual(cRef.ToString(), unicode);
+            }
+        }
+    }
+}
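
The counts asserted above follow directly from UTF-8 lead bytes. A standalone
sketch (not the Lucene.Net implementation) that rejects the same malformed
inputs the test feeds in:

    using System;

    internal static class Utf8Sketch
    {
        public static int CodePointCount(byte[] utf8)
        {
            int count = 0, i = 0;
            while (i < utf8.Length)
            {
                byte lead = utf8[i];
                int len;
                if (lead < 0x80) len = 1;                 // 0xxxxxxx: ASCII
                else if ((lead & 0xE0) == 0xC0) len = 2;  // 110xxxxx
                else if ((lead & 0xF0) == 0xE0) len = 3;  // 1110xxxx
                else if ((lead & 0xF8) == 0xF0) len = 4;  // 11110xxx
                else throw new ArgumentException("invalid UTF-8 lead byte");
                if (i + len > utf8.Length)
                    throw new ArgumentException("truncated UTF-8 sequence");
                i += len;
                count++;
            }
            return count;
        }
    }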

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/f1544d66/test/core/Util/TestVersion.cs
----------------------------------------------------------------------
diff --git a/test/core/Util/TestVersion.cs b/test/core/Util/TestVersion.cs
index 20f3f43..a970f61 100644
--- a/test/core/Util/TestVersion.cs
+++ b/test/core/Util/TestVersion.cs
@@ -16,6 +16,7 @@
  */
 
 using System;
+using System.Linq;
 using NUnit.Framework;
 
 namespace Lucene.Net.Util
@@ -24,15 +25,47 @@ namespace Lucene.Net.Util
     public class TestVersion : LuceneTestCase
     {
         [Test]
-        public virtual void TestOnOrAfter()
+        public void Test()
         {
-            foreach (Version v in Enum.GetValues(typeof(Version)))
+            foreach (var v in Enum.GetValues(typeof(Version)).Cast<Version>())
             {
-                Assert.IsTrue(Version.LUCENE_CURRENT.OnOrAfter(v), string.Format("LUCENE_CURRENT must be always OnOrAfter({0})", v));
+                Assert.IsTrue("LUCENE_CURRENT must be always onOrAfter(" + v + ")", Version.LUCENE_CURRENT.OnOrAfter(v));
+            }
+            Assert.IsTrue(Version.LUCENE_40.OnOrAfter(Version.LUCENE_31));
+            Assert.IsTrue(Version.LUCENE_40.OnOrAfter(Version.LUCENE_40));
+            Assert.IsFalse(Version.LUCENE_30.OnOrAfter(Version.LUCENE_31));
+        }
+
+        [Test]
+        public void TestParseLeniently()
+        {
+            Assert.AreEqual(Version.LUCENE_40, Version.ParseLeniently("4.0"));
+            Assert.AreEqual(Version.LUCENE_40, Version.ParseLeniently("LUCENE_40"));
+            Assert.AreEqual(Version.LUCENE_CURRENT, Version.ParseLeniently("LUCENE_CURRENT"));
+        }
+
+        [Test]
+        public void TestDeprecations()
+        {
+            var values = Enum.GetValues(typeof(Version)).OfType<Version>().ToList();
+            // all but the latest version should be deprecated
+            for (int i = 0; i < values.Count; i++)
+            {
+                if (i + 1 == values.Count)
+                {
+                    Assert.AreEqual(Version.LUCENE_CURRENT, values[i], "Last constant must be LUCENE_CURRENT");   // AreSame would compare two distinct boxed enums
+                }
+                var field = typeof(Version).GetField(Enum.GetName(typeof(Version), values[i]));
+                bool dep = field.IsDefined(typeof(ObsoleteAttribute), true);
+                if (i + 2 != values.Count)
+                {
+                    Assert.IsTrue(dep, Enum.GetName(typeof(Version), values[i]) + " should be deprecated");
+                }
+                else
+                {
+                    Assert.IsFalse(dep, Enum.GetName(typeof(Version), values[i]) + " should not be deprecated");
+                }
             }
-            Assert.IsTrue(Version.LUCENE_30.OnOrAfter(Version.LUCENE_29));
-            Assert.IsTrue(Version.LUCENE_30.OnOrAfter(Version.LUCENE_30));
-            Assert.IsFalse(Version.LUCENE_29.OnOrAfter(Version.LUCENE_30));
         }
     }
 }
\ No newline at end of file
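
The reflection pattern in TestDeprecations generalizes nicely: enum constants
are static fields of the enum type, so attributes hang off the FieldInfo. A
small sketch of a hypothetical helper (not part of the port):

    using System;

    internal static class EnumAttributeSketch
    {
        public static bool IsObsolete(Type enumType, object constant)
        {
            var field = enumType.GetField(Enum.GetName(enumType, constant));
            return field.IsDefined(typeof(ObsoleteAttribute), false);
        }
    }

For example, IsObsolete(typeof(Version), Version.LUCENE_30) should report true
once LUCENE_30 carries [Obsolete].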

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/f1544d66/test/core/Util/TestVersionComparator.cs
----------------------------------------------------------------------
diff --git a/test/core/Util/TestVersionComparator.cs b/test/core/Util/TestVersionComparator.cs
new file mode 100644
index 0000000..e117900
--- /dev/null
+++ b/test/core/Util/TestVersionComparator.cs
@@ -0,0 +1,38 @@
+using Lucene.Net.Util;
+using NUnit.Framework;
+
+namespace Lucene.Net.Test.Util
+{
+    [TestFixture]
+    public class TestVersionComparator : LuceneTestCase
+    {
+        [Test]
+        public void TestVersions()
+        {
+            var comp = StringHelper.VersionComparator;
+            Assert.IsTrue(comp.Compare("1", "2") < 0);
+            Assert.IsTrue(comp.Compare("1", "1") == 0);
+            Assert.IsTrue(comp.Compare("2", "1") > 0);
+
+            Assert.IsTrue(comp.Compare("1.1", "1") > 0);
+            Assert.IsTrue(comp.Compare("1", "1.1") < 0);
+            Assert.IsTrue(comp.Compare("1.1", "1.1") == 0);
+
+            Assert.IsTrue(comp.Compare("1.0", "1") == 0);
+            Assert.IsTrue(comp.Compare("1", "1.0") == 0);
+            Assert.IsTrue(comp.Compare("1.0.1", "1.0") > 0);
+            Assert.IsTrue(comp.Compare("1.0", "1.0.1") < 0);
+
+            Assert.IsTrue(comp.Compare("1.02.003", "1.2.3.0") == 0);
+            Assert.IsTrue(comp.Compare("1.2.3.0", "1.02.003") == 0);
+
+            Assert.IsTrue(comp.Compare("1.10", "1.9") > 0);
+            Assert.IsTrue(comp.Compare("1.9", "1.10") < 0);
+
+            Assert.IsTrue(comp.Compare("0", "1.0") < 0);
+            Assert.IsTrue(comp.Compare("00", "1.0") < 0);
+            Assert.IsTrue(comp.Compare("-1.0", "1.0") < 0);
+            Assert.IsTrue(comp.Compare("3.0", int.MinValue.ToString()) > 0);
+        }
+    }
+}
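
The assertions above pin down the comparison rule: split on '.', compare the
segments numerically, and pad the shorter version with zero segments (hence
"1.02.003" == "1.2.3.0"). A minimal comparator with those semantics; the
port's StringHelper.VersionComparator walks digits in place, while this
parsing sketch is only for illustration:

    using System;

    internal static class VersionCompareSketch
    {
        public static int CompareVersions(string a, string b)
        {
            string[] xs = a.Split('.'), ys = b.Split('.');
            int n = Math.Max(xs.Length, ys.Length);
            for (int i = 0; i < n; i++)
            {
                long x = i < xs.Length ? long.Parse(xs[i]) : 0;  // missing segment counts as 0
                long y = i < ys.Length ? long.Parse(ys[i]) : 0;
                if (x != y) return x < y ? -1 : 1;
            }
            return 0;
        }
    }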

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/f1544d66/test/core/Util/TestVirtualMethod.cs
----------------------------------------------------------------------
diff --git a/test/core/Util/TestVirtualMethod.cs b/test/core/Util/TestVirtualMethod.cs
new file mode 100644
index 0000000..429cedf
--- /dev/null
+++ b/test/core/Util/TestVirtualMethod.cs
@@ -0,0 +1,88 @@
+using System;
+using Lucene.Net.Util;
+using NUnit.Framework;
+
+namespace Lucene.Net.Test.Util
+{
+    [TestFixture]
+    public class TestVirtualMethod : LuceneTestCase
+    {
+        private static readonly VirtualMethod<TestVirtualMethod> publicTestMethod =
+            new VirtualMethod<TestVirtualMethod>(typeof(TestVirtualMethod), "publicTest", typeof(string));
+        private static VirtualMethod<TestVirtualMethod> protectedTestMethod =
+            new VirtualMethod<TestVirtualMethod>(typeof(TestVirtualMethod), "protectedTest", typeof(int));
+
+        // virtual so the subclasses below can legally override
+        public virtual void publicTest(string test) { }
+        protected virtual void protectedTest(int test) { }
+
+        internal class TestClass1 : TestVirtualMethod
+        {
+            public override void publicTest(string test) { }
+            protected override void protectedTest(int test) { }
+        }
+
+        internal class TestClass2 : TestClass1
+        {
+            protected override void protectedTest(int test) { }   // overrides cannot widen access in C#
+        }
+
+        internal class TestClass3 : TestClass2
+        {
+            public override void publicTest(string test) { }
+        }
+
+        internal class TestClass4 : TestVirtualMethod
+        {
+        }
+
+        internal class TestClass5 : TestClass4
+        {
+        }
+
+        [Test]
+        public void TestGeneral()
+        {
+            Assert.AreEqual(0, publicTestMethod.GetImplementationDistance(this.GetType()));
+            Assert.AreEqual(1, publicTestMethod.GetImplementationDistance(typeof(TestClass1)));
+            Assert.AreEqual(1, publicTestMethod.GetImplementationDistance(typeof(TestClass2)));
+            Assert.AreEqual(3, publicTestMethod.GetImplementationDistance(typeof(TestClass3)));
+            Assert.IsFalse(publicTestMethod.IsOverriddenAsOf(typeof(TestClass4)));
+            Assert.IsFalse(publicTestMethod.IsOverriddenAsOf(typeof(TestClass5)));
+
+            Assert.AreEqual(0, protectedTestMethod.GetImplementationDistance(this.GetType()));
+            Assert.AreEqual(1, protectedTestMethod.GetImplementationDistance(typeof(TestClass1)));
+            Assert.AreEqual(2, protectedTestMethod.GetImplementationDistance(typeof(TestClass2)));
+            Assert.AreEqual(2, protectedTestMethod.GetImplementationDistance(typeof(TestClass3)));
+            Assert.IsFalse(protectedTestMethod.IsOverriddenAsOf(typeof(TestClass4)));
+            Assert.IsFalse(protectedTestMethod.IsOverriddenAsOf(typeof(TestClass5)));
+
+            Assert.IsTrue(VirtualMethod.CompareImplementationDistance(typeof(TestClass3), publicTestMethod, protectedTestMethod) > 0);
+            Assert.AreEqual(0, VirtualMethod.CompareImplementationDistance(typeof(TestClass5), publicTestMethod, protectedTestMethod));
+        }
+
+        [Test]
+        public void TestExceptions()
+        {
+            Assert.Throws<ArgumentException>(() =>
+                {
+                    publicTestMethod.GetImplementationDistance((Type)typeof(LuceneTestCase));
+                }, "LuceneTestCase is not a subclass and can never override publicTest(string)")
+
+            Assert.Throws<ArgumentException>(() =>
+                {
+                    new VirtualMethod<TestVirtualMethod>(typeof(TestVirtualMethod), "bogus");
+                }, "Method bogus() does not exist, so IAE should be thrown");
+
+            Assert.Throws<ArgumentException>(() =>
+                {
+                    new VirtualMethod<TestClass2>(typeof(TestClass2), "publicTest", typeof(string));
+                }, "Method publicTest(string) is not declared in TestClass2, so IAE should be thrown");
+
+            Assert.Throws<InvalidOperationException>(() =>
+                {
+                    new VirtualMethod<TestVirtualMethod>(typeof(TestVirtualMethod), "publicTest", typeof(string));
+                }, "Violating singleton status succeeded");
+
+        }
+    }
+}
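
For intuition, the "implementation distance" probed above is the number of
inheritance hops from the base class to the nearest subclass that declares an
override of the method. A reflection-only sketch of that idea (a hypothetical
helper; the port's VirtualMethod does its own caching):

    using System;
    using System.Reflection;

    internal static class DistanceSketch
    {
        public static int ImplementationDistance(Type baseType, string name,
                                                 Type[] args, Type subclass)
        {
            const BindingFlags Declared = BindingFlags.Instance | BindingFlags.Public |
                                          BindingFlags.NonPublic | BindingFlags.DeclaredOnly;
            // Walk up from the subclass; the first type declaring the method
            // is the nearest override (e.g. distance 3 for TestClass3.publicTest).
            for (Type t = subclass; t != null && t != baseType; t = t.BaseType)
            {
                if (t.GetMethod(name, Declared, null, args, null) != null)
                {
                    int hops = 0;
                    for (Type u = t; u != baseType; u = u.BaseType) hops++;
                    return hops;
                }
            }
            return 0;   // no override below the base declaration
        }
    }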


[19/50] [abbrv] Massive cleanup, reducing compiler errors

Posted by mh...@apache.org.
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/80561f72/src/core/Codecs/Compressing/CompressingTermVectorsWriter.cs
----------------------------------------------------------------------
diff --git a/src/core/Codecs/Compressing/CompressingTermVectorsWriter.cs b/src/core/Codecs/Compressing/CompressingTermVectorsWriter.cs
index d91cc23..6aef620 100644
--- a/src/core/Codecs/Compressing/CompressingTermVectorsWriter.cs
+++ b/src/core/Codecs/Compressing/CompressingTermVectorsWriter.cs
@@ -10,121 +10,121 @@ using System.Text;
 
 namespace Lucene.Net.Codecs.Compressing
 {
-    public sealed class CompressingTermVectorsWriter: TermVectorsWriter
+    public sealed class CompressingTermVectorsWriter : TermVectorsWriter
     {
-        public static int MAX_DOCUMENTS_PER_CHUNK = 128;
+        public const int MAX_DOCUMENTS_PER_CHUNK = 128;
 
-        static string VECTORS_EXTENSION = "tvd";
-        static string VECTORS_INDEX_EXTENSION = "tvx";
+        internal const string VECTORS_EXTENSION = "tvd";
+        internal const string VECTORS_INDEX_EXTENSION = "tvx";
 
-        static string CODEC_SFX_IDX = "Index";
-        static string CODEC_SFX_DAT = "Data";
+        internal const string CODEC_SFX_IDX = "Index";
+        internal const string CODEC_SFX_DAT = "Data";
 
-        static int VERSION_START = 0;
-        static int VERSION_CURRENT = VERSION_START;
+        internal const int VERSION_START = 0;
+        internal const int VERSION_CURRENT = VERSION_START;
 
-        static int BLOCK_SIZE = 64;
+        internal const int BLOCK_SIZE = 64;
 
-        static int POSITIONS = 0x01;
-        static int   OFFSETS = 0x02;
-        static int  PAYLOADS = 0x04;
-        static int FLAGS_BITS = PackedInts.BitsRequired(POSITIONS | OFFSETS | PAYLOADS);
+        internal const int POSITIONS = 0x01;
+        internal const int OFFSETS = 0x02;
+        internal const int PAYLOADS = 0x04;
+        internal static readonly int FLAGS_BITS = PackedInts.BitsRequired(POSITIONS | OFFSETS | PAYLOADS);
 
-        private Directory directory;
-        private string segment;
-        private string segmentSuffix;
+        private readonly Directory directory;
+        private readonly string segment;
+        private readonly string segmentSuffix;
         private CompressingStoredFieldsIndexWriter indexWriter;
         private IndexOutput vectorsStream;
 
-        private CompressionMode compressionMode;
-        private Compressor compressor;
-        private int chunkSize;
+        private readonly CompressionMode compressionMode;
+        private readonly Compressor compressor;
+        private readonly int chunkSize;
 
-        private int numDocs; // total number of docs seen
-        private Deque<DocData> pendingDocs; // pending docs
-        private DocData curDoc; // current document
-        private FieldData curField; // current field
-        private BytesRef lastTerm;
-        private int[] positionsBuf, startOffsetsBuf, lengthsBuf, payloadLengthsBuf;
-        private GrowableByteArrayDataOutput termSuffixes; // buffered term suffixes
-        private GrowableByteArrayDataOutput payloadBytes; // buffered term payloads
-        private BlockPackedWriter writer;
-        
         /** a pending doc */
-        private class DocData 
+        private class DocData
         {
-            int numFields;
-            Deque<FieldData> fields;
-            int posStart, offStart, payStart;
-            DocData(int numFields, int posStart, int offStart, int payStart) {
+            internal readonly int numFields;
+            internal readonly LinkedList<FieldData> fields;
+            internal readonly int posStart, offStart, payStart;
+
+            private readonly CompressingTermVectorsWriter parent;
+
+            internal DocData(CompressingTermVectorsWriter parent, int numFields, int posStart, int offStart, int payStart)
+            {
+                this.parent = parent; // .NET Port
+
                 this.numFields = numFields;
-                this.fields = new ArrayDeque<FieldData>(numFields);
+                this.fields = new LinkedList<FieldData>();
                 this.posStart = posStart;
                 this.offStart = offStart;
                 this.payStart = payStart;
             }
 
-            FieldData addField(int fieldNum, int numTerms, bool positions, bool offsets, bool payloads) 
+            internal FieldData AddField(int fieldNum, int numTerms, bool positions, bool offsets, bool payloads)
             {
                 FieldData field;
-                if (fields.isEmpty()) 
+                if (fields.Count == 0)
                 {
-                    field = new FieldData(fieldNum, numTerms, positions, offsets, payloads, posStart, offStart, payStart);
-                } 
-                else 
+                    field = new FieldData(parent, fieldNum, numTerms, positions, offsets, payloads, posStart, offStart, payStart);
+                }
+                else
                 {
-                    FieldData last = fields.getLast();
+                    FieldData last = fields.Last.Value;
                     int posStart = last.posStart + (last.hasPositions ? last.totalPositions : 0);
                     int offStart = last.offStart + (last.hasOffsets ? last.totalPositions : 0);
                     int payStart = last.payStart + (last.hasPayloads ? last.totalPositions : 0);
-                    field = new FieldData(fieldNum, numTerms, positions, offsets, payloads, posStart, offStart, payStart);
+                    field = new FieldData(parent, fieldNum, numTerms, positions, offsets, payloads, posStart, offStart, payStart);
                 }
-                fields.add(field);
+                fields.AddLast(field);
                 return field;
             }
         }
 
-        private DocData addDocData(int numVectorFields) 
+        private DocData AddDocData(int numVectorFields)
         {
             FieldData last = null;
-            for (Iterator<DocData> it = pendingDocs.descendingIterator(); it.hasNext(); ) 
+            foreach (DocData doc in pendingDocs.Reverse())
             {
-                final DocData doc = it.next();
-                if (!doc.fields.isEmpty()) 
+                //DocData doc = it.next();
+                if (doc.fields.Count > 0)
                 {
-                    last = doc.fields.getLast();
+                    last = doc.fields.Last.Value;
                     break;
                 }
             }
 
-            DocData doc;
-            if (last == null) 
+            DocData doc2;
+            if (last == null)
             {
-                doc = new DocData(numVectorFields, 0, 0, 0);
-            } 
-            else 
+                doc2 = new DocData(this, numVectorFields, 0, 0, 0);
+            }
+            else
             {
                 int posStart = last.posStart + (last.hasPositions ? last.totalPositions : 0);
                 int offStart = last.offStart + (last.hasOffsets ? last.totalPositions : 0);
                 int payStart = last.payStart + (last.hasPayloads ? last.totalPositions : 0);
-                doc = new DocData(numVectorFields, posStart, offStart, payStart);
+                doc2 = new DocData(this, numVectorFields, posStart, offStart, payStart);
             }
-            pendingDocs.add(doc);
-            return doc;
+            pendingDocs.AddLast(doc2);
+            return doc2;
         }
 
         /** a pending field */
-        private class FieldData 
+        private class FieldData
         {
-            bool hasPositions, hasOffsets, hasPayloads;
-            int fieldNum, flags, numTerms;
-            int[] freqs, prefixLengths, suffixLengths;
-            int posStart, offStart, payStart;
-            int totalPositions;
-            int ord;
+            internal readonly bool hasPositions, hasOffsets, hasPayloads;
+            internal readonly int fieldNum, flags, numTerms;
+            internal readonly int[] freqs, prefixLengths, suffixLengths;
+            internal readonly int posStart, offStart, payStart;
+            internal int totalPositions;
+            internal int ord;
+
+            private readonly CompressingTermVectorsWriter parent;
 
-            public FieldData(int fieldNum, int numTerms, bool positions, bool offsets, bool payloads, int posStart, int offStart, int payStart) 
+            public FieldData(CompressingTermVectorsWriter parent, int fieldNum, int numTerms, bool positions, bool offsets, bool payloads, int posStart, int offStart, int payStart)
             {
+                this.parent = parent; // .NET Port
+
                 this.fieldNum = fieldNum;
                 this.numTerms = numTerms;
                 this.hasPositions = positions;
@@ -141,48 +141,61 @@ namespace Lucene.Net.Codecs.Compressing
                 ord = 0;
             }
 
-            public void addTerm(int freq, int prefixLength, int suffixLength) 
+            public void AddTerm(int freq, int prefixLength, int suffixLength)
             {
-              freqs[ord] = freq;
-              prefixLengths[ord] = prefixLength;
-              suffixLengths[ord] = suffixLength;
-              ++ord;
+                freqs[ord] = freq;
+                prefixLengths[ord] = prefixLength;
+                suffixLengths[ord] = suffixLength;
+                ++ord;
             }
-            
-            public void addPosition(int position, int startOffset, int length, int payloadLength) 
+
+            public void AddPosition(int position, int startOffset, int length, int payloadLength)
             {
-              if (hasPositions) 
-              {
-                if (posStart + totalPositions == positionsBuf.length) 
+                if (hasPositions)
                 {
-                  positionsBuf = ArrayUtil.grow(positionsBuf);
-                }
+                    if (posStart + totalPositions == parent.positionsBuf.Length)
+                    {
+                        parent.positionsBuf = ArrayUtil.Grow(parent.positionsBuf);
+                    }
 
-                positionsBuf[posStart + totalPositions] = position;
-              }
-              if (hasOffsets) {
-                if (offStart + totalPositions == startOffsetsBuf.length) 
+                    parent.positionsBuf[posStart + totalPositions] = position;
+                }
+                if (hasOffsets)
                 {
-                  int newLength = ArrayUtil.Oversize(offStart + totalPositions, 4);
-                  startOffsetsBuf = Arrays.CopyOf(startOffsetsBuf, newLength);
-                  lengthsBuf = Arrays.CopyOf(lengthsBuf, newLength);
+                    if (offStart + totalPositions == parent.startOffsetsBuf.Length)
+                    {
+                        int newLength = ArrayUtil.Oversize(offStart + totalPositions, 4);
+                        parent.startOffsetsBuf = Arrays.CopyOf(parent.startOffsetsBuf, newLength);
+                        parent.lengthsBuf = Arrays.CopyOf(parent.lengthsBuf, newLength);
+                    }
+                    parent.startOffsetsBuf[offStart + totalPositions] = startOffset;
+                    parent.lengthsBuf[offStart + totalPositions] = length;
                 }
-                startOffsetsBuf[offStart + totalPositions] = startOffset;
-                lengthsBuf[offStart + totalPositions] = length;
-              }
-              if (hasPayloads) {
-                if (payStart + totalPositions == payloadLengthsBuf.length) {
-                  payloadLengthsBuf = ArrayUtil.Grow(payloadLengthsBuf);
+                if (hasPayloads)
+                {
+                    if (payStart + totalPositions == parent.payloadLengthsBuf.Length)
+                    {
+                        parent.payloadLengthsBuf = ArrayUtil.Grow(parent.payloadLengthsBuf);
+                    }
+                    parent.payloadLengthsBuf[payStart + totalPositions] = payloadLength;
                 }
-                payloadLengthsBuf[payStart + totalPositions] = payloadLength;
-              }
-              ++totalPositions;
+                ++totalPositions;
             }
         }
 
+        private int numDocs; // total number of docs seen
+        private readonly LinkedList<DocData> pendingDocs; // pending docs
+        private DocData curDoc; // current document
+        private FieldData curField; // current field
+        private readonly BytesRef lastTerm;
+        private int[] positionsBuf, startOffsetsBuf, lengthsBuf, payloadLengthsBuf;
+        private readonly GrowableByteArrayDataOutput termSuffixes; // buffered term suffixes
+        private readonly GrowableByteArrayDataOutput payloadBytes; // buffered term payloads
+        private readonly BlockPackedWriter writer;
+
         /** Sole constructor. */
         public CompressingTermVectorsWriter(Directory directory, SegmentInfo si, string segmentSuffix, IOContext context,
-            String formatName, CompressionMode compressionMode, int chunkSize) 
+            String formatName, CompressionMode compressionMode, int chunkSize)
         {
             this.directory = directory;
             this.segment = si.name;
@@ -192,20 +205,21 @@ namespace Lucene.Net.Codecs.Compressing
             this.chunkSize = chunkSize;
 
             numDocs = 0;
-            pendingDocs = new ArrayDeque<DocData>();
+            pendingDocs = new LinkedList<DocData>();
             termSuffixes = new GrowableByteArrayDataOutput(ArrayUtil.Oversize(chunkSize, 1));
             payloadBytes = new GrowableByteArrayDataOutput(ArrayUtil.Oversize(1, 1));
             lastTerm = new BytesRef(ArrayUtil.Oversize(30, 1));
 
             bool success = false;
             IndexOutput indexStream = directory.CreateOutput(IndexFileNames.SegmentFileName(segment, segmentSuffix, VECTORS_INDEX_EXTENSION), context);
-            try {
+            try
+            {
                 vectorsStream = directory.CreateOutput(IndexFileNames.SegmentFileName(segment, segmentSuffix, VECTORS_EXTENSION), context);
 
                 string codecNameIdx = formatName + CODEC_SFX_IDX;
                 string codecNameDat = formatName + CODEC_SFX_DAT;
-                CodecUtil.writeHeader(indexStream, codecNameIdx, VERSION_CURRENT);
-                CodecUtil.writeHeader(vectorsStream, codecNameDat, VERSION_CURRENT);
+                CodecUtil.WriteHeader(indexStream, codecNameIdx, VERSION_CURRENT);
+                CodecUtil.WriteHeader(vectorsStream, codecNameDat, VERSION_CURRENT);
 
                 indexWriter = new CompressingStoredFieldsIndexWriter(indexStream);
                 indexStream = null;
@@ -220,27 +234,31 @@ namespace Lucene.Net.Codecs.Compressing
                 payloadLengthsBuf = new int[1024];
 
                 success = true;
-            } finally {
-                if (!success) {
-                IOUtils.CloseWhileHandlingException(indexStream);
-                Abort();
+            }
+            finally
+            {
+                if (!success)
+                {
+                    IOUtils.CloseWhileHandlingException((IDisposable)indexStream);
+                    Abort();
                 }
             }
         }
 
         public override void StartDocument(int numVectorFields)
         {
-            curDoc = addDocData(numVectorFields);
+            curDoc = AddDocData(numVectorFields);
         }
 
-        public override void FinishDocument() 
+        public override void FinishDocument()
         {
             // append the payload bytes of the doc after its terms
             termSuffixes.WriteBytes(payloadBytes.Bytes, payloadBytes.Length);
             payloadBytes.Length = 0;
             ++numDocs;
-            if (triggerFlush()) {
-              Flush();
+            if (TriggerFlush())
+            {
+                Flush();
             }
             curDoc = null;
         }
@@ -259,11 +277,12 @@ namespace Lucene.Net.Codecs.Compressing
         public override void StartTerm(Util.BytesRef term, int freq)
         {
             int prefix = StringHelper.BytesDifference(lastTerm, term);
-            curField.addTerm(freq, prefix, term.length - prefix);
+            curField.AddTerm(freq, prefix, term.length - prefix);
             termSuffixes.WriteBytes(term.bytes, term.offset + prefix, term.length - prefix);
             // copy last term
-            if (lastTerm.bytes.Length < term.length) {
-              lastTerm.bytes = new sbyte[ArrayUtil.Oversize(term.length, 1)];
+            if (lastTerm.bytes.Length < term.length)
+            {
+                lastTerm.bytes = new sbyte[ArrayUtil.Oversize(term.length, 1)];
             }
             lastTerm.offset = 0;
             lastTerm.length = term.length;
@@ -272,346 +291,412 @@ namespace Lucene.Net.Codecs.Compressing
 
         public override void AddPosition(int position, int startOffset, int endOffset, Util.BytesRef payload)
         {
-            curField.addPosition(position, startOffset, endOffset - startOffset, payload == null ? 0 : payload.length);
-            if (curField.HasPayloads && payload != null)
+            curField.AddPosition(position, startOffset, endOffset - startOffset, payload == null ? 0 : payload.length);
+            if (curField.hasPayloads && payload != null)
             {
                 payloadBytes.WriteBytes(payload.bytes, payload.offset, payload.length);
             }
         }
 
-        private bool triggerFlush()
+        private bool TriggerFlush()
         {
             return termSuffixes.Length >= chunkSize
-                || pendingDocs.size() >= MAX_DOCUMENTS_PER_CHUNK;
+                || pendingDocs.Count >= MAX_DOCUMENTS_PER_CHUNK;
         }
 
-        private void flush() 
+        private void Flush()
         {
-            int chunkDocs = pendingDocs.size();
+            int chunkDocs = pendingDocs.Count;
 
             // write the index file
-            indexWriter.WriteIndex(chunkDocs, vectorsStream.GetFilePointer());
+            indexWriter.WriteIndex(chunkDocs, vectorsStream.FilePointer);
 
             int docBase = numDocs - chunkDocs;
             vectorsStream.WriteVInt(docBase);
             vectorsStream.WriteVInt(chunkDocs);
 
             // total number of fields of the chunk
-            int totalFields = flushNumFields(chunkDocs);
-
-            if (totalFields > 0) {
-              // unique field numbers (sorted)
-              int[] fieldNums = flushFieldNums();
-              // offsets in the array of unique field numbers
-              flushFields(totalFields, fieldNums);
-              // flags (does the field have positions, offsets, payloads?)
-              flushFlags(totalFields, fieldNums);
-              // number of terms of each field
-              flushNumTerms(totalFields);
-              // prefix and suffix lengths for each field
-              flushTermLengths();
-              // term freqs - 1 (because termFreq is always >=1) for each term
-              flushTermFreqs();
-              // positions for all terms, when enabled
-              flushPositions();
-              // offsets for all terms, when enabled
-              flushOffsets(fieldNums);
-              // payload lengths for all terms, when enabled
-              flushPayloadLengths();
-
-              // compress terms and payloads and write them to the output
-              compressor.Compress(termSuffixes.Bytes, 0, termSuffixes.Length, vectorsStream);
+            int totalFields = FlushNumFields(chunkDocs);
+
+            if (totalFields > 0)
+            {
+                // unique field numbers (sorted)
+                int[] fieldNums = FlushFieldNums();
+                // offsets in the array of unique field numbers
+                FlushFields(totalFields, fieldNums);
+                // flags (does the field have positions, offsets, payloads?)
+                FlushFlags(totalFields, fieldNums);
+                // number of terms of each field
+                FlushNumTerms(totalFields);
+                // prefix and suffix lengths for each field
+                FlushTermLengths();
+                // term freqs - 1 (because termFreq is always >=1) for each term
+                FlushTermFreqs();
+                // positions for all terms, when enabled
+                FlushPositions();
+                // offsets for all terms, when enabled
+                FlushOffsets(fieldNums);
+                // payload lengths for all terms, when enabled
+                FlushPayloadLengths();
+
+                // compress terms and payloads and write them to the output
+                compressor.Compress(termSuffixes.Bytes, 0, termSuffixes.Length, vectorsStream);
             }
 
             // reset
-            pendingDocs.clear();
+            pendingDocs.Clear();
             curDoc = null;
             curField = null;
             termSuffixes.Length = 0;
         }
 
-        private int flushNumFields(int chunkDocs) 
+        private int FlushNumFields(int chunkDocs)
         {
-            if (chunkDocs == 1) {
-              int numFields = pendingDocs.getFirst().numFields;
-              vectorsStream.WriteVInt(numFields);
-              return numFields;
-            } else {
-              writer.Reset(vectorsStream);
-              int totalFields = 0;
-              for (DocData dd : pendingDocs) {
-                writer.Add(dd.numFields);
-                totalFields += dd.numFields;
-              }
-              writer.Finish();
-              return totalFields;
+            if (chunkDocs == 1)
+            {
+                int numFields = pendingDocs.First.Value.numFields;
+                vectorsStream.WriteVInt(numFields);
+                return numFields;
+            }
+            else
+            {
+                writer.Reset(vectorsStream);
+                int totalFields = 0;
+                foreach (DocData dd in pendingDocs)
+                {
+                    writer.Add(dd.numFields);
+                    totalFields += dd.numFields;
+                }
+                writer.Finish();
+                return totalFields;
             }
         }
 
-          /** Returns a sorted array containing unique field numbers */
-        private int[] flushFieldNums()
+        /** Returns a sorted array containing unique field numbers */
+        private int[] FlushFieldNums()
         {
-            SortedSet<int> fieldNums = new TreeSet<int>();
-            for (DocData dd : pendingDocs) {
-                for (FieldData fd : dd.fields) {
-                fieldNums.Add(fd.fieldNum);
+            SortedSet<int> fieldNums = new SortedSet<int>();
+            foreach (DocData dd in pendingDocs)
+            {
+                foreach (FieldData fd in dd.fields)
+                {
+                    fieldNums.Add(fd.fieldNum);
                 }
             }
 
-            int numDistinctFields = fieldNums.size();
-            int bitsRequired = PackedInts.bitsRequired(fieldNums.Last());
+            int numDistinctFields = fieldNums.Count;
+            int bitsRequired = PackedInts.BitsRequired(fieldNums.Last());
             int token = (Math.Min(numDistinctFields - 1, 0x07) << 5) | bitsRequired;
-            vectorsStream.WriteByte((byte) token);
-            if (numDistinctFields - 1 >= 0x07) {
+            vectorsStream.WriteByte((byte)token);
+            if (numDistinctFields - 1 >= 0x07)
+            {
                 vectorsStream.WriteVInt(numDistinctFields - 1 - 0x07);
             }
-            PackedInts.Writer writer = PackedInts.getWriterNoHeader(vectorsStream, PackedInts.Format.PACKED, fieldNums.size(), bitsRequired, 1);
-            for (int fieldNum : fieldNums) {
+            PackedInts.Writer writer = PackedInts.GetWriterNoHeader(vectorsStream, PackedInts.Format.PACKED, fieldNums.Count, bitsRequired, 1);
+            foreach (int fieldNum in fieldNums)
+            {
                 writer.Add(fieldNum);
             }
             writer.Finish();
 
-            int[] fns = new int[fieldNums.size()];
+            int[] fns = new int[fieldNums.Count];
             int i = 0;
-            for (int key : fieldNums) {
+            foreach (int key in fieldNums)
+            {
                 fns[i++] = key;
             }
             return fns;
         }
 
-        private void flushFields(int totalFields, int[] fieldNums) throws IOException {
-            final PackedInts.Writer writer = PackedInts.getWriterNoHeader(vectorsStream, PackedInts.Format.PACKED, totalFields, PackedInts.bitsRequired(fieldNums.length - 1), 1);
-            for (DocData dd : pendingDocs) {
-                for (FieldData fd : dd.fields) {
-                final int fieldNumIndex = Arrays.binarySearch(fieldNums, fd.fieldNum);
-                assert fieldNumIndex >= 0;
-                writer.add(fieldNumIndex);
+        private void FlushFields(int totalFields, int[] fieldNums)
+        {
+            PackedInts.Writer writer = PackedInts.GetWriterNoHeader(vectorsStream, PackedInts.Format.PACKED, totalFields, PackedInts.BitsRequired(fieldNums.Length - 1), 1);
+            foreach (DocData dd in pendingDocs)
+            {
+                foreach (FieldData fd in dd.fields)
+                {
+                    int fieldNumIndex = Array.BinarySearch(fieldNums, fd.fieldNum);
+                    //assert fieldNumIndex >= 0;
+                    writer.Add(fieldNumIndex);
                 }
             }
-            writer.finish();
+            writer.Finish();
         }
 
-        private void flushFlags(int totalFields, int[] fieldNums) 
+        private void FlushFlags(int totalFields, int[] fieldNums)
         {
             // check if fields always have the same flags
             bool nonChangingFlags = true;
             int[] fieldFlags = new int[fieldNums.Length];
             Arrays.Fill(fieldFlags, -1);
-            outer:
-            for (DocData dd : pendingDocs) {
-                for (FieldData fd : dd.fields) {
-                int fieldNumOff = Arrays.BinarySearch(fieldNums, fd.ieldNum);
-                if (fieldFlags[fieldNumOff] == -1) {
-                    fieldFlags[fieldNumOff] = fd.flags;
-                } else if (fieldFlags[fieldNumOff] != fd.flags) {
-                    nonChangingFlags = false;
-                    break outer;
-                }
+            bool shouldBreakOuter;
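+            // .NET Port: C# has no labeled break, so this flag stands in for Java's "break outer"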
+            foreach (DocData dd in pendingDocs)
+            {
+                shouldBreakOuter = false;
+                foreach (FieldData fd in dd.fields)
+                {
+                    int fieldNumOff = Array.BinarySearch(fieldNums, fd.fieldNum);
+                    if (fieldFlags[fieldNumOff] == -1)
+                    {
+                        fieldFlags[fieldNumOff] = fd.flags;
+                    }
+                    else if (fieldFlags[fieldNumOff] != fd.flags)
+                    {
+                        nonChangingFlags = false;
+                        shouldBreakOuter = true;
+                    }
                 }
+
+                if (shouldBreakOuter)
+                    break;
             }
 
-            if (nonChangingFlags) {
+            if (nonChangingFlags)
+            {
                 // write one flag per field num
                 vectorsStream.WriteVInt(0);
-                PackedInts.Writer writer = PackedInts.GetWriterNoHeader(vectorsStream, PackedInts.Format.PACKED, fieldFlags.length, FLAGS_BITS, 1);
-                for (int flags : fieldFlags) {
-                writer.Add(flags);
+                PackedInts.Writer writer = PackedInts.GetWriterNoHeader(vectorsStream, PackedInts.Format.PACKED, fieldFlags.Length, FLAGS_BITS, 1);
+                foreach (int flags in fieldFlags)
+                {
+                    writer.Add(flags);
                 }
                 writer.Finish();
-            } else {
+            }
+            else
+            {
                 // write one flag for every field instance
                 vectorsStream.WriteVInt(1);
                 PackedInts.Writer writer = PackedInts.GetWriterNoHeader(vectorsStream, PackedInts.Format.PACKED, totalFields, FLAGS_BITS, 1);
-                for (DocData dd : pendingDocs) {
-                for (FieldData fd : dd.fields) {
-                    writer.add(fd.flags);
-                }
+                foreach (DocData dd in pendingDocs)
+                {
+                    foreach (FieldData fd in dd.fields)
+                    {
+                        writer.Add(fd.flags);
+                    }
                 }
                 writer.Finish();
             }
         }
 
-        private void flushNumTerms(int totalFields) 
+        private void FlushNumTerms(int totalFields)
         {
             int maxNumTerms = 0;
-            for (DocData dd : pendingDocs) {
-                for (FieldData fd : dd.fields) {
-                maxNumTerms |= fd.numTerms;
+            foreach (DocData dd in pendingDocs)
+            {
+                foreach (FieldData fd in dd.fields)
+                {
+                    maxNumTerms |= fd.numTerms;
                 }
             }
-            
-            int bitsRequired = PackedInts.bitsRequired(maxNumTerms);
+
+            int bitsRequired = PackedInts.BitsRequired(maxNumTerms);
             vectorsStream.WriteVInt(bitsRequired);
-            PackedInts.Writer writer = PackedInts.getWriterNoHeader(
+            PackedInts.Writer writer = PackedInts.GetWriterNoHeader(
                 vectorsStream, PackedInts.Format.PACKED, totalFields, bitsRequired, 1);
-            for (DocData dd : pendingDocs) {
-                for (FieldData fd : dd.fields) {
-                writer.add(fd.numTerms);
+            foreach (DocData dd in pendingDocs)
+            {
+                foreach (FieldData fd in dd.fields)
+                {
+                    writer.Add(fd.numTerms);
                 }
             }
-            writer.finish();
+            writer.Finish();
         }
 
-        private void flushTermLengths() 
+        private void FlushTermLengths()
         {
-            writer.reset(vectorsStream);
-            for (DocData dd : pendingDocs) {
-                for (FieldData fd : dd.fields) {
-                for (int i = 0; i < fd.numTerms; ++i) {
-                    writer.add(fd.prefixLengths[i]);
-                }
+            writer.Reset(vectorsStream);
+            foreach (DocData dd in pendingDocs)
+            {
+                foreach (FieldData fd in dd.fields)
+                {
+                    for (int i = 0; i < fd.numTerms; ++i)
+                    {
+                        writer.Add(fd.prefixLengths[i]);
+                    }
                 }
             }
-            writer.finish();
-            writer.reset(vectorsStream);
-            for (DocData dd : pendingDocs) {
-                for (FieldData fd : dd.fields) {
-                for (int i = 0; i < fd.numTerms; ++i) {
-                    writer.add(fd.suffixLengths[i]);
-                }
+            writer.Finish();
+            writer.Reset(vectorsStream);
+            foreach (DocData dd in pendingDocs)
+            {
+                foreach (FieldData fd in dd.fields)
+                {
+                    for (int i = 0; i < fd.numTerms; ++i)
+                    {
+                        writer.Add(fd.suffixLengths[i]);
+                    }
                 }
             }
-            writer.finish();
+            writer.Finish();
         }
 
-        private void flushTermFreqs() 
+        private void FlushTermFreqs()
         {
-            writer.reset(vectorsStream);
-            for (DocData dd : pendingDocs) {
-                for (FieldData fd : dd.fields) {
-                for (int i = 0; i < fd.numTerms; ++i) {
-                    writer.add(fd.freqs[i] - 1);
-                }
+            writer.Reset(vectorsStream);
+            foreach (DocData dd in pendingDocs)
+            {
+                foreach (FieldData fd in dd.fields)
+                {
+                    for (int i = 0; i < fd.numTerms; ++i)
+                    {
+                        writer.Add(fd.freqs[i] - 1);
+                    }
                 }
             }
-            writer.finish();
+            writer.Finish();
         }
 
-        private void flushPositions()
+        private void FlushPositions()
         {
-            writer.reset(vectorsStream);
-            for (DocData dd : pendingDocs) {
-                for (FieldData fd : dd.fields) {
-                if (fd.hasPositions) {
-                    int pos = 0;
-                    for (int i = 0; i < fd.numTerms; ++i) {
-                    int previousPosition = 0;
-                    for (int j = 0; j < fd.freqs[i]; ++j) {
-                        int position = positionsBuf[fd .posStart + pos++];
-                        writer.add(position - previousPosition);
-                        previousPosition = position;
-                    }
+            writer.Reset(vectorsStream);
+            foreach (DocData dd in pendingDocs)
+            {
+                foreach (FieldData fd in dd.fields)
+                {
+                    if (fd.hasPositions)
+                    {
+                        int pos = 0;
+                        for (int i = 0; i < fd.numTerms; ++i)
+                        {
+                            int previousPosition = 0;
+                            for (int j = 0; j < fd.freqs[i]; ++j)
+                            {
+                                int position = positionsBuf[fd.posStart + pos++];
+                                writer.Add(position - previousPosition);
+                                previousPosition = position;
+                            }
+                        }
                     }
                 }
-                }
             }
-            writer.finish();
+            writer.Finish();
         }
 
-        private void flushOffsets(int[] fieldNums) 
+        private void FlushOffsets(int[] fieldNums)
         {
             bool hasOffsets = false;
-            long[] sumPos = new long[fieldNums.length];
-            long[] sumOffsets = new long[fieldNums.length];
-            for (DocData dd : pendingDocs) {
-                for (FieldData fd : dd.fields) {
-                hasOffsets |= fd.hasOffsets;
-                if (fd.hasOffsets && fd.hasPositions) {
-                    int fieldNumOff = Arrays.binarySearch(fieldNums, fd.fieldNum);
-                    int pos = 0;
-                    for (int i = 0; i < fd.numTerms; ++i) {
-                    int previousPos = 0;
-                    int previousOff = 0;
-                    for (int j = 0; j < fd.freqs[i]; ++j) {
-                        int position = positionsBuf[fd.posStart + pos];
-                        int startOffset = startOffsetsBuf[fd.offStart + pos];
-                        sumPos[fieldNumOff] += position - previousPos;
-                        sumOffsets[fieldNumOff] += startOffset - previousOff;
-                        previousPos = position;
-                        previousOff = startOffset;
-                        ++pos;
-                    }
+            long[] sumPos = new long[fieldNums.Length];
+            long[] sumOffsets = new long[fieldNums.Length];
+            foreach (DocData dd in pendingDocs)
+            {
+                foreach (FieldData fd in dd.fields)
+                {
+                    hasOffsets |= fd.hasOffsets;
+                    if (fd.hasOffsets && fd.hasPositions)
+                    {
+                        int fieldNumOff = Array.BinarySearch(fieldNums, fd.fieldNum);
+                        int pos = 0;
+                        for (int i = 0; i < fd.numTerms; ++i)
+                        {
+                            int previousPos = 0;
+                            int previousOff = 0;
+                            for (int j = 0; j < fd.freqs[i]; ++j)
+                            {
+                                int position = positionsBuf[fd.posStart + pos];
+                                int startOffset = startOffsetsBuf[fd.offStart + pos];
+                                sumPos[fieldNumOff] += position - previousPos;
+                                sumOffsets[fieldNumOff] += startOffset - previousOff;
+                                previousPos = position;
+                                previousOff = startOffset;
+                                ++pos;
+                            }
+                        }
                     }
                 }
-                }
             }
 
-            if (!hasOffsets) {
+            if (!hasOffsets)
+            {
                 // nothing to do
                 return;
             }
 
-            float[] charsPerTerm = new float[fieldNums.length];
-            for (int i = 0; i < fieldNums.length; ++i) {
-                charsPerTerm[i] = (sumPos[i] <= 0 || sumOffsets[i] <= 0) ? 0 : (float) ((double) sumOffsets[i] / sumPos[i]);
+            float[] charsPerTerm = new float[fieldNums.Length];
+            for (int i = 0; i < fieldNums.Length; ++i)
+            {
+                charsPerTerm[i] = (sumPos[i] <= 0 || sumOffsets[i] <= 0) ? 0 : (float)((double)sumOffsets[i] / sumPos[i]);
             }
 
             // start offsets
-            for (int i = 0; i < fieldNums.length; ++i) {
-                vectorsStream.writeInt(Float.floatToRawIntBits(charsPerTerm[i]));
-            }
-
-            writer.reset(vectorsStream);
-            for (DocData dd : pendingDocs) {
-                for (FieldData fd : dd.fields) {
-                if ((fd.flags & OFFSETS) != 0) {
-                    int fieldNumOff = Arrays.binarySearch(fieldNums, fd.fieldNum);
-                    float cpt = charsPerTerm[fieldNumOff];
-                    int pos = 0;
-                    for (int i = 0; i < fd.numTerms; ++i) {
-                    int previousPos = 0;
-                    int previousOff = 0;
-                    for (int j = 0; j < fd.freqs[i]; ++j) {
-                        final int position = fd.hasPositions ? positionsBuf[fd.posStart + pos] : 0;
-                        final int startOffset = startOffsetsBuf[fd.offStart + pos];
-                        writer.add(startOffset - previousOff - (int) (cpt * (position - previousPos)));
-                        previousPos = position;
-                        previousOff = startOffset;
-                        ++pos;
-                    }
+            for (int i = 0; i < fieldNums.Length; ++i)
+            {
+                vectorsStream.WriteInt(Number.FloatToIntBits(charsPerTerm[i]));
+            }
+
+            writer.Reset(vectorsStream);
+            foreach (DocData dd in pendingDocs)
+            {
+                foreach (FieldData fd in dd.fields)
+                {
+                    if ((fd.flags & OFFSETS) != 0)
+                    {
+                        int fieldNumOff = Array.BinarySearch(fieldNums, fd.fieldNum);
+                        float cpt = charsPerTerm[fieldNumOff];
+                        int pos = 0;
+                        for (int i = 0; i < fd.numTerms; ++i)
+                        {
+                            int previousPos = 0;
+                            int previousOff = 0;
+                            for (int j = 0; j < fd.freqs[i]; ++j)
+                            {
+                                int position = fd.hasPositions ? positionsBuf[fd.posStart + pos] : 0;
+                                int startOffset = startOffsetsBuf[fd.offStart + pos];
+                                writer.Add(startOffset - previousOff - (int)(cpt * (position - previousPos)));
+                                previousPos = position;
+                                previousOff = startOffset;
+                                ++pos;
+                            }
+                        }
                     }
                 }
-                }
             }
-            writer.finish();
+            writer.Finish();
 
             // lengths
-            writer.reset(vectorsStream);
-            for (DocData dd : pendingDocs) {
-                for (FieldData fd : dd.fields) {
-                if ((fd.flags & OFFSETS) != 0) {
-                    int pos = 0;
-                    for (int i = 0; i < fd.numTerms; ++i) {
-                    for (int j = 0; j < fd.freqs[i]; ++j) {
-                        writer.add(lengthsBuf[fd.offStart + pos++] - fd.prefixLengths[i] - fd.suffixLengths[i]);
-                    }
+            writer.Reset(vectorsStream);
+            foreach (DocData dd in pendingDocs)
+            {
+                foreach (FieldData fd in dd.fields)
+                {
+                    if ((fd.flags & OFFSETS) != 0)
+                    {
+                        int pos = 0;
+                        for (int i = 0; i < fd.numTerms; ++i)
+                        {
+                            for (int j = 0; j < fd.freqs[i]; ++j)
+                            {
+                                writer.Add(lengthsBuf[fd.offStart + pos++] - fd.prefixLengths[i] - fd.suffixLengths[i]);
+                            }
+                        }
                     }
                 }
-                }
             }
-            writer.finish();
+            writer.Finish();
         }
 
-        private void flushPayloadLengths() 
+        private void FlushPayloadLengths()
         {
-            writer.reset(vectorsStream);
-            for (DocData dd : pendingDocs) {
-                for (FieldData fd : dd.fields) {
-                if (fd.hasPayloads) {
-                    for (int i = 0; i < fd.totalPositions; ++i) {
-                    writer.add(payloadLengthsBuf[fd.payStart + i]);
+            writer.Reset(vectorsStream);
+            foreach (DocData dd in pendingDocs)
+            {
+                foreach (FieldData fd in dd.fields)
+                {
+                    if (fd.hasPayloads)
+                    {
+                        for (int i = 0; i < fd.totalPositions; ++i)
+                        {
+                            writer.Add(payloadLengthsBuf[fd.payStart + i]);
+                        }
                     }
                 }
-                }
             }
-            writer.finish();
+            writer.Finish();
         }
 
-
-
         public override void Abort()
         {
-            IOUtils.CloseWhileHandlingException(this);
+            IOUtils.CloseWhileHandlingException((IDisposable)this);
             IOUtils.DeleteFilesIgnoringExceptions(directory,
             IndexFileNames.SegmentFileName(segment, segmentSuffix, VECTORS_EXTENSION),
             IndexFileNames.SegmentFileName(segment, segmentSuffix, VECTORS_INDEX_EXTENSION));
@@ -619,171 +704,191 @@ namespace Lucene.Net.Codecs.Compressing
 
         public override void Finish(Index.FieldInfos fis, int numDocs)
         {
-            if (!pendingDocs.isEmpty()) {
-              flush();
+            if (pendingDocs.Count > 0)
+            {
+                Flush();
             }
-            if (numDocs != this.numDocs) {
-              throw new RuntimeException("Wrote " + this.numDocs + " docs, finish called with numDocs=" + numDocs);
+            if (numDocs != this.numDocs)
+            {
+                throw new InvalidOperationException("Wrote " + this.numDocs + " docs, finish called with numDocs=" + numDocs);
             }
-            indexWriter.finish(numDocs);
+            indexWriter.Finish(numDocs);
         }
 
         public override IComparer<Util.BytesRef> Comparator
         {
-            get 
-            { 
-                return BytesRef.getUTF8SortedAsUnicodeComparator(); 
+            get
+            {
+                return BytesRef.UTF8SortedAsUnicodeComparer;
             }
         }
 
-        public void addProx(int numProx, DataInput positions, DataInput offsets)
+        public override void AddProx(int numProx, DataInput positions, DataInput offsets)
         {
 
-            if (curField.hasPositions) {
-                final int posStart = curField.posStart + curField.totalPositions;
-                if (posStart + numProx > positionsBuf.length) {
-                positionsBuf = ArrayUtil.grow(positionsBuf, posStart + numProx);
+            if (curField.hasPositions)
+            {
+                int posStart = curField.posStart + curField.totalPositions;
+                if (posStart + numProx > positionsBuf.Length)
+                {
+                    positionsBuf = ArrayUtil.Grow(positionsBuf, posStart + numProx);
                 }
                 int position = 0;
-                if (curField.hasPayloads) {
-                final int payStart = curField.payStart + curField.totalPositions;
-                if (payStart + numProx > payloadLengthsBuf.length) {
-                    payloadLengthsBuf = ArrayUtil.grow(payloadLengthsBuf, payStart + numProx);
-                }
-                for (int i = 0; i < numProx; ++i) {
-                    final int code = positions.readVInt();
-                    if ((code & 1) != 0) {
-                    // This position has a payload
-                    final int payloadLength = positions.readVInt();
-                    payloadLengthsBuf[payStart + i] = payloadLength;
-                    payloadBytes.copyBytes(positions, payloadLength);
-                    } else {
-                    payloadLengthsBuf[payStart + i] = 0;
+                if (curField.hasPayloads)
+                {
+                    int payStart = curField.payStart + curField.totalPositions;
+                    if (payStart + numProx > payloadLengthsBuf.Length)
+                    {
+                        payloadLengthsBuf = ArrayUtil.Grow(payloadLengthsBuf, payStart + numProx);
+                    }
+                    for (int i = 0; i < numProx; ++i)
+                    {
+                        int code = positions.ReadVInt();
+                        if ((code & 1) != 0)
+                        {
+                            // This position has a payload
+                            int payloadLength = positions.ReadVInt();
+                            payloadLengthsBuf[payStart + i] = payloadLength;
+                            payloadBytes.CopyBytes(positions, payloadLength);
+                        }
+                        else
+                        {
+                            payloadLengthsBuf[payStart + i] = 0;
+                        }
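+                        // Number.URShift ports Java's unsigned right shift (code >>> 1)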
+                        position += Number.URShift(code, 1);
+                        positionsBuf[posStart + i] = position;
                     }
-                    position += code >>> 1;
-                    positionsBuf[posStart + i] = position;
-                }
-                } else {
-                for (int i = 0; i < numProx; ++i) {
-                    position += (positions.readVInt() >>> 1);
-                    positionsBuf[posStart + i] = position;
                 }
+                else
+                {
+                    for (int i = 0; i < numProx; ++i)
+                    {
+                        position += Number.URShift(positions.ReadVInt(), 1);
+                        positionsBuf[posStart + i] = position;
+                    }
                 }
             }
 
-            if (curField.hasOffsets) {
+            if (curField.hasOffsets)
+            {
                 int offStart = curField.offStart + curField.totalPositions;
-                if (offStart + numProx > startOffsetsBuf.length) {
-                    int newLength = ArrayUtil.oversize(offStart + numProx, 4);
-                    startOffsetsBuf = Arrays.copyOf(startOffsetsBuf, newLength);
-                    lengthsBuf = Arrays.copyOf(lengthsBuf, newLength);
+                if (offStart + numProx > startOffsetsBuf.Length)
+                {
+                    int newLength = ArrayUtil.Oversize(offStart + numProx, 4);
+                    startOffsetsBuf = Arrays.CopyOf(startOffsetsBuf, newLength);
+                    lengthsBuf = Arrays.CopyOf(lengthsBuf, newLength);
                 }
-                
+
                 int lastOffset = 0, startOffset, endOffset;
-                for (int i = 0; i < numProx; ++i) {
-                startOffset = lastOffset + offsets.readVInt();
-                endOffset = startOffset + offsets.readVInt();
-                lastOffset = endOffset;
-                startOffsetsBuf[offStart + i] = startOffset;
-                lengthsBuf[offStart + i] = endOffset - startOffset;
+                for (int i = 0; i < numProx; ++i)
+                {
+                    startOffset = lastOffset + offsets.ReadVInt();
+                    endOffset = startOffset + offsets.ReadVInt();
+                    lastOffset = endOffset;
+                    startOffsetsBuf[offStart + i] = startOffset;
+                    lengthsBuf[offStart + i] = endOffset - startOffset;
                 }
             }
 
             curField.totalPositions += numProx;
         }
 
-        public int merge(MergeState mergeState) 
+        public override int Merge(MergeState mergeState) 
         {
             int docCount = 0;
             int idx = 0;
 
-            for (AtomicReader reader : mergeState.readers) 
+            foreach (AtomicReader reader in mergeState.readers) 
             {
                 SegmentReader matchingSegmentReader = mergeState.matchingSegmentReaders[idx++];
                 CompressingTermVectorsReader matchingVectorsReader = null;
                 if (matchingSegmentReader != null) {
-                TermVectorsReader vectorsReader = matchingSegmentReader.getTermVectorsReader();
+                TermVectorsReader vectorsReader = matchingSegmentReader.TermVectorsReader;
                 // we can only bulk-copy if the matching reader is also a CompressingTermVectorsReader
-                if (vectorsReader != null && vectorsReader instanceof CompressingTermVectorsReader) {
+                if (vectorsReader != null && vectorsReader is CompressingTermVectorsReader) {
                     matchingVectorsReader = (CompressingTermVectorsReader) vectorsReader;
                 }
                 }
 
-                int maxDoc = reader.maxDoc();
-                Bits liveDocs = reader.getLiveDocs();
+                int maxDoc = reader.MaxDoc;
+                IBits liveDocs = reader.LiveDocs;
 
                 if (matchingVectorsReader == null
-                    || matchingVectorsReader.getCompressionMode() != compressionMode
-                    || matchingVectorsReader.getChunkSize() != chunkSize
-                    || matchingVectorsReader.getPackedIntsVersion() != PackedInts.VERSION_CURRENT) {
+                    || matchingVectorsReader.CompressionMode != compressionMode
+                    || matchingVectorsReader.ChunkSize != chunkSize
+                    || matchingVectorsReader.PackedIntsVersion != PackedInts.VERSION_CURRENT) {
                 // naive merge...
-                for (int i = nextLiveDoc(0, liveDocs, maxDoc); i < maxDoc; i = nextLiveDoc(i + 1, liveDocs, maxDoc)) {
-                    Fields vectors = reader.getTermVectors(i);
-                    addAllDocVectors(vectors, mergeState);
+                for (int i = NextLiveDoc(0, liveDocs, maxDoc); i < maxDoc; i = NextLiveDoc(i + 1, liveDocs, maxDoc)) {
+                    Fields vectors = reader.GetTermVectors(i);
+                    AddAllDocVectors(vectors, mergeState);
                     ++docCount;
-                    mergeState.checkAbort.work(300);
+                    mergeState.checkAbort.Work(300);
                 }
                 } else {
-                CompressingStoredFieldsIndexReader index = matchingVectorsReader.getIndex();
-                IndexInput vectorsStream = matchingVectorsReader.getVectorsStream();
-                for (int i = nextLiveDoc(0, liveDocs, maxDoc); i < maxDoc; ) {
-                    if (pendingDocs.isEmpty()
-                        && (i == 0 || index.getStartPointer(i - 1) < index.getStartPointer(i))) { // start of a chunk
-                    long startPointer = index.getStartPointer(i);
-                    vectorsStream.seek(startPointer);
-                    int docBase = vectorsStream.readVInt();
-                    int chunkDocs = vectorsStream.readVInt();
-                    if (docBase + chunkDocs < matchingSegmentReader.maxDoc()
-                        && nextDeletedDoc(docBase, liveDocs, docBase + chunkDocs) == docBase + chunkDocs) {
-                        long chunkEnd = index.getStartPointer(docBase + chunkDocs);
-                        long chunkLength = chunkEnd - vectorsStream.getFilePointer();
-                        indexWriter.writeIndex(chunkDocs, this.vectorsStream.getFilePointer());
-                        this.vectorsStream.writeVInt(docCount);
-                        this.vectorsStream.writeVInt(chunkDocs);
-                        this.vectorsStream.copyBytes(vectorsStream, chunkLength);
+                CompressingStoredFieldsIndexReader index = matchingVectorsReader.Index;
+                IndexInput vectorsStream = matchingVectorsReader.VectorsStream;
+                for (int i = NextLiveDoc(0, liveDocs, maxDoc); i < maxDoc; ) {
+                    if (pendingDocs.Count == 0
+                        && (i == 0 || index.GetStartPointer(i - 1) < index.GetStartPointer(i))) { // start of a chunk
+                    long startPointer = index.GetStartPointer(i);
+                    vectorsStream.Seek(startPointer);
+                    int docBase = vectorsStream.ReadVInt();
+                    int chunkDocs = vectorsStream.ReadVInt();
+                    if (docBase + chunkDocs < matchingSegmentReader.MaxDoc
+                        && NextDeletedDoc(docBase, liveDocs, docBase + chunkDocs) == docBase + chunkDocs) {
+                        long chunkEnd = index.GetStartPointer(docBase + chunkDocs);
+                        long chunkLength = chunkEnd - vectorsStream.FilePointer;
+                        indexWriter.WriteIndex(chunkDocs, this.vectorsStream.FilePointer);
+                        this.vectorsStream.WriteVInt(docCount);
+                        this.vectorsStream.WriteVInt(chunkDocs);
+                        this.vectorsStream.CopyBytes(vectorsStream, chunkLength);
                         docCount += chunkDocs;
                         this.numDocs += chunkDocs;
-                        mergeState.checkAbort.work(300 * chunkDocs);
-                        i = nextLiveDoc(docBase + chunkDocs, liveDocs, maxDoc);
+                        mergeState.checkAbort.Work(300 * chunkDocs);
+                        i = NextLiveDoc(docBase + chunkDocs, liveDocs, maxDoc);
                     } else {
-                        for (; i < docBase + chunkDocs; i = nextLiveDoc(i + 1, liveDocs, maxDoc)) {
-                        Fields vectors = reader.getTermVectors(i);
-                        addAllDocVectors(vectors, mergeState);
+                        for (; i < docBase + chunkDocs; i = NextLiveDoc(i + 1, liveDocs, maxDoc)) {
+                        Fields vectors = reader.GetTermVectors(i);
+                        AddAllDocVectors(vectors, mergeState);
                         ++docCount;
-                        mergeState.checkAbort.work(300);
+                        mergeState.checkAbort.Work(300);
                         }
                     }
                     } else {
-                    Fields vectors = reader.getTermVectors(i);
-                    addAllDocVectors(vectors, mergeState);
+                    Fields vectors = reader.GetTermVectors(i);
+                    AddAllDocVectors(vectors, mergeState);
                     ++docCount;
-                    mergeState.checkAbort.work(300);
-                    i = nextLiveDoc(i + 1, liveDocs, maxDoc);
+                    mergeState.checkAbort.Work(300);
+                    i = NextLiveDoc(i + 1, liveDocs, maxDoc);
                     }
                 }
                 }
             }
-            finish(mergeState.fieldInfos, docCount);
+            Finish(mergeState.fieldInfos, docCount);
             return docCount;
         }
 
-        private static int nextLiveDoc(int doc, Bits liveDocs, int maxDoc) 
+        private static int NextLiveDoc(int doc, IBits liveDocs, int maxDoc)
         {
-            if (liveDocs == null) {
+            if (liveDocs == null)
+            {
                 return doc;
             }
-            while (doc < maxDoc && !liveDocs.get(doc)) {
+            while (doc < maxDoc && !liveDocs[doc])
+            {
                 ++doc;
             }
             return doc;
         }
 
-        private static int nextDeletedDoc(int doc, Bits liveDocs, int maxDoc) 
+        private static int NextDeletedDoc(int doc, IBits liveDocs, int maxDoc)
         {
-            if (liveDocs == null) {
+            if (liveDocs == null)
+            {
                 return maxDoc;
             }
-            while (doc < maxDoc && liveDocs.get(doc)) {
+            while (doc < maxDoc && liveDocs[doc])
+            {
                 ++doc;
             }
             return doc;
@@ -791,10 +896,12 @@ namespace Lucene.Net.Codecs.Compressing
 
         protected override void Dispose(bool disposing)
         {
-            try 
+            try
             {
                 IOUtils.Close(vectorsStream, indexWriter);
-            } finally {
+            }
+            finally
+            {
                 vectorsStream = null;
                 indexWriter = null;
             }
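
Throughout this file the port maps java.util.ArrayDeque<DocData> onto LinkedList<DocData>. A minimal, BCL-only sketch of the mapping used above (nothing here is a Lucene.Net type); note that Enumerable.Reverse buffers the whole list, so the early break in AddDocData no longer avoids a full pass:

    using System;
    using System.Collections.Generic;
    using System.Linq;

    static class DequeMappingSketch
    {
        static void Main()
        {
            var pending = new LinkedList<int>();

            pending.AddLast(1);                   // ArrayDeque.add / addLast, O(1)
            pending.AddLast(2);

            int first = pending.First.Value;      // ArrayDeque.getFirst, O(1)
            int last = pending.Last.Value;        // ArrayDeque.getLast, O(1)

            // ArrayDeque.descendingIterator -> Enumerable.Reverse; Reverse()
            // buffers the list into an array before yielding, so breaking out
            // early no longer skips the O(n) pass.
            foreach (int v in pending.Reverse())
            {
                Console.WriteLine(v);
            }

            Console.WriteLine(first + last);      // isEmpty/size -> Count
            pending.Clear();                      // ArrayDeque.clear
        }
    }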

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/80561f72/src/core/Codecs/Compressing/CompressionMode.cs
----------------------------------------------------------------------
diff --git a/src/core/Codecs/Compressing/CompressionMode.cs b/src/core/Codecs/Compressing/CompressionMode.cs
index 22b5fca..0982fd0 100644
--- a/src/core/Codecs/Compressing/CompressionMode.cs
+++ b/src/core/Codecs/Compressing/CompressionMode.cs
@@ -128,7 +128,7 @@ namespace Lucene.Net.Codecs.Compressing
         public sealed class DecompressorLZ4 : Decompressor
         {
 
-            public override void decompress(DataInput input, int originalLength, int offset, int length, BytesRef bytes)
+            public override void Decompress(DataInput input, int originalLength, int offset, int length, BytesRef bytes)
             {
                 // add 7 padding bytes, this is not necessary but can help decompression run faster
                 if (bytes.bytes.Length < originalLength + 7)
@@ -145,7 +145,7 @@ namespace Lucene.Net.Codecs.Compressing
                 bytes.length = length;
             }
 
-            public override Decompressor clone()
+            public override object Clone()
             {
                 return this;
             }
@@ -161,7 +161,7 @@ namespace Lucene.Net.Codecs.Compressing
                 ht = new LZ4.HashTable();
             }
 
-            public override void compress(byte[] bytes, int off, int len, DataOutput output)
+            public override void Compress(sbyte[] bytes, int off, int len, DataOutput output)
             {
                 LZ4.Compress(bytes, off, len, output, ht);
             }
@@ -178,7 +178,7 @@ namespace Lucene.Net.Codecs.Compressing
                 ht = new LZ4.HCHashTable();
             }
 
-            public override void compress(byte[] bytes, int off, int len, DataOutput output)
+            public override void Compress(sbyte[] bytes, int off, int len, DataOutput output)
             {
                 LZ4.CompressHC(bytes, off, len, output, ht);
             }
@@ -252,7 +252,7 @@ namespace Lucene.Net.Codecs.Compressing
                 bytes.length = length;
             }
 
-            public override Decompressor clone()
+            public override object Clone()
             {
                 return new DeflateDecompressor();
             }
@@ -271,7 +271,7 @@ namespace Lucene.Net.Codecs.Compressing
                 compressed = new sbyte[64];
             }
 
-            public override void compress(byte[] bytes, int off, int len, DataOutput output)
+            public override void Compress(sbyte[] bytes, int off, int len, DataOutput output)
             {
                 compressor.Reset();
                 compressor.SetInput(bytes, off, len);
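
The Compress signatures in this file take sbyte[] rather than byte[] because Java's byte is signed. When data arrives as byte[], the CLR accepts an in-place reinterpretation; a minimal sketch of that trick (a runtime behavior of the CLR, not an API of this port):

    using System;

    static class SignedBufferSketch
    {
        static void Main()
        {
            byte[] raw = { 0xFF, 0x01 };

            // byte[] and sbyte[] share a runtime layout, so the CLR permits
            // this cast once the compiler is routed through object; no copy
            // of the buffer is made.
            sbyte[] signed = (sbyte[])(object)raw;

            Console.WriteLine(signed[0]); // -1 (0xFF reinterpreted as signed)
            Console.WriteLine(signed[1]); // 1
        }
    }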

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/80561f72/src/core/Codecs/FieldsProducer.cs
----------------------------------------------------------------------
diff --git a/src/core/Codecs/FieldsProducer.cs b/src/core/Codecs/FieldsProducer.cs
index 0f46fc7..873ce08 100644
--- a/src/core/Codecs/FieldsProducer.cs
+++ b/src/core/Codecs/FieldsProducer.cs
@@ -12,11 +12,11 @@ namespace Lucene.Net.Codecs
         {
         }
 
-        public abstract IEnumerator<string> GetEnumerator();
+        public abstract override IEnumerator<string> GetEnumerator();
 
-        public abstract Terms Terms(string field);
+        public abstract override Terms Terms(string field);
 
-        public abstract int Size { get; }
+        public abstract override int Size { get; }
         
         public void Dispose()
         {

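The abstract override modifier is the operative fix here: redeclaring an inherited abstract member without override is a compile error (it would hide the base member and leave it unimplemented), while abstract override binds the redeclaration to the base member and keeps it abstract for concrete subclasses. A minimal sketch with stand-in types:

    // Stand-ins for Fields/FieldsProducer; the names are illustrative only.
    abstract class FieldsSketch
    {
        public abstract int Size { get; }
    }

    abstract class FieldsProducerSketch : FieldsSketch
    {
        // Redeclared so this class can add its own members while the
        // property stays abstract for concrete producers to implement.
        public abstract override int Size { get; }
    }

    class OneFieldProducer : FieldsProducerSketch
    {
        public override int Size { get { return 1; } }
    }
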
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/80561f72/src/core/Codecs/Lucene3x/Lucene3xStoredFieldsReader.cs
----------------------------------------------------------------------
diff --git a/src/core/Codecs/Lucene3x/Lucene3xStoredFieldsReader.cs b/src/core/Codecs/Lucene3x/Lucene3xStoredFieldsReader.cs
index 52d3e41..caa2e21 100644
--- a/src/core/Codecs/Lucene3x/Lucene3xStoredFieldsReader.cs
+++ b/src/core/Codecs/Lucene3x/Lucene3xStoredFieldsReader.cs
@@ -198,7 +198,7 @@ namespace Lucene.Net.Codecs.Lucene3x
             indexStream.Seek(FORMAT_SIZE + (docID + docStoreOffset) * 8L);
         }
 
-        public void VisitDocument(int n, StoredFieldVisitor visitor)
+        public override void VisitDocument(int n, StoredFieldVisitor visitor)
         {
             SeekIndex(n);
             fieldsStream.Seek(indexStream.ReadLong());

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/80561f72/src/core/Codecs/Lucene3x/SegmentTermDocs.cs
----------------------------------------------------------------------
diff --git a/src/core/Codecs/Lucene3x/SegmentTermDocs.cs b/src/core/Codecs/Lucene3x/SegmentTermDocs.cs
index 1edd372..8427538 100644
--- a/src/core/Codecs/Lucene3x/SegmentTermDocs.cs
+++ b/src/core/Codecs/Lucene3x/SegmentTermDocs.cs
@@ -21,7 +21,7 @@ namespace Lucene.Net.Codecs.Lucene3x
         protected IBits liveDocs;
         protected IndexInput freqStream;
         protected int count;
-        protected int df;
+        protected internal int df;
         internal int doc = 0;
         internal int freq;
 

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/80561f72/src/core/Codecs/MultiLevelSkipListReader.cs
----------------------------------------------------------------------
diff --git a/src/core/Codecs/MultiLevelSkipListReader.cs b/src/core/Codecs/MultiLevelSkipListReader.cs
index f212f72..47bec11 100644
--- a/src/core/Codecs/MultiLevelSkipListReader.cs
+++ b/src/core/Codecs/MultiLevelSkipListReader.cs
@@ -276,7 +276,7 @@ namespace Lucene.Net.Codecs
                 return data[pos++];
             }
 
-            public override void ReadBytes(byte[] b, int offset, int len, bool useBuffer)
+            public override void ReadBytes(byte[] b, int offset, int len)
             {
                 Array.Copy(data, pos, b, offset, len);
                 pos += len;

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/80561f72/src/core/Codecs/PostingsWriterBase.cs
----------------------------------------------------------------------
diff --git a/src/core/Codecs/PostingsWriterBase.cs b/src/core/Codecs/PostingsWriterBase.cs
index 8cead37..aa863a8 100644
--- a/src/core/Codecs/PostingsWriterBase.cs
+++ b/src/core/Codecs/PostingsWriterBase.cs
@@ -7,7 +7,7 @@ using System.Text;
 
 namespace Lucene.Net.Codecs
 {
-    public class PostingsWriterBase : PostingsConsumer, IDisposable
+    public abstract class PostingsWriterBase : PostingsConsumer, IDisposable
     {
         protected PostingsWriterBase()
         {
@@ -30,5 +30,11 @@ namespace Lucene.Net.Codecs
         }
 
         protected abstract void Dispose(bool disposing);
+
+        public abstract override void StartDoc(int docID, int freq);
+
+        public abstract override void AddPosition(int position, Util.BytesRef payload, int startOffset, int endOffset);
+
+        public abstract override void FinishDoc();
     }
 }
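
The shape being completed here is the standard .NET dispose pattern: a public Dispose() on the base delegating to a protected Dispose(bool) that concrete writers implement. A minimal sketch of that pattern (names illustrative, not the port's exact code):

    using System;

    public abstract class WriterSketch : IDisposable
    {
        public void Dispose()
        {
            Dispose(true);              // release resources once
            GC.SuppressFinalize(this);  // no finalizer work remains
        }

        // Concrete writers close their outputs here.
        protected abstract void Dispose(bool disposing);
    }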

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/80561f72/src/core/Document/Document.cs
----------------------------------------------------------------------
diff --git a/src/core/Document/Document.cs b/src/core/Document/Document.cs
index 608cfe6..8ea9490 100644
--- a/src/core/Document/Document.cs
+++ b/src/core/Document/Document.cs
@@ -50,6 +50,16 @@ namespace Lucene.Net.Documents
         public Document()
         {
         }
+
+        public IEnumerator<IIndexableField> GetEnumerator()
+        {
+            return fields.GetEnumerator();
+        }
+
+        System.Collections.IEnumerator System.Collections.IEnumerable.GetEnumerator()
+        {
+            return GetEnumerator();
+        }
         
         public void Add(IIndexableField field)
         {
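
With the generic GetEnumerator plus the explicit non-generic forwarder in place, a Document becomes directly enumerable. A hypothetical usage sketch (the StringField constructor and Field.Store value are assumed from the 4.x API, not quoted from this commit):

    var doc = new Document();
    doc.Add(new StringField("id", "42", Field.Store.YES));
    foreach (IIndexableField f in doc)   // uses the enumerator added above
    {
        Console.WriteLine(f.Name);
    }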

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/80561f72/src/core/Document/Field.cs
----------------------------------------------------------------------
diff --git a/src/core/Document/Field.cs b/src/core/Document/Field.cs
index 1548dc9..9cc069f 100644
--- a/src/core/Document/Field.cs
+++ b/src/core/Document/Field.cs
@@ -292,7 +292,7 @@ namespace Lucene.Net.Documents
             fieldsData = value;
         }
 
-        public void SetIntValue(int value)
+        public virtual void SetIntValue(int value)
         {
             if (!(fieldsData is int))
             {
@@ -388,7 +388,7 @@ namespace Lucene.Net.Documents
             return result.ToString();
         }
 
-        public FieldType FieldTypeValue
+        public IIndexableFieldType FieldTypeValue
         {
             get { return type; }
         }
@@ -400,7 +400,7 @@ namespace Lucene.Net.Documents
                 return null;
             }
 
-            FieldType.NumericType? numericType = FieldTypeValue.NumericTypeValue;
+            FieldType.NumericType? numericType = ((FieldType)FieldTypeValue).NumericTypeValue;
 
             if (numericType != null)
             {
@@ -511,7 +511,7 @@ namespace Lucene.Net.Documents
                 }
             }
 
-            public override void Dispose()
+            public void Dispose()
             {
                 pos = size; // this prevents NPE when reading after close!
                 s = null;

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/80561f72/src/core/Document/FieldType.cs
----------------------------------------------------------------------
diff --git a/src/core/Document/FieldType.cs b/src/core/Document/FieldType.cs
index 06394f0..f9410ea 100644
--- a/src/core/Document/FieldType.cs
+++ b/src/core/Document/FieldType.cs
@@ -255,7 +255,7 @@ namespace Lucene.Net.Documents
             return result.ToString();
         }
 
-        public override FieldInfo.DocValuesType DocValueType
+        public FieldInfo.DocValuesType DocValueType
         {
             get { return docValueType; }
             set

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/80561f72/src/core/Index/AtomicReader.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/AtomicReader.cs b/src/core/Index/AtomicReader.cs
index 6ecf68b..5981765 100644
--- a/src/core/Index/AtomicReader.cs
+++ b/src/core/Index/AtomicReader.cs
@@ -18,7 +18,7 @@ namespace Lucene.Net.Index
             this.readerContext = new AtomicReaderContext(this);
         }
 
-        public sealed override AtomicReaderContext Context
+        public sealed override IndexReaderContext Context
         {
             get
             {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/80561f72/src/core/Index/AtomicReaderContext.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/AtomicReaderContext.cs b/src/core/Index/AtomicReaderContext.cs
index 37462a7..4a804b3 100644
--- a/src/core/Index/AtomicReaderContext.cs
+++ b/src/core/Index/AtomicReaderContext.cs
@@ -49,7 +49,7 @@ namespace Lucene.Net.Index
             }
         }
 
-        public override AtomicReader Reader
+        public override IndexReader Reader
         {
             get
             {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/80561f72/src/core/Index/BaseCompositeReader.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/BaseCompositeReader.cs b/src/core/Index/BaseCompositeReader.cs
index f2af247..81f8322 100644
--- a/src/core/Index/BaseCompositeReader.cs
+++ b/src/core/Index/BaseCompositeReader.cs
@@ -164,11 +164,12 @@ namespace Lucene.Net.Index
             return this.starts[readerIndex];
         }
 
-        protected override IList<R> GetSequentialSubReaders()
+        protected internal override IList<IndexReader> GetSequentialSubReaders()
         {
-            return subReadersList;
+            // TODO: .NET Port: does the new instance here cause problems?
+            return subReadersList.Cast<IndexReader>().ToList();
         }
 
-        protected internal abstract void DoClose();
+        protected override abstract void DoClose();
     }
 }
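
The Cast/ToList above is forced by variance rules: IList<T> is invariant in C#, so an IList of a derived reader type cannot be returned as IList<IndexReader> the way Java's wildcard types allow. A self-contained sketch of the rule (types illustrative):

    using System.Collections.Generic;
    using System.Linq;

    class VarianceSketch
    {
        static void Demo()
        {
            IList<string> subs = new List<string> { "a", "b" };
            // IList<object> bad = subs;                 // CS0029: IList<T> is invariant
            IEnumerable<object> ok = subs;               // IEnumerable<out T> is covariant
            IList<object> copy = subs.Cast<object>().ToList(); // the copy the port makes
        }
    }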

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/80561f72/src/core/Index/BinaryDocValuesWriter.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/BinaryDocValuesWriter.cs b/src/core/Index/BinaryDocValuesWriter.cs
index 37926c5..f34dd48 100644
--- a/src/core/Index/BinaryDocValuesWriter.cs
+++ b/src/core/Index/BinaryDocValuesWriter.cs
@@ -58,12 +58,16 @@ namespace Lucene.Net.Index
             dvConsumer.AddBinaryField(fieldInfo, GetBytesIterator(maxDoc));
         }
 
+        internal override void Abort()
+        {
+        }
+
         private IEnumerable<BytesRef> GetBytesIterator(int maxDocParam)
         { 
             // .NET port: using yield return instead of a custom IEnumerable type
             
             BytesRef value = new BytesRef();
-            AppendingLongBuffer.Iterator lengthsIterator = lengths.GetIterator();
+            AppendingLongBuffer.Iterator lengthsIterator = (AppendingLongBuffer.Iterator)lengths.GetIterator();
             int size = (int) lengths.Size;
             int maxDoc = maxDocParam;
             int upto = 0;

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/80561f72/src/core/Index/ByteSliceReader.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/ByteSliceReader.cs b/src/core/Index/ByteSliceReader.cs
index 7e8274e..cf0a0dc 100644
--- a/src/core/Index/ByteSliceReader.cs
+++ b/src/core/Index/ByteSliceReader.cs
@@ -52,17 +52,17 @@ namespace Lucene.Net.Index
             this.endIndex = endIndex;
 
             level = 0;
-            bufferUpto = startIndex / DocumentsWriter.BYTE_BLOCK_SIZE;
-            bufferOffset = bufferUpto * DocumentsWriter.BYTE_BLOCK_SIZE;
+            bufferUpto = startIndex / ByteBlockPool.BYTE_BLOCK_SIZE;
+            bufferOffset = bufferUpto * ByteBlockPool.BYTE_BLOCK_SIZE;
             buffer = pool.buffers[bufferUpto];
-            upto = startIndex & DocumentsWriter.BYTE_BLOCK_MASK;
+            upto = startIndex & ByteBlockPool.BYTE_BLOCK_MASK;
 
             int firstSize = ByteBlockPool.LEVEL_SIZE_ARRAY[0];
 
             if (startIndex + firstSize >= endIndex)
             {
                 // There is only this one slice to read
-                limit = endIndex & DocumentsWriter.BYTE_BLOCK_MASK;
+                limit = endIndex & ByteBlockPool.BYTE_BLOCK_MASK;
             }
             else
                 limit = upto + firstSize - 4;
@@ -115,11 +115,11 @@ namespace Lucene.Net.Index
             level = ByteBlockPool.NEXT_LEVEL_ARRAY[level];
             int newSize = ByteBlockPool.LEVEL_SIZE_ARRAY[level];
 
-            bufferUpto = nextIndex / DocumentsWriter.BYTE_BLOCK_SIZE;
-            bufferOffset = bufferUpto * DocumentsWriter.BYTE_BLOCK_SIZE;
+            bufferUpto = nextIndex / ByteBlockPool.BYTE_BLOCK_SIZE;
+            bufferOffset = bufferUpto * ByteBlockPool.BYTE_BLOCK_SIZE;
 
             buffer = pool.buffers[bufferUpto];
-            upto = nextIndex & DocumentsWriter.BYTE_BLOCK_MASK;
+            upto = nextIndex & ByteBlockPool.BYTE_BLOCK_MASK;
 
             if (nextIndex + newSize >= endIndex)
             {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/80561f72/src/core/Index/ByteSliceWriter.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/ByteSliceWriter.cs b/src/core/Index/ByteSliceWriter.cs
index 1329441..9195a2f 100644
--- a/src/core/Index/ByteSliceWriter.cs
+++ b/src/core/Index/ByteSliceWriter.cs
@@ -42,15 +42,15 @@ namespace Lucene.Net.Index
         /// <summary> Set up the writer to write at address.</summary>
         public void Init(int address)
         {
-            slice = pool.buffers[address >> DocumentsWriter.BYTE_BLOCK_SHIFT];
+            slice = pool.buffers[address >> ByteBlockPool.BYTE_BLOCK_SHIFT];
             Debug.Assert(slice != null);
-            upto = address & DocumentsWriter.BYTE_BLOCK_MASK;
+            upto = address & ByteBlockPool.BYTE_BLOCK_MASK;
             offset0 = address;
             Debug.Assert(upto < slice.Length);
         }
 
         /// <summary>Write byte into byte slice stream </summary>
-        public void WriteByte(byte b)
+        public override void WriteByte(byte b)
         {
             Debug.Assert(slice != null);
             if (slice[upto] != 0)
@@ -64,7 +64,7 @@ namespace Lucene.Net.Index
             Debug.Assert(upto != slice.Length);
         }
 
-        public void WriteBytes(byte[] b, int offset, int len)
+        public override void WriteBytes(byte[] b, int offset, int len)
         {
             int offsetEnd = offset + len;
             while (offset < offsetEnd)
@@ -84,17 +84,17 @@ namespace Lucene.Net.Index
 
         public int Address
         {
-            get { return upto + (offset0 & DocumentsWriter.BYTE_BLOCK_NOT_MASK); }
+            get { return upto + (offset0 & DocumentsWriterPerThread.BYTE_BLOCK_NOT_MASK); }
         }
 
-        public void WriteVInt(int i)
-        {
-            while ((i & ~0x7F) != 0)
-            {
-                WriteByte((byte)((i & 0x7f) | 0x80));
-                i = Number.URShift(i, 7);
-            }
-            WriteByte((byte)i);
-        }
+        //public void WriteVInt(int i)
+        //{
+        //    while ((i & ~0x7F) != 0)
+        //    {
+        //        WriteByte((byte)((i & 0x7f) | 0x80));
+        //        i = Number.URShift(i, 7);
+        //    }
+        //    WriteByte((byte)i);
+        //}
     }
 }
\ No newline at end of file
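
The constants only change homes (DocumentsWriter to ByteBlockPool); the arithmetic is untouched: a global pool address splits into a buffer index (high bits) and an offset inside that buffer (low bits). A worked sketch, assuming Lucene's usual values (BYTE_BLOCK_SHIFT = 15, i.e. 32 KB blocks) rather than quoting the port:

    const int BYTE_BLOCK_SHIFT = 15;
    const int BYTE_BLOCK_SIZE = 1 << BYTE_BLOCK_SHIFT;   // 32768
    const int BYTE_BLOCK_MASK = BYTE_BLOCK_SIZE - 1;     // 0x7FFF

    int address = 100000;                            // global pool address
    int bufferUpto = address >> BYTE_BLOCK_SHIFT;    // buffer index: 3
    int upto = address & BYTE_BLOCK_MASK;            // offset in buffer: 1696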

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/80561f72/src/core/Index/CompositeReader.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/CompositeReader.cs b/src/core/Index/CompositeReader.cs
index 866e1d1..205c48e 100644
--- a/src/core/Index/CompositeReader.cs
+++ b/src/core/Index/CompositeReader.cs
@@ -51,7 +51,7 @@ namespace Lucene.Net.Index
          */
         protected internal abstract IList<IndexReader> GetSequentialSubReaders();
 
-        public override CompositeReaderContext Context
+        public override IndexReaderContext Context
         {
             get
             {
@@ -66,25 +66,25 @@ namespace Lucene.Net.Index
             }
         }
 
-        public abstract Fields GetTermVectors(int docID);
+        public abstract override Fields GetTermVectors(int docID);
 
-        public abstract int NumDocs { get; }
+        public abstract override int NumDocs { get; }
 
-        public abstract int MaxDoc { get; }
+        public abstract override int MaxDoc { get; }
 
-        public abstract void Document(int docID, StoredFieldVisitor visitor);
+        public abstract override void Document(int docID, StoredFieldVisitor visitor);
 
-        protected abstract void DoClose();
+        protected abstract override void DoClose();
 
-        public abstract int DocFreq(Term term);
+        public abstract override int DocFreq(Term term);
 
-        public abstract long TotalTermFreq(Term term);
+        public abstract override long TotalTermFreq(Term term);
 
-        public abstract long GetSumDocFreq(string field);
+        public abstract override long GetSumDocFreq(string field);
 
-        public abstract int GetDocCount(string field);
+        public abstract override int GetDocCount(string field);
 
-        public abstract long GetSumTotalTermFreq(string field);
+        public abstract override long GetSumTotalTermFreq(string field);
     }
 
 }

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/80561f72/src/core/Index/CompositeReaderContext.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/CompositeReaderContext.cs b/src/core/Index/CompositeReaderContext.cs
index ad57d45..12e2f4f 100644
--- a/src/core/Index/CompositeReaderContext.cs
+++ b/src/core/Index/CompositeReaderContext.cs
@@ -62,7 +62,7 @@ namespace Lucene.Net.Index
             }
         }
 
-        public override CompositeReader Reader
+        public override IndexReader Reader
         {
             get
             {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/80561f72/src/core/Index/DocValuesProcessor.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/DocValuesProcessor.cs b/src/core/Index/DocValuesProcessor.cs
index d74de92..5b94dce 100644
--- a/src/core/Index/DocValuesProcessor.cs
+++ b/src/core/Index/DocValuesProcessor.cs
@@ -31,7 +31,7 @@ namespace Lucene.Net.Index
 
         public override void AddField(int docID, IIndexableField field, FieldInfo fieldInfo)
         {
-            FieldInfo.DocValuesType dvType = field.FieldType.DocValueType;
+            FieldInfo.DocValuesType dvType = field.FieldTypeValue.DocValueType;
             if (dvType != null)
             {
                 fieldInfo.DocValuesTypeValue = dvType;

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/80561f72/src/core/Index/DocumentsWriter.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/DocumentsWriter.cs b/src/core/Index/DocumentsWriter.cs
index 6c64af9..7d944aa 100644
--- a/src/core/Index/DocumentsWriter.cs
+++ b/src/core/Index/DocumentsWriter.cs
@@ -404,7 +404,7 @@ namespace Lucene.Net.Index
             }
         }
 
-        internal void Close()
+        public void Dispose()
         {
             closed = true;
             flushControl.SetClosed();
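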

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/80561f72/src/core/Index/FilterDirectoryReader.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/FilterDirectoryReader.cs b/src/core/Index/FilterDirectoryReader.cs
index f221082..938e2f9 100644
--- a/src/core/Index/FilterDirectoryReader.cs
+++ b/src/core/Index/FilterDirectoryReader.cs
@@ -55,7 +55,7 @@ namespace Lucene.Net.Index
             return instance == null ? null : DoWrapDirectoryReader(instance);
         }
 
-        protected override DirectoryReader DoOpenIfChanged()
+        protected internal override DirectoryReader DoOpenIfChanged()
         {
             return WrapDirectoryReader(instance.DoOpenIfChanged());
         }

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/80561f72/src/core/Index/FreqProxTermsWriterPerField.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/FreqProxTermsWriterPerField.cs b/src/core/Index/FreqProxTermsWriterPerField.cs
index 8cf260c..fed6bea 100644
--- a/src/core/Index/FreqProxTermsWriterPerField.cs
+++ b/src/core/Index/FreqProxTermsWriterPerField.cs
@@ -77,7 +77,7 @@ namespace Lucene.Net.Index
 
         internal bool hasPayloads;
 
-        internal override void SkippingLongTerm()
+        public override void SkippingLongTerm()
         {
         }
 
@@ -109,7 +109,7 @@ namespace Lucene.Net.Index
             }
         }
 
-        internal override bool Start(IIndexableField[] fields, int count)
+        public override bool Start(IIndexableField[] fields, int count)
         {
             for (int i = 0; i < count; i++)
             {
@@ -121,7 +121,7 @@ namespace Lucene.Net.Index
             return false;
         }
 
-        internal override void Start(IIndexableField f)
+        public override void Start(IIndexableField f)
         {
             if (fieldState.attributeSource.HasAttribute<IPayloadAttribute>())
             {
@@ -186,7 +186,7 @@ namespace Lucene.Net.Index
             postings.lastOffsets[termID] = startOffset;
         }
 
-        internal override void NewTerm(int termID)
+        public override void NewTerm(int termID)
         {
             // First time we're seeing this term since the last
             // flush
@@ -219,7 +219,7 @@ namespace Lucene.Net.Index
             fieldState.uniqueTermCount++;
         }
 
-        internal override void AddTerm(int termID)
+        public override void AddTerm(int termID)
         {
             ////assert docState.testPoint("FreqProxTermsWriterPerField.addTerm start");
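
The internal-to-public flips in this file follow from a C# rule: an override must repeat the base member's accessibility exactly (Java, by contrast, lets a subclass widen visibility). A minimal sketch with an illustrative base class:

    public abstract class PerFieldSketch
    {
        public abstract void NewTerm(int termID);
    }

    public class PerFieldImpl : PerFieldSketch
    {
        // "internal override" here would be CS0507: cannot change
        // access modifiers when overriding an inherited member.
        public override void NewTerm(int termID) { }
    }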
 

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/80561f72/src/core/Index/IIndexableField.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/IIndexableField.cs b/src/core/Index/IIndexableField.cs
index 2e3a2c3..29ff853 100644
--- a/src/core/Index/IIndexableField.cs
+++ b/src/core/Index/IIndexableField.cs
@@ -12,7 +12,7 @@ namespace Lucene.Net.Index
     {
         string Name { get; }
 
-        IIndexableFieldType FieldType { get; }
+        IIndexableFieldType FieldTypeValue { get; }
 
         float Boost { get; }
 

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/80561f72/src/core/Index/IndexWriter.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/IndexWriter.cs b/src/core/Index/IndexWriter.cs
index aaebb1f..2f4c37b 100644
--- a/src/core/Index/IndexWriter.cs
+++ b/src/core/Index/IndexWriter.cs
@@ -873,7 +873,7 @@ namespace Lucene.Net.Index
                     infoStream.Message("IW", "now flush at close waitForMerges=" + waitForMerges);
                 }
 
-                docWriter.Close();
+                docWriter.Dispose();
 
                 try
                 {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/80561f72/src/core/Index/IndexWriterConfig.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/IndexWriterConfig.cs b/src/core/Index/IndexWriterConfig.cs
index 1ad38d6..3003acc 100644
--- a/src/core/Index/IndexWriterConfig.cs
+++ b/src/core/Index/IndexWriterConfig.cs
@@ -406,32 +406,32 @@ namespace Lucene.Net.Index
             return SetInfoStream(new PrintStreamInfoStream(printStream));
         }
 
-        public override IndexWriterConfig SetMaxBufferedDeleteTerms(int maxBufferedDeleteTerms)
+        public override LiveIndexWriterConfig SetMaxBufferedDeleteTerms(int maxBufferedDeleteTerms)
         {
             return (IndexWriterConfig)base.SetMaxBufferedDeleteTerms(maxBufferedDeleteTerms);
         }
 
-        public override IndexWriterConfig SetMaxBufferedDocs(int maxBufferedDocs)
+        public override LiveIndexWriterConfig SetMaxBufferedDocs(int maxBufferedDocs)
         {
             return (IndexWriterConfig)base.SetMaxBufferedDocs(maxBufferedDocs);
         }
 
-        public override IndexWriterConfig SetMergedSegmentWarmer(IndexReaderWarmer mergeSegmentWarmer)
+        public override LiveIndexWriterConfig SetMergedSegmentWarmer(IndexReaderWarmer mergeSegmentWarmer)
         {
             return (IndexWriterConfig)base.SetMergedSegmentWarmer(mergeSegmentWarmer);
         }
 
-        public override IndexWriterConfig SetRAMBufferSizeMB(double ramBufferSizeMB)
+        public override LiveIndexWriterConfig SetRAMBufferSizeMB(double ramBufferSizeMB)
         {
             return (IndexWriterConfig)base.SetRAMBufferSizeMB(ramBufferSizeMB);
         }
 
-        public override IndexWriterConfig SetReaderTermsIndexDivisor(int divisor)
+        public override LiveIndexWriterConfig SetReaderTermsIndexDivisor(int divisor)
         {
             return (IndexWriterConfig)base.SetReaderTermsIndexDivisor(divisor);
         }
 
-        public override IndexWriterConfig SetTermIndexInterval(int interval)
+        public override LiveIndexWriterConfig SetTermIndexInterval(int interval)
         {
             return (IndexWriterConfig)base.SetTermIndexInterval(interval);
         }
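
These signature changes work around a language difference: Java allows covariant return types on overrides, so the Java class narrows each setter's return to IndexWriterConfig, while C# at this language version requires the override to keep the base return type, hence the casts in the bodies. A minimal sketch:

    public class LiveConfigSketch
    {
        public int Value;
        public virtual LiveConfigSketch SetValue(int v) { Value = v; return this; }
    }

    public class ConfigSketch : LiveConfigSketch
    {
        // Return type must stay LiveConfigSketch; callers cast if
        // they need the derived type back.
        public override LiveConfigSketch SetValue(int v)
        {
            return (ConfigSketch)base.SetValue(v);
        }
    }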

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/80561f72/src/core/Index/KeepOnlyLastCommitDeletionPolicy.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/KeepOnlyLastCommitDeletionPolicy.cs b/src/core/Index/KeepOnlyLastCommitDeletionPolicy.cs
index 0c0f705..f824e6b 100644
--- a/src/core/Index/KeepOnlyLastCommitDeletionPolicy.cs
+++ b/src/core/Index/KeepOnlyLastCommitDeletionPolicy.cs
@@ -33,14 +33,14 @@ namespace Lucene.Net.Index
         }
 
         /// <summary> Deletes all commits except the most recent one.</summary>
-        public void OnInit<T>(IList<T> commits) where T : IndexCommit
+        public override void OnInit<T>(IList<T> commits)
         {
             // Note that commits.size() should normally be 1:
             OnCommit(commits);
         }
 
         /// <summary> Deletes all commits except the most recent one.</summary>
-        public void OnCommit<T>(IList<T> commits) where T : IndexCommit
+        public override void OnCommit<T>(IList<T> commits)
         {
             // Note that commits.size() should normally be 2 (if not
             // called by onInit above):
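
Dropping the "where T : IndexCommit" clauses reflects another override rule: a generic method override inherits the base method's constraints and, at this language version, may not restate them (error CS0460). A minimal sketch:

    using System.Collections.Generic;

    public abstract class PolicySketch
    {
        public abstract void OnInit<T>(IList<T> commits) where T : class;
    }

    public class KeepLastSketch : PolicySketch
    {
        // Constraint "where T : class" is inherited; restating it
        // here would be CS0460.
        public override void OnInit<T>(IList<T> commits) { }
    }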

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/80561f72/src/core/Index/LogMergePolicy.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/LogMergePolicy.cs b/src/core/Index/LogMergePolicy.cs
index 76e0252..286ebd5 100644
--- a/src/core/Index/LogMergePolicy.cs
+++ b/src/core/Index/LogMergePolicy.cs
@@ -547,7 +547,7 @@ namespace Lucene.Net.Index
         /// will return multiple merges, allowing the <see cref="MergeScheduler" />
         /// to use concurrency. 
         /// </summary>
-        public override MergeSpecification FindMerges(MergeTrigger mergeTrigger, SegmentInfos infos)
+        public override MergeSpecification FindMerges(MergeTrigger? mergeTrigger, SegmentInfos infos)
         {
             int numSegments = infos.Count;
             if (Verbose)
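
Making the trigger parameter MergeTrigger? mirrors Java, where an enum reference may be null; C# enums are value types, so nullability has to be explicit. A small sketch (EXPLICIT is assumed to be one of the 4.x trigger values):

    MergeTrigger? trigger = null;                        // Java: MergeTrigger trigger = null;
    bool isExplicit = trigger == MergeTrigger.EXPLICIT;  // false; no null check needed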


[04/50] [abbrv] Port: more util unit tests

Posted by mh...@apache.org.
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/06f5d4b8/test/core/Util/TestPagedBytes.cs
----------------------------------------------------------------------
diff --git a/test/core/Util/TestPagedBytes.cs b/test/core/Util/TestPagedBytes.cs
new file mode 100644
index 0000000..bd80843
--- /dev/null
+++ b/test/core/Util/TestPagedBytes.cs
@@ -0,0 +1,131 @@
+using System;
+using Lucene.Net.Store;
+using Lucene.Net.Support;
+using Lucene.Net.Test.Support;
+using Lucene.Net.Util;
+using NUnit.Framework;
+
+namespace Lucene.Net.Test.Util
+{
+    [TestFixture]
+    public class TestPagedBytes : LuceneTestCase
+    {
+        [Test]
+        public virtual void TestDataInputOutput()
+        {
+            var random = new Random();
+            for (var iter = 0; iter < 5 * RANDOM_MULTIPLIER; iter++)
+            {
+                var dir = NewFSDirectory(_TestUtil.GetTempDir("testOverflow"));
+                if (dir is MockDirectoryWrapper)
+                {
+                    ((MockDirectoryWrapper)dir).SetThrottling(MockDirectoryWrapper.Throttling.NEVER);
+                }
+                int blockBits = _TestUtil.NextInt(random, 1, 20);
+                var blockSize = 1 << blockBits;
+                var p = new PagedBytes(blockBits);
+                var indexOutput = dir.CreateOutput("foo", IOContext.DEFAULT);
+                int numBytes = _TestUtil.NextInt(random, 2, 10000000);
+
+                var answer = new byte[numBytes];
+                random.NextBytes(answer);
+                var written = 0;
+                while (written < numBytes)
+                {
+                    if (random.Next(10) == 7)
+                    {
+                        indexOutput.WriteByte(answer[written++]);
+                    }
+                    else
+                    {
+                        int chunk = Math.Min(random.Next(1000), numBytes - written);
+                        indexOutput.WriteBytes(answer, written, chunk);
+                        written += chunk;
+                    }
+                }
+
+                indexOutput.Close();
+                IndexInput input = dir.OpenInput("foo", IOContext.DEFAULT);
+                var dataInput = (DataInput)input.Clone();
+
+                p.Copy(input, input.Length);
+                var reader = p.Freeze(random.NextBool());
+
+                var verify = new byte[numBytes];
+                var read = 0;
+                while (read < numBytes)
+                {
+                    if (random.Next(10) == 7)
+                    {
+                        verify[read++] = dataInput.ReadByte();
+                    }
+                    else
+                    {
+                        var chunk = Math.Min(random.Next(1000), numBytes - read);
+                        dataInput.ReadBytes(verify, read, chunk);
+                        read += chunk;
+                    }
+                }
+                assertTrue(Arrays.Equals(answer, verify));
+
+                var slice = new BytesRef();
+                for (var iter2 = 0; iter2 < 100; iter2++)
+                {
+                    var pos = random.Next(numBytes - 1);
+                    var len = random.Next(Math.Min(blockSize + 1, numBytes - pos));
+                    reader.FillSlice(slice, pos, len);
+                    for (var byteUpto = 0; byteUpto < len; byteUpto++)
+                    {
+                        assertEquals(answer[pos + byteUpto], slice.bytes[slice.offset + byteUpto]);
+                    }
+                }
+                input.Close();
+                dir.Close();
+            }
+        }
+
+        [Ignore] // memory hole
+        [Test]
+        public virtual void TestOverflow()
+        {
+            var random = new Random();
+
+            var dir = NewFSDirectory(_TestUtil.GetTempDir("testOverflow"));
+            if (dir is MockDirectoryWrapper)
+            {
+                ((MockDirectoryWrapper)dir).SetThrottling(MockDirectoryWrapper.Throttling.NEVER);
+            }
+            int blockBits = _TestUtil.NextInt(random, 14, 28);
+            var blockSize = 1 << blockBits;
+            var arr = new byte[_TestUtil.NextInt(random, blockSize / 2, blockSize * 2)];
+            for (var i = 0; i < arr.Length; ++i)
+            {
+                arr[i] = (byte)i;
+            }
+            var numBytes = (1L << 31) + _TestUtil.NextInt(random, 1, blockSize * 3);
+            var p = new PagedBytes(blockBits);
+            IndexOutput indexOutput = dir.CreateOutput("foo", IOContext.DEFAULT);
+            for (long i = 0; i < numBytes; )
+            {
+                assertEquals(i, indexOutput.FilePointer);
+                var len = (int)Math.Min(arr.Length, numBytes - i);
+                indexOutput.WriteBytes(arr, len);
+                i += len;
+            }
+            assertEquals(numBytes, indexOutput.FilePointer);
+            indexOutput.Close();
+            IndexInput indexInput = dir.OpenInput("foo", IOContext.DEFAULT);
+            p.Copy(indexInput, numBytes);
+            var reader = p.Freeze(random.NextBool());
+
+            foreach (var offset in new long[] {0L, int.MaxValue, numBytes - 1, _TestUtil.NextLong(random, 1, numBytes - 2)})
+            {
+                var b = new BytesRef();
+                reader.FillSlice(b, offset, 1);
+                assertEquals(arr[(int)(offset % arr.Length)], b.bytes[b.offset]);
+            }
+            indexInput.Close();
+            dir.Close();
+        }
+    }
+}
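
Taken together, the test drives the whole PagedBytes round trip. A condensed sketch of the API as used above, assuming a Directory named dir that already holds a file "foo" (the meaning of the bool passed to Freeze is read from context, not from documentation):

    var p = new PagedBytes(10);                  // 2^10-byte blocks
    IndexInput input = dir.OpenInput("foo", IOContext.DEFAULT);
    p.Copy(input, input.Length);                 // page the file into blocks
    input.Close();
    var reader = p.Freeze(true);                 // freeze into a readable form
    var slice = new BytesRef();
    reader.FillSlice(slice, 0, 16);              // view of the first 16 bytes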


[44/50] [abbrv] git commit: Some bugfixes in token parsing

Posted by mh...@apache.org.
Some bugfixes in token parsing

Fix use of octal literals carried over from Java; use goto in place of Java's labeled "break outer"


Project: http://git-wip-us.apache.org/repos/asf/lucenenet/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucenenet/commit/02a37a05
Tree: http://git-wip-us.apache.org/repos/asf/lucenenet/tree/02a37a05
Diff: http://git-wip-us.apache.org/repos/asf/lucenenet/diff/02a37a05

Branch: refs/heads/branch_4x
Commit: 02a37a050953848bde175a9f9f33dc43334cd1ee
Parents: 0e6eb14
Author: Paul Irwin <pa...@gmail.com>
Authored: Fri Aug 9 14:18:02 2013 -0400
Committer: Paul Irwin <pa...@gmail.com>
Committed: Fri Aug 9 14:18:02 2013 -0400

----------------------------------------------------------------------
 src/contrib/QueryParsers/Classic/QueryParser.cs | 11 ++-
 .../Classic/QueryParserTokenManager.cs          | 98 ++++++++++----------
 2 files changed, 56 insertions(+), 53 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucenenet/blob/02a37a05/src/contrib/QueryParsers/Classic/QueryParser.cs
----------------------------------------------------------------------
diff --git a/src/contrib/QueryParsers/Classic/QueryParser.cs b/src/contrib/QueryParsers/Classic/QueryParser.cs
index ca76ac5..74864ee 100644
--- a/src/contrib/QueryParsers/Classic/QueryParser.cs
+++ b/src/contrib/QueryParsers/Classic/QueryParser.cs
@@ -113,7 +113,6 @@ namespace Lucene.Net.QueryParsers.Classic
 
             while (true)
             {
-                bool shouldBreakOuter = false;
                 switch ((_jj_ntk == -1) ? jj_ntk() : _jj_ntk)
                 {
                     case QueryParserConstants.AND:
@@ -136,18 +135,20 @@ namespace Lucene.Net.QueryParsers.Classic
                         break;
                     default:
                         jj_la1[4] = jj_gen;
-                        shouldBreakOuter = true;
-                        break;
+                        goto label_1;
                 }
 
-                if (shouldBreakOuter) break;
                 conj = Conjunction();
                 mods = Modifiers();
                 q = Clause(field);
                 AddClause(clauses, conj, mods, q);
             }
+
+        label_1:
             if (clauses.Count == 1 && firstQuery != null)
-            { if (true) return firstQuery; }
+            { 
+                if (true) return firstQuery; 
+            }
             else
             {
                 { if (true) return GetBooleanQuery(clauses); }
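
The flag that was removed existed only to simulate Java's labeled "break label_1;", which exits the enclosing while from inside the switch; C# has no labeled break, so a goto past the loop is the direct rendering. A minimal sketch of the translation, with hypothetical helpers:

    while (true)
    {
        int token = NextToken();    // hypothetical token source
        if (token < 0)
            goto label_1;           // Java: break label_1;
        Consume(token);             // hypothetical
    }
    label_1: ;                      // execution resumes here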

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/02a37a05/src/contrib/QueryParsers/Classic/QueryParserTokenManager.cs
----------------------------------------------------------------------
diff --git a/src/contrib/QueryParsers/Classic/QueryParserTokenManager.cs b/src/contrib/QueryParsers/Classic/QueryParserTokenManager.cs
index 29ed068..6878a9f 100644
--- a/src/contrib/QueryParsers/Classic/QueryParserTokenManager.cs
+++ b/src/contrib/QueryParsers/Classic/QueryParserTokenManager.cs
@@ -71,17 +71,17 @@ namespace Lucene.Net.QueryParsers.Classic
             return jjMoveNfa_2(state, pos + 1);
         }
 
-        internal static readonly long[] jjbitVec0 = {
+        internal static readonly ulong[] jjbitVec0 = {
             0x1L, 0x0L, 0x0L, 0x0L
         };
-        internal static readonly long[] jjbitVec1 = {
-            unchecked((long)0xfffffffffffffffeL), unchecked((long)0xffffffffffffffffL), unchecked((long)0xffffffffffffffffL), unchecked((long)0xffffffffffffffffL)
+        internal static readonly ulong[] jjbitVec1 = {
+            0xfffffffffffffffeL, 0xffffffffffffffffL, 0xffffffffffffffffL, 0xffffffffffffffffL
         };
-        internal static readonly long[] jjbitVec3 = {
-            0x0L, 0x0L, unchecked((long)0xffffffffffffffffL), unchecked((long)0xffffffffffffffffL)
+        internal static readonly ulong[] jjbitVec3 = {
+            0x0L, 0x0L, 0xffffffffffffffffL, 0xffffffffffffffffL
         };
-        internal static readonly long[] jjbitVec4 = {
-            unchecked((long)0xfffefffffffffffeL), unchecked((long)0xffffffffffffffffL), unchecked((long)0xffffffffffffffffL), unchecked((long)0xffffffffffffffffL)
+        internal static readonly ulong[] jjbitVec4 = {
+            0xfffefffffffffffeL, 0xffffffffffffffffL, 0xffffffffffffffffL, 0xffffffffffffffffL
         };
 
         private int jjMoveNfa_2(int startState, int curPos)
@@ -97,21 +97,21 @@ namespace Lucene.Net.QueryParsers.Classic
                     ReInitRounds();
                 if (curChar < 64)
                 {
-                    long l = 1L << curChar;
+                    ulong l = (ulong)(1L << curChar);
                     do
                     {
                         switch (jjstateSet[--i])
                         {
                             case 49:
                             case 33:
-                                if ((unchecked((long)0xfbff7cf8ffffd9ffL) & l) == 0L)
+                                if ((0xfbff7cf8ffffd9ffL & l) == 0L)
                                     break;
                                 if (kind > 23)
                                     kind = 23;
                                 jjCheckNAddTwoStates(33, 34);
                                 break;
                             case 0:
-                                if ((unchecked((long)0xfbff54f8ffffd9ffL) & l) != 0L)
+                                if ((0xfbff54f8ffffd9ffL & l) != 0L)
                                 {
                                     if (kind > 23)
                                         kind = 23;
@@ -172,7 +172,7 @@ namespace Lucene.Net.QueryParsers.Classic
                                     jjCheckNAddStates(3, 5);
                                 break;
                             case 17:
-                                if ((unchecked((long)0xfffffffbffffffffL) & l) != 0L)
+                                if ((0xfffffffbffffffffL & l) != 0L)
                                     jjCheckNAddStates(3, 5);
                                 break;
                             case 19:
@@ -229,7 +229,7 @@ namespace Lucene.Net.QueryParsers.Classic
                                     kind = 22;
                                 break;
                             case 32:
-                                if ((unchecked((long)0xfbff54f8ffffd9ffL) & l) == 0L)
+                                if ((0xfbff54f8ffffd9ffL & l) == 0L)
                                     break;
                                 if (kind > 23)
                                     kind = 23;
@@ -246,7 +246,7 @@ namespace Lucene.Net.QueryParsers.Classic
                                     jjCheckNAddStates(0, 2);
                                 break;
                             case 37:
-                                if ((unchecked((long)0xffff7fffffffffffL) & l) != 0L)
+                                if ((0xffff7fffffffffffL & l) != 0L)
                                     jjCheckNAddStates(0, 2);
                                 break;
                             case 40:
@@ -285,13 +285,13 @@ namespace Lucene.Net.QueryParsers.Classic
                 }
                 else if (curChar < 128)
                 {
-                    long l = 1L << (curChar & 077);
+                    ulong l = (ulong)(1L << (curChar & 63));
                     do
                     {
                         switch (jjstateSet[--i])
                         {
                             case 49:
-                                if ((unchecked((long)0x97ffffff87ffffffL) & l) != 0L)
+                                if ((0x97ffffff87ffffffL & l) != 0L)
                                 {
                                     if (kind > 23)
                                         kind = 23;
@@ -301,7 +301,7 @@ namespace Lucene.Net.QueryParsers.Classic
                                     jjCheckNAddTwoStates(35, 35);
                                 break;
                             case 0:
-                                if ((unchecked((long)0x97ffffff87ffffffL) & l) != 0L)
+                                if ((0x97ffffff87ffffffL & l) != 0L)
                                 {
                                     if (kind > 20)
                                         kind = 20;
@@ -315,7 +315,7 @@ namespace Lucene.Net.QueryParsers.Classic
                                         kind = 21;
                                     jjCheckNAddStates(24, 26);
                                 }
-                                if ((unchecked((long)0x97ffffff87ffffffL) & l) != 0L)
+                                if ((0x97ffffff87ffffffL & l) != 0L)
                                 {
                                     if (kind > 23)
                                         kind = 23;
@@ -371,7 +371,7 @@ namespace Lucene.Net.QueryParsers.Classic
                                     jjstateSet[jjnewStateCnt++] = 11;
                                 break;
                             case 17:
-                                if ((unchecked((long)0xffffffffefffffffL) & l) != 0L)
+                                if ((0xffffffffefffffffL & l) != 0L)
                                     jjCheckNAddStates(3, 5);
                                 break;
                             case 18:
@@ -389,7 +389,7 @@ namespace Lucene.Net.QueryParsers.Classic
                                 jjCheckNAddStates(24, 26);
                                 break;
                             case 25:
-                                if ((unchecked((long)0x97ffffff87ffffffL) & l) == 0L)
+                                if ((0x97ffffff87ffffffL & l) == 0L)
                                     break;
                                 if (kind > 21)
                                     kind = 21;
@@ -405,7 +405,7 @@ namespace Lucene.Net.QueryParsers.Classic
                                 jjCheckNAddTwoStates(25, 26);
                                 break;
                             case 28:
-                                if ((unchecked((long)0x97ffffff87ffffffL) & l) == 0L)
+                                if ((0x97ffffff87ffffffL & l) == 0L)
                                     break;
                                 if (kind > 21)
                                     kind = 21;
@@ -421,14 +421,14 @@ namespace Lucene.Net.QueryParsers.Classic
                                 jjCheckNAddTwoStates(28, 29);
                                 break;
                             case 32:
-                                if ((unchecked((long)0x97ffffff87ffffffL) & l) == 0L)
+                                if ((0x97ffffff87ffffffL & l) == 0L)
                                     break;
                                 if (kind > 23)
                                     kind = 23;
                                 jjCheckNAddTwoStates(33, 34);
                                 break;
                             case 33:
-                                if ((unchecked((long)0x97ffffff87ffffffL) & l) == 0L)
+                                if ((0x97ffffff87ffffffL & l) == 0L)
                                     break;
                                 if (kind > 23)
                                     kind = 23;
@@ -451,14 +451,14 @@ namespace Lucene.Net.QueryParsers.Classic
                                     jjstateSet[jjnewStateCnt++] = 38;
                                 break;
                             case 41:
-                                if ((unchecked((long)0x97ffffff87ffffffL) & l) == 0L)
+                                if ((0x97ffffff87ffffffL & l) == 0L)
                                     break;
                                 if (kind > 20)
                                     kind = 20;
                                 jjCheckNAddStates(6, 10);
                                 break;
                             case 42:
-                                if ((unchecked((long)0x97ffffff87ffffffL) & l) == 0L)
+                                if ((0x97ffffff87ffffffL & l) == 0L)
                                     break;
                                 if (kind > 20)
                                     kind = 20;
@@ -474,7 +474,7 @@ namespace Lucene.Net.QueryParsers.Classic
                                 jjCheckNAddTwoStates(42, 43);
                                 break;
                             case 45:
-                                if ((unchecked((long)0x97ffffff87ffffffL) & l) != 0L)
+                                if ((0x97ffffff87ffffffL & l) != 0L)
                                     jjCheckNAddStates(18, 20);
                                 break;
                             case 46:
@@ -496,9 +496,9 @@ namespace Lucene.Net.QueryParsers.Classic
                 {
                     int hiByte = (int)(curChar >> 8);
                     int i1 = hiByte >> 6;
-                    long l1 = 1L << (hiByte & 077);
+                    ulong l1 = (ulong)(1L << (hiByte & 63));
                     int i2 = (curChar & 0xff) >> 6;
-                    long l2 = 1L << (curChar & 077);
+                    ulong l2 = (ulong)(1L << (curChar & 63));
                     do
                     {
                         switch (jjstateSet[--i])
@@ -650,7 +650,7 @@ namespace Lucene.Net.QueryParsers.Classic
                     ReInitRounds();
                 if (curChar < 64)
                 {
-                    long l = 1L << curChar;
+                    ulong l = (ulong)(1L << curChar);
                     do
                     {
                         switch (jjstateSet[--i])
@@ -679,7 +679,7 @@ namespace Lucene.Net.QueryParsers.Classic
                 }
                 else if (curChar < 128)
                 {
-                    long l = 1L << (curChar & 077);
+                    ulong l = (ulong)(1L << (curChar & 63));
                     do
                     {
                         switch (jjstateSet[--i])
@@ -692,9 +692,9 @@ namespace Lucene.Net.QueryParsers.Classic
                 {
                     int hiByte = (int)(curChar >> 8);
                     int i1 = hiByte >> 6;
-                    long l1 = 1L << (hiByte & 077);
+                    ulong l1 = (ulong)(1L << (hiByte & 63));
                     int i2 = (curChar & 0xff) >> 6;
-                    long l2 = 1L << (curChar & 077);
+                    ulong l2 = (ulong)(1L << (curChar & 63));
                     do
                     {
                         switch (jjstateSet[--i])
@@ -795,13 +795,13 @@ namespace Lucene.Net.QueryParsers.Classic
                     ReInitRounds();
                 if (curChar < 64)
                 {
-                    long l = 1L << curChar;
+                    ulong l = (ulong)(1L << curChar);
                     do
                     {
                         switch (jjstateSet[--i])
                         {
                             case 0:
-                                if ((unchecked((long)0xfffffffeffffffffL) & l) != 0L)
+                                if ((0xfffffffeffffffffL & l) != 0L)
                                 {
                                     if (kind > 32)
                                         kind = 32;
@@ -820,7 +820,7 @@ namespace Lucene.Net.QueryParsers.Classic
                                     jjCheckNAddTwoStates(2, 4);
                                 break;
                             case 2:
-                                if ((unchecked((long)0xfffffffbffffffffL) & l) != 0L)
+                                if ((0xfffffffbffffffffL & l) != 0L)
                                     jjCheckNAddStates(33, 35);
                                 break;
                             case 3:
@@ -832,7 +832,7 @@ namespace Lucene.Net.QueryParsers.Classic
                                     kind = 31;
                                 break;
                             case 6:
-                                if ((unchecked((long)0xfffffffeffffffffL) & l) == 0L)
+                                if ((0xfffffffeffffffffL & l) == 0L)
                                     break;
                                 if (kind > 32)
                                     kind = 32;
@@ -844,14 +844,14 @@ namespace Lucene.Net.QueryParsers.Classic
                 }
                 else if (curChar < 128)
                 {
-                    long l = 1L << (curChar & 077);
+                    ulong l = (ulong)(1L << (curChar & 63));
                     do
                     {
                         switch (jjstateSet[--i])
                         {
                             case 0:
                             case 6:
-                                if ((unchecked((long)0xdfffffffdfffffffL) & l) == 0L)
+                                if ((0xdfffffffdfffffffL & l) == 0L)
                                     break;
                                 if (kind > 32)
                                     kind = 32;
@@ -872,9 +872,9 @@ namespace Lucene.Net.QueryParsers.Classic
                 {
                     int hiByte = (int)(curChar >> 8);
                     int i1 = hiByte >> 6;
-                    long l1 = 1L << (hiByte & 077);
+                    ulong l1 = (ulong)(1L << (hiByte & 63));
                     int i2 = (curChar & 0xff) >> 6;
-                    long l2 = 1L << (curChar & 077);
+                    ulong l2 = (ulong)(1L << (curChar & 63));
                     do
                     {
                         switch (jjstateSet[--i])
@@ -927,7 +927,7 @@ namespace Lucene.Net.QueryParsers.Classic
            1, 2, 4, 5, 
         };
 
-        private static bool jjCanMove_0(int hiByte, int i1, int i2, long l1, long l2)
+        private static bool jjCanMove_0(int hiByte, int i1, int i2, ulong l1, ulong l2)
         {
             switch (hiByte)
             {
@@ -938,7 +938,7 @@ namespace Lucene.Net.QueryParsers.Classic
             }
         }
 
-        private static bool jjCanMove_1(int hiByte, int i1, int i2, long l1, long l2)
+        private static bool jjCanMove_1(int hiByte, int i1, int i2, ulong l1, ulong l2)
         {
             switch (hiByte)
             {
@@ -951,7 +951,7 @@ namespace Lucene.Net.QueryParsers.Classic
             }
         }
 
-        private static bool jjCanMove_2(int hiByte, int i1, int i2, long l1, long l2)
+        private static bool jjCanMove_2(int hiByte, int i1, int i2, ulong l1, ulong l2)
         {
             switch (hiByte)
             {
@@ -992,7 +992,7 @@ namespace Lucene.Net.QueryParsers.Classic
         };
 
         protected ICharStream input_stream;
-        private readonly int[] jjrounds = new int[49];
+        private readonly uint[] jjrounds = new uint[49];
         private readonly int[] jjstateSet = new int[98];
         protected char curChar;
 
@@ -1021,9 +1021,9 @@ namespace Lucene.Net.QueryParsers.Classic
         private void ReInitRounds()
         {
             int i;
-            jjround = unchecked((int)0x80000001);
+            jjround = 0x80000001;
             for (i = 49; i-- > 0; )
-                jjrounds[i] = unchecked((int)0x80000000);
+                jjrounds[i] = 0x80000000;
         }
 
         /** Reinitialise parser. */
@@ -1069,7 +1069,7 @@ namespace Lucene.Net.QueryParsers.Classic
         int curLexState = 2;
         int defaultLexState = 2;
         int jjnewStateCnt;
-        int jjround;
+        uint jjround;
         int jjmatchedPos;
         int jjmatchedKind;
 
@@ -1114,7 +1114,7 @@ namespace Lucene.Net.QueryParsers.Classic
                 {
                     if (jjmatchedPos + 1 < curPos)
                         input_stream.Backup(curPos - jjmatchedPos - 1);
-                    if ((jjtoToken[jjmatchedKind >> 6] & (1L << (jjmatchedKind & 077))) != 0L)
+                    if ((jjtoToken[jjmatchedKind >> 6] & (1L << (jjmatchedKind & 63))) != 0L)
                     {
                         matchedToken = jjFillToken();
                         if (jjnewLexState[jjmatchedKind] != -1)
@@ -1125,7 +1125,7 @@ namespace Lucene.Net.QueryParsers.Classic
                     {
                         if (jjnewLexState[jjmatchedKind] != -1)
                             curLexState = jjnewLexState[jjmatchedKind];
-                        continue;
+                        goto EOFLoop;
                     }
                 }
                 int error_line = input_stream.EndLine;
@@ -1151,6 +1151,8 @@ namespace Lucene.Net.QueryParsers.Classic
                     error_after = curPos <= 1 ? "" : input_stream.GetImage();
                 }
                 throw new TokenMgrError(EOFSeen, curLexState, error_line, error_column, error_after, curChar, TokenMgrError.LEXICAL_ERROR);
+
+            EOFLoop: ;
             }
         }
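
The token manager keeps 64-entry character classes as 64-bit bitmaps; storing them as ulong removes the unchecked((long)...) casts that Java's signed longs forced, and Java's octal shift count 077 is simply 63. A small sketch of the membership test (the mask value is illustrative):

    const ulong mask = 0xfbff7cf8ffffd9ffUL;   // 64-character class bitmap
    char curChar = ';';                        // some character below 64
    ulong l = 1UL << (curChar & 63);           // Java wrote (curChar & 077)
    bool inClass = (mask & l) != 0UL;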
 


[23/50] [abbrv] git commit: Some cleanup

Posted by mh...@apache.org.
Some cleanup


Project: http://git-wip-us.apache.org/repos/asf/lucenenet/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucenenet/commit/2a56f3bd
Tree: http://git-wip-us.apache.org/repos/asf/lucenenet/tree/2a56f3bd
Diff: http://git-wip-us.apache.org/repos/asf/lucenenet/diff/2a56f3bd

Branch: refs/heads/branch_4x
Commit: 2a56f3bdd16c219f0a61757065991688aa4a7cdb
Parents: 16ff6a7
Author: Paul Irwin <pa...@gmail.com>
Authored: Wed Jul 24 11:03:14 2013 -0400
Committer: Paul Irwin <pa...@gmail.com>
Committed: Wed Jul 24 11:03:14 2013 -0400

----------------------------------------------------------------------
 .../Compressing/GrowableByteArrayDataOutput.cs  |  12 +-
 src/core/Codecs/Lucene3x/Lucene3xCodec.cs       |   5 +-
 src/core/Document/DoubleField.cs                |   7 +-
 src/core/Document/FloatField.cs                 |   7 +-
 src/core/Document/IntField.cs                   |   7 +-
 src/core/Document/LongField.cs                  |   7 +-
 src/core/Document/StringField.cs                |   7 +-
 src/core/Index/CheckIndex.cs                    |   1 +
 src/core/Index/SegmentInfo.cs                   |   1 +
 src/core/Index/SegmentInfos.cs                  |   1 +
 src/core/Index/TieredMergePolicy.cs             |   2 +-
 src/core/Search/MultiTermQuery.cs               | 118 +++++++++++++++----
 src/core/Search/Payloads/PayloadTermQuery.cs    |   2 +-
 src/core/Search/PrefixTermsEnum.cs              |   2 +-
 src/core/Search/ScoringRewrite.cs               |   2 +-
 src/core/Search/Spans/SpanFirstQuery.cs         |   8 +-
 src/core/Search/TermRangeFilter.cs              |   2 +-
 src/core/Search/TermRangeTermEnum.cs            |   2 +-
 src/core/Search/TopTermsRewrite.cs              |   2 +-
 src/core/Store/CompoundFileDirectory.cs         |   3 +-
 src/core/Util/Automaton/CompiledAutomaton.cs    |   1 +
 src/core/Util/Automaton/LevenshteinAutomata.cs  |   4 +-
 src/core/Util/CommandLineUtil.cs                |   2 +-
 23 files changed, 141 insertions(+), 64 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucenenet/blob/2a56f3bd/src/core/Codecs/Compressing/GrowableByteArrayDataOutput.cs
----------------------------------------------------------------------
diff --git a/src/core/Codecs/Compressing/GrowableByteArrayDataOutput.cs b/src/core/Codecs/Compressing/GrowableByteArrayDataOutput.cs
index d6b873d..e980736 100644
--- a/src/core/Codecs/Compressing/GrowableByteArrayDataOutput.cs
+++ b/src/core/Codecs/Compressing/GrowableByteArrayDataOutput.cs
@@ -25,8 +25,8 @@ namespace Lucene.Net.Codecs.Compressing
 {
     internal sealed class GrowableByteArrayDataOutput : DataOutput
     {
-        private sbyte[] _bytes;
-        private int _length;
+        private sbyte[] bytes;
+        private int length;
 
         public GrowableByteArrayDataOutput(int cp)
         {
@@ -38,11 +38,11 @@ namespace Lucene.Net.Codecs.Compressing
         {
             get
             {
-                return _bytes;
+                return bytes;
             }
             set
             {
-                _bytes = value;
+                bytes = value;
             }
         }
 
@@ -50,11 +50,11 @@ namespace Lucene.Net.Codecs.Compressing
         {
             get
             {
-                return _length;
+                return length;
             }
             set
             {
-                _length = value;
+                length = value;
             }
         }
 

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/2a56f3bd/src/core/Codecs/Lucene3x/Lucene3xCodec.cs
----------------------------------------------------------------------
diff --git a/src/core/Codecs/Lucene3x/Lucene3xCodec.cs b/src/core/Codecs/Lucene3x/Lucene3xCodec.cs
index ea3986e..bb3cde7 100644
--- a/src/core/Codecs/Lucene3x/Lucene3xCodec.cs
+++ b/src/core/Codecs/Lucene3x/Lucene3xCodec.cs
@@ -1,4 +1,5 @@
-using Lucene.Net.Index;
+using Lucene.Net.Codecs.Lucene40;
+using Lucene.Net.Index;
 using System;
 using System.Collections.Generic;
 using System.Linq;
@@ -60,7 +61,7 @@ namespace Lucene.Net.Codecs.Lucene3x
 
         public override DocValuesFormat DocValuesFormat
         {
-            get { return docValuesFormat }
+            get { return docValuesFormat; }
         }
 
         public override StoredFieldsFormat StoredFieldsFormat

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/2a56f3bd/src/core/Document/DoubleField.cs
----------------------------------------------------------------------
diff --git a/src/core/Document/DoubleField.cs b/src/core/Document/DoubleField.cs
index 5f6860a..3d3b31c 100644
--- a/src/core/Document/DoubleField.cs
+++ b/src/core/Document/DoubleField.cs
@@ -1,4 +1,5 @@
-using System;
+using Lucene.Net.Index;
+using System;
 using System.Collections.Generic;
 using System.Linq;
 using System.Text;
@@ -15,14 +16,14 @@ namespace Lucene.Net.Documents
             TYPE_NOT_STORED.Indexed = true;
             TYPE_NOT_STORED.Tokenized = true;
             TYPE_NOT_STORED.OmitNorms = true;
-            TYPE_NOT_STORED.IndexOptions = IndexOptions.DOCS_ONLY;
+            TYPE_NOT_STORED.IndexOptions = FieldInfo.IndexOptions.DOCS_ONLY;
             TYPE_NOT_STORED.NumericTypeValue = FieldType.NumericType.DOUBLE;
             TYPE_NOT_STORED.Freeze();
 
             TYPE_STORED.Indexed = true;
             TYPE_STORED.Tokenized = true;
             TYPE_STORED.OmitNorms = true;
-            TYPE_STORED.IndexOptions = IndexOptions.DOCS_ONLY;
+            TYPE_STORED.IndexOptions = FieldInfo.IndexOptions.DOCS_ONLY;
             TYPE_STORED.NumericTypeValue = FieldType.NumericType.DOUBLE;
             TYPE_STORED.Stored = true;
             TYPE_STORED.Freeze();
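
The qualification is needed because IndexOptions is an enum nested inside FieldInfo in the 4.x port, so the bare name no longer resolves. The same one-line fix recurs in the field types below:

    FieldInfo.IndexOptions opts = FieldInfo.IndexOptions.DOCS_ONLY;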

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/2a56f3bd/src/core/Document/FloatField.cs
----------------------------------------------------------------------
diff --git a/src/core/Document/FloatField.cs b/src/core/Document/FloatField.cs
index c13f90a..893892a 100644
--- a/src/core/Document/FloatField.cs
+++ b/src/core/Document/FloatField.cs
@@ -1,4 +1,5 @@
-using System;
+using Lucene.Net.Index;
+using System;
 using System.Collections.Generic;
 using System.Linq;
 using System.Text;
@@ -14,14 +15,14 @@ namespace Lucene.Net.Documents
             TYPE_NOT_STORED.Indexed = true;
             TYPE_NOT_STORED.Tokenized = true;
             TYPE_NOT_STORED.OmitNorms = true;
-            TYPE_NOT_STORED.IndexOptions = IndexOptions.DOCS_ONLY;
+            TYPE_NOT_STORED.IndexOptions = FieldInfo.IndexOptions.DOCS_ONLY;
             TYPE_NOT_STORED.NumericTypeValue = FieldType.NumericType.FLOAT;
             TYPE_NOT_STORED.Freeze();
 
             TYPE_STORED.Indexed = true;
             TYPE_STORED.Tokenized = true;
             TYPE_STORED.OmitNorms = true;
-            TYPE_STORED.IndexOptions = IndexOptions.DOCS_ONLY;
+            TYPE_STORED.IndexOptions = FieldInfo.IndexOptions.DOCS_ONLY;
             TYPE_STORED.NumericTypeValue = FieldType.NumericType.FLOAT;
             TYPE_STORED.Stored = true;
             TYPE_STORED.Freeze();

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/2a56f3bd/src/core/Document/IntField.cs
----------------------------------------------------------------------
diff --git a/src/core/Document/IntField.cs b/src/core/Document/IntField.cs
index 247f7e0..f525b58 100644
--- a/src/core/Document/IntField.cs
+++ b/src/core/Document/IntField.cs
@@ -1,4 +1,5 @@
-using System;
+using Lucene.Net.Index;
+using System;
 using System.Collections.Generic;
 using System.Linq;
 using System.Text;
@@ -15,14 +16,14 @@ namespace Lucene.Net.Documents
             TYPE_NOT_STORED.Indexed = true;
             TYPE_NOT_STORED.Tokenized = true;
             TYPE_NOT_STORED.OmitNorms = true;
-            TYPE_NOT_STORED.IndexOptions = IndexOptions.DOCS_ONLY;
+            TYPE_NOT_STORED.IndexOptions = FieldInfo.IndexOptions.DOCS_ONLY;
             TYPE_NOT_STORED.NumericTypeValue = FieldType.NumericType.INT;
             TYPE_NOT_STORED.Freeze();
 
             TYPE_STORED.Indexed = true;
             TYPE_STORED.Tokenized = true;
             TYPE_STORED.OmitNorms = true;
-            TYPE_STORED.IndexOptions = IndexOptions.DOCS_ONLY;
+            TYPE_STORED.IndexOptions = FieldInfo.IndexOptions.DOCS_ONLY;
             TYPE_STORED.NumericTypeValue = FieldType.NumericType.INT;
             TYPE_STORED.Stored = true;
             TYPE_STORED.Freeze();

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/2a56f3bd/src/core/Document/LongField.cs
----------------------------------------------------------------------
diff --git a/src/core/Document/LongField.cs b/src/core/Document/LongField.cs
index 8b4eb05..3620164 100644
--- a/src/core/Document/LongField.cs
+++ b/src/core/Document/LongField.cs
@@ -1,4 +1,5 @@
-using System;
+using Lucene.Net.Index;
+using System;
 using System.Collections.Generic;
 using System.Linq;
 using System.Text;
@@ -15,14 +16,14 @@ namespace Lucene.Net.Documents
             TYPE_NOT_STORED.Indexed = true;
             TYPE_NOT_STORED.Tokenized = true;
             TYPE_NOT_STORED.OmitNorms = true;
-            TYPE_NOT_STORED.IndexOptions = IndexOptions.DOCS_ONLY;
+            TYPE_NOT_STORED.IndexOptions = FieldInfo.IndexOptions.DOCS_ONLY;
             TYPE_NOT_STORED.NumericTypeValue = FieldType.NumericType.LONG;
             TYPE_NOT_STORED.Freeze();
 
             TYPE_STORED.Indexed = true;
             TYPE_STORED.Tokenized = true;
             TYPE_STORED.OmitNorms = true;
-            TYPE_STORED.IndexOptions = IndexOptions.DOCS_ONLY;
+            TYPE_STORED.IndexOptions = FieldInfo.IndexOptions.DOCS_ONLY;
             TYPE_STORED.NumericTypeValue = FieldType.NumericType.LONG;
             TYPE_STORED.Stored = true;
             TYPE_STORED.Freeze();

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/2a56f3bd/src/core/Document/StringField.cs
----------------------------------------------------------------------
diff --git a/src/core/Document/StringField.cs b/src/core/Document/StringField.cs
index ef64e2e..ebe8683 100644
--- a/src/core/Document/StringField.cs
+++ b/src/core/Document/StringField.cs
@@ -1,4 +1,5 @@
-using System;
+using Lucene.Net.Index;
+using System;
 using System.Collections.Generic;
 using System.Linq;
 using System.Text;
@@ -14,13 +15,13 @@ namespace Lucene.Net.Documents
         {
             TYPE_NOT_STORED.Indexed = true;
             TYPE_NOT_STORED.OmitNorms = true;
-            TYPE_NOT_STORED.IndexOptions = IndexOptions.DOCS_ONLY;
+            TYPE_NOT_STORED.IndexOptions = FieldInfo.IndexOptions.DOCS_ONLY;
             TYPE_NOT_STORED.Tokenized = false;
             TYPE_NOT_STORED.Freeze();
 
             TYPE_STORED.Indexed = true;
             TYPE_STORED.OmitNorms = true;
-            TYPE_STORED.IndexOptions = IndexOptions.DOCS_ONLY;
+            TYPE_STORED.IndexOptions = FieldInfo.IndexOptions.DOCS_ONLY;
             TYPE_STORED.Stored = true;
             TYPE_STORED.Tokenized = true;
             TYPE_STORED.Freeze();

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/2a56f3bd/src/core/Index/CheckIndex.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/CheckIndex.cs b/src/core/Index/CheckIndex.cs
index f424182..4ea52da 100644
--- a/src/core/Index/CheckIndex.cs
+++ b/src/core/Index/CheckIndex.cs
@@ -16,6 +16,7 @@
  */
 
 using Lucene.Net.Codecs;
+using Lucene.Net.Codecs.Lucene3x;
 using Lucene.Net.Search;
 using Lucene.Net.Store;
 using Lucene.Net.Support;

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/2a56f3bd/src/core/Index/SegmentInfo.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/SegmentInfo.cs b/src/core/Index/SegmentInfo.cs
index 0831e26..41a0bf0 100644
--- a/src/core/Index/SegmentInfo.cs
+++ b/src/core/Index/SegmentInfo.cs
@@ -24,6 +24,7 @@ using Lucene.Net.Support;
 using Directory = Lucene.Net.Store.Directory;
 using IndexInput = Lucene.Net.Store.IndexInput;
 using IndexOutput = Lucene.Net.Store.IndexOutput;
+using Lucene.Net.Codecs.Lucene3x;
 
 namespace Lucene.Net.Index
 {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/2a56f3bd/src/core/Index/SegmentInfos.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/SegmentInfos.cs b/src/core/Index/SegmentInfos.cs
index 569b19e..caf74a3 100644
--- a/src/core/Index/SegmentInfos.cs
+++ b/src/core/Index/SegmentInfos.cs
@@ -16,6 +16,7 @@
  */
 
 using Lucene.Net.Codecs;
+using Lucene.Net.Codecs.Lucene3x;
 using Lucene.Net.Store;
 using Lucene.Net.Support;
 using Lucene.Net.Util;

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/2a56f3bd/src/core/Index/TieredMergePolicy.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/TieredMergePolicy.cs b/src/core/Index/TieredMergePolicy.cs
index 3d7c76b..1d5953b 100644
--- a/src/core/Index/TieredMergePolicy.cs
+++ b/src/core/Index/TieredMergePolicy.cs
@@ -768,7 +768,7 @@ namespace Lucene.Net.Index
             }
             set
             {
-                SetMaxCFSSegmentSizeMB(v);
+                SetMaxCFSSegmentSizeMB(value);
             }
         }
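
[Editor's context] The fix above is the C# property-setter contract: inside a set accessor the incoming argument is always the implicit parameter named 'value', so the old 'v' (left over from the Java signature) was an unresolved identifier (CS0103). A simplified sketch of the corrected shape, with hypothetical internals:

    public class MergePolicySketch
    {
        private long maxCFSSegmentSize = long.MaxValue;

        public void SetMaxCFSSegmentSizeMB(double v)
        {
            maxCFSSegmentSize = (long)(v * 1024 * 1024);
        }

        public double MaxCFSSegmentSizeMB
        {
            get { return maxCFSSegmentSize / 1024.0 / 1024.0; }
            // 'value' is the setter's implicit parameter; 'v' only existed
            // in the Java method signature this property was ported from.
            set { SetMaxCFSSegmentSizeMB(value); }
        }
    }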
 

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/2a56f3bd/src/core/Search/MultiTermQuery.cs
----------------------------------------------------------------------
diff --git a/src/core/Search/MultiTermQuery.cs b/src/core/Search/MultiTermQuery.cs
index afca8e9..15d4e02 100644
--- a/src/core/Search/MultiTermQuery.cs
+++ b/src/core/Search/MultiTermQuery.cs
@@ -53,25 +53,7 @@ namespace Lucene.Net.Search
     [Serializable]
     public abstract class MultiTermQuery : Query
     {
-        [Serializable]
-        public class AnonymousClassConstantScoreAutoRewrite : ConstantScoreAutoRewrite
-        {
-            public override int TermCountCutoff
-            {
-                set { throw new NotSupportedException("Please create a private instance"); }
-            }
-
-            public override double DocCountPercent
-            {
-                set { throw new NotSupportedException("Please create a private instance"); }
-            }
-
-            // Make sure we are still a singleton even after deserializing
-            protected internal virtual object ReadResolve()
-            {
-                return CONSTANT_SCORE_AUTO_REWRITE_DEFAULT;
-            }
-        }
+        
         protected internal RewriteMethod internalRewriteMethod = CONSTANT_SCORE_AUTO_REWRITE_DEFAULT;
 
         [Serializable]
@@ -140,6 +122,95 @@ namespace Lucene.Net.Search
 	    public static readonly RewriteMethod CONSTANT_SCORE_BOOLEAN_QUERY_REWRITE =
 	        ScoringRewrite<MultiTermQuery>.CONSTANT_SCORE_BOOLEAN_QUERY_REWRITE;
 
+        [Serializable]
+        public sealed class TopTermsScoringBooleanQueryRewrite : TopTermsRewrite<BooleanQuery>
+        {
+            public TopTermsScoringBooleanQueryRewrite(int size)
+                : base(size)
+            {
+            }
+
+            protected override int MaxSize
+            {
+                get { return BooleanQuery.MaxClauseCount; }
+            }
+
+            protected override BooleanQuery TopLevelQuery
+            {
+                get { return new BooleanQuery(true); }
+            }
+
+            protected override void AddClause(BooleanQuery topLevel, Term term, int docCount, float boost, TermContext states)
+            {
+                TermQuery tq = new TermQuery(term, states);
+                tq.Boost = boost;
+                topLevel.Add(tq, Occur.SHOULD);
+            }
+        }
+
+        [Serializable]
+        public sealed class TopTermsBoostOnlyBooleanQueryRewrite : TopTermsRewrite<BooleanQuery>
+        {
+            public TopTermsBoostOnlyBooleanQueryRewrite(int size)
+                : base(size)
+            {
+            }
+
+            protected override int MaxSize
+            {
+                get { return BooleanQuery.MaxClauseCount; }
+            }
+
+            protected override BooleanQuery TopLevelQuery
+            {
+                get { return new BooleanQuery(true); }
+            }
+
+            protected override void AddClause(BooleanQuery topLevel, Term term, int docCount, float boost, TermContext states)
+            {
+                Query q = new ConstantScoreQuery(new TermQuery(term, states));
+                q.Boost = boost;
+                topLevel.Add(q, Occur.SHOULD);
+            }
+        }
+
+        [Serializable]
+        public class ConstantScoreAutoRewrite : Lucene.Net.Search.ConstantScoreAutoRewrite
+        {
+            // Make sure we are still a singleton even after deserializing
+            protected internal virtual object ReadResolve()
+            {
+                return CONSTANT_SCORE_AUTO_REWRITE_DEFAULT;
+            }
+        }
+
+        [Serializable]
+        private sealed class AnonymousConstantScoreAutoRewriteDefault : ConstantScoreAutoRewrite
+        {
+            public override int TermCountCutoff
+            {
+                get
+                {
+                    return base.TermCountCutoff;
+                }
+                set
+                {
+                    throw new NotSupportedException("Please create a private instance");
+                }
+            }
+
+            public override double DocCountPercent
+            {
+                get
+                {
+                    return base.DocCountPercent;
+                }
+                set
+                {
+                    throw new NotSupportedException("Please create a private instance");
+                }
+            }           
+        }
 
         /// <summary>Read-only default instance of <see cref="ConstantScoreAutoRewrite" />
         ///, with <see cref="ConstantScoreAutoRewrite.TermCountCutoff" />
@@ -154,7 +225,7 @@ namespace Lucene.Net.Search
         /// instance; you'll need to create a private instance
         /// instead. 
         /// </summary>
-        public static readonly RewriteMethod CONSTANT_SCORE_AUTO_REWRITE_DEFAULT;
+        public static readonly RewriteMethod CONSTANT_SCORE_AUTO_REWRITE_DEFAULT = new AnonymousConstantScoreAutoRewriteDefault();
 
         /// <summary> Constructs a query matching terms that cannot be represented with a single
         /// Term.
@@ -232,12 +303,7 @@ namespace Lucene.Net.Search
         {
             return GetTermsEnum(terms, new AttributeSource());
         }
-
-        static MultiTermQuery()
-        {
-            CONSTANT_SCORE_AUTO_REWRITE_DEFAULT = new AnonymousClassConstantScoreAutoRewrite();
-        }
-
+        
         /// <summary>Abstract class that defines how the query is rewritten. </summary>
         [Serializable]
         public abstract class RewriteMethod
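
[Editor's context] The net effect of this hunk is to split the old anonymous rewrite class into a reusable ConstantScoreAutoRewrite subclass plus a private immutable default, and to move the singleton's construction from a static constructor to an inline field initializer, which keeps declaration and value together and lets the runtime relax initialization timing (beforefieldinit). A minimal sketch of the initializer pattern, using hypothetical names:

    using System;

    public abstract class RewriteMethodSketch
    {
        public virtual int TermCountCutoff { get; set; }
    }

    public static class RewriteDefaults
    {
        // Inline initialization replaces the old static constructor; the
        // shared default rejects mutation, forcing callers who want custom
        // settings to build a private instance.
        public static readonly RewriteMethodSketch CONSTANT_SCORE_AUTO_REWRITE_DEFAULT =
            new ImmutableDefault();

        private sealed class ImmutableDefault : RewriteMethodSketch
        {
            public override int TermCountCutoff
            {
                get { return base.TermCountCutoff; }
                set { throw new NotSupportedException("Please create a private instance"); }
            }
        }
    }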

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/2a56f3bd/src/core/Search/Payloads/PayloadTermQuery.cs
----------------------------------------------------------------------
diff --git a/src/core/Search/Payloads/PayloadTermQuery.cs b/src/core/Search/Payloads/PayloadTermQuery.cs
index b74ee81..3426bac 100644
--- a/src/core/Search/Payloads/PayloadTermQuery.cs
+++ b/src/core/Search/Payloads/PayloadTermQuery.cs
@@ -67,7 +67,7 @@ public class PayloadTermQuery : SpanTermQuery {
 
       protected void ProcessPayload(Similarity similarity)  {
         if (termSpans.IsPayloadAvailable()) {
-          DocsAndPositionsEnum postings = termSpans.getPostings();
+          DocsAndPositionsEnum postings = termSpans.Postings;
           payload = postings.Payload;
           if (payload != null) {
             payloadScore = function.CurrentScore(doc, term.field(),

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/2a56f3bd/src/core/Search/PrefixTermsEnum.cs
----------------------------------------------------------------------
diff --git a/src/core/Search/PrefixTermsEnum.cs b/src/core/Search/PrefixTermsEnum.cs
index 854b992..c153fe5 100644
--- a/src/core/Search/PrefixTermsEnum.cs
+++ b/src/core/Search/PrefixTermsEnum.cs
@@ -35,7 +35,7 @@ namespace Lucene.Net.Search
         public PrefixTermsEnum(TermsEnum tenum, BytesRef prefixText)
             : base(tenum)
         {
-            SetInitialSeekTerm(this.prefixRef = prefixText);
+            InitialSeekTerm = this.prefixRef = prefixText;
         }
 
         protected override AcceptStatus Accept(BytesRef term)

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/2a56f3bd/src/core/Search/ScoringRewrite.cs
----------------------------------------------------------------------
diff --git a/src/core/Search/ScoringRewrite.cs b/src/core/Search/ScoringRewrite.cs
index ae5c45a..a4d2d6b 100644
--- a/src/core/Search/ScoringRewrite.cs
+++ b/src/core/Search/ScoringRewrite.cs
@@ -54,7 +54,7 @@ namespace Lucene.Net.Search
 
         public override sealed Query Rewrite(IndexReader reader, MultiTermQuery query)
         {
-            var result = GetTopLevelQuery();
+            var result = TopLevelQuery;
             var col = new ParallelArraysTermCollector(this);
             CollectTerms(reader, query, col);
 

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/2a56f3bd/src/core/Search/Spans/SpanFirstQuery.cs
----------------------------------------------------------------------
diff --git a/src/core/Search/Spans/SpanFirstQuery.cs b/src/core/Search/Spans/SpanFirstQuery.cs
index a0357e6..ddefb22 100644
--- a/src/core/Search/Spans/SpanFirstQuery.cs
+++ b/src/core/Search/Spans/SpanFirstQuery.cs
@@ -56,7 +56,7 @@ namespace Lucene.Net.Search.Spans
 
         public override object Clone()
         {
-            SpanFirstQuery spanFirstQuery = new SpanFirstQuery((SpanQuery) match.clone(), end);
+            SpanFirstQuery spanFirstQuery = new SpanFirstQuery((SpanQuery) match.Clone(), end);
             spanFirstQuery.Boost = Boost;
             return spanFirstQuery;
         }
@@ -68,15 +68,15 @@ namespace Lucene.Net.Search.Spans
 
             SpanFirstQuery other = (SpanFirstQuery) o;
             return this.end == other.end
-                   && this.match.equals(other.match)
+                   && this.match.Equals(other.match)
                    && this.Boost == other.Boost;
         }
 
         public override int GetHashCode()
         {
-            int h = match.hashCode();
+            int h = match.GetHashCode();
             h ^= (h << 8) | Number.URShift(h, 25); // reversible
-            h ^= Float.floatToRawIntBits(Boost) ^ end;
+            h ^= Number.FloatToIntBits(Boost) ^ end;
             return h;
         }
     }
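
[Editor's context] The hash-code hunk swaps Java idioms for their Support-layer equivalents: Number.FloatToIntBits stands in for Java's Float.floatToRawIntBits, and Number.URShift emulates the unsigned right shift '>>>', which C# lacks for signed ints. A self-contained sketch of how such a URShift helper can be written (hypothetical name, shown for illustration only):

    using System;

    static class NumberSketch
    {
        // Java's '>>>' fills vacated bits with zero; C# '>>' on int is
        // arithmetic (sign-extending), so the helper round-trips through uint.
        public static int URShift(int number, int bits)
        {
            return (int)((uint)number >> bits);
        }

        static void Main()
        {
            Console.WriteLine(URShift(-1, 25));   // prints 127, not -1
        }
    }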

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/2a56f3bd/src/core/Search/TermRangeFilter.cs
----------------------------------------------------------------------
diff --git a/src/core/Search/TermRangeFilter.cs b/src/core/Search/TermRangeFilter.cs
index 65914a5..61f3cc3 100644
--- a/src/core/Search/TermRangeFilter.cs
+++ b/src/core/Search/TermRangeFilter.cs
@@ -61,7 +61,7 @@ namespace Lucene.Net.Search
         {
             var lower = lowerTerm == null ? null : new BytesRef(lowerTerm);
             var upper = upperTerm == null ? null : new BytesRef(upperTerm);
-            return new TermRangeFilter(field, lower, uppoer, includeLower, includeUpper);
+            return new TermRangeFilter(field, lower, upper, includeLower, includeUpper);
         }
 		
 		/// <summary> Constructs a filter for field <c>fieldName</c> matching

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/2a56f3bd/src/core/Search/TermRangeTermEnum.cs
----------------------------------------------------------------------
diff --git a/src/core/Search/TermRangeTermEnum.cs b/src/core/Search/TermRangeTermEnum.cs
index 4e921c1..4c68e13 100644
--- a/src/core/Search/TermRangeTermEnum.cs
+++ b/src/core/Search/TermRangeTermEnum.cs
@@ -64,7 +64,7 @@ namespace Lucene.Net.Search
                 upperBytesRef = upperTerm;
             }
 
-            SetInitialSeekTerm(lowerBytesRef);
+            InitialSeekTerm = lowerBytesRef;
             termComp = Comparator;
         }
 

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/2a56f3bd/src/core/Search/TopTermsRewrite.cs
----------------------------------------------------------------------
diff --git a/src/core/Search/TopTermsRewrite.cs b/src/core/Search/TopTermsRewrite.cs
index 5c33720..1c1295e 100644
--- a/src/core/Search/TopTermsRewrite.cs
+++ b/src/core/Search/TopTermsRewrite.cs
@@ -138,7 +138,7 @@ namespace Lucene.Net.Search
             var stQueue = new Support.PriorityQueue<ScoreTerm>();
             CollectTerms(reader, query, new AnonymousRewriteTermCollector(this, stQueue));
 
-            var q = GetTopLevelQuery();
+            var q = TopLevelQuery;
             var scoreTerms = stQueue.ToArray();
             ArrayUtil.MergeSort(scoreTerms, scoreTermSortByTermComp);
 

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/2a56f3bd/src/core/Store/CompoundFileDirectory.cs
----------------------------------------------------------------------
diff --git a/src/core/Store/CompoundFileDirectory.cs b/src/core/Store/CompoundFileDirectory.cs
index 76cde01..cb92f8e 100644
--- a/src/core/Store/CompoundFileDirectory.cs
+++ b/src/core/Store/CompoundFileDirectory.cs
@@ -1,4 +1,5 @@
-using Lucene.Net.Index;
+using Lucene.Net.Codecs;
+using Lucene.Net.Index;
 using Lucene.Net.Support;
 using Lucene.Net.Util;
 using System;

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/2a56f3bd/src/core/Util/Automaton/CompiledAutomaton.cs
----------------------------------------------------------------------
diff --git a/src/core/Util/Automaton/CompiledAutomaton.cs b/src/core/Util/Automaton/CompiledAutomaton.cs
index 4d5f729..5fef853 100644
--- a/src/core/Util/Automaton/CompiledAutomaton.cs
+++ b/src/core/Util/Automaton/CompiledAutomaton.cs
@@ -1,4 +1,5 @@
 using Lucene.Net.Index;
+using Lucene.Net.Search;
 using System;
 using System.Collections.Generic;
 using System.Linq;

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/2a56f3bd/src/core/Util/Automaton/LevenshteinAutomata.cs
----------------------------------------------------------------------
diff --git a/src/core/Util/Automaton/LevenshteinAutomata.cs b/src/core/Util/Automaton/LevenshteinAutomata.cs
index f08d750..e1b07e5 100644
--- a/src/core/Util/Automaton/LevenshteinAutomata.cs
+++ b/src/core/Util/Automaton/LevenshteinAutomata.cs
@@ -76,8 +76,8 @@ namespace Lucene.Net.Util.Automaton
 
             descriptions = new ParametricDescription[] {
                 null, /* for n=0, we do not need to go through the trouble */
-                withTranspositions ? new Lev1TParametricDescription(word.Length) : new Lev1ParametricDescription(word.Length),
-                withTranspositions ? new Lev2TParametricDescription(word.Length) : new Lev2ParametricDescription(word.Length),
+                withTranspositions ? (ParametricDescription)new Lev1TParametricDescription(word.Length) : new Lev1ParametricDescription(word.Length),
+                withTranspositions ? (ParametricDescription)new Lev2TParametricDescription(word.Length) : new Lev2ParametricDescription(word.Length),
             };
         }
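
[Editor's context] The casts added here work around a C#/Java difference: C#'s conditional operator does not infer a common base type for its two arms, so one arm must be cast explicitly to give the other an implicit conversion target. A self-contained sketch with hypothetical types:

    class ParametricDescriptionSketch { }
    class Lev1Sketch : ParametricDescriptionSketch { }
    class Lev1TSketch : ParametricDescriptionSketch { }

    class TernaryDemo
    {
        static ParametricDescriptionSketch Pick(bool withTranspositions)
        {
            // Without the cast this is error CS0173 ("type of conditional
            // expression cannot be determined"); casting one arm to the base
            // type gives the whole expression that type.
            return withTranspositions
                ? (ParametricDescriptionSketch)new Lev1TSketch()
                : new Lev1Sketch();
        }
    }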
 

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/2a56f3bd/src/core/Util/CommandLineUtil.cs
----------------------------------------------------------------------
diff --git a/src/core/Util/CommandLineUtil.cs b/src/core/Util/CommandLineUtil.cs
index d91b41b..4707768 100644
--- a/src/core/Util/CommandLineUtil.cs
+++ b/src/core/Util/CommandLineUtil.cs
@@ -14,7 +14,7 @@ namespace Lucene.Net.Util
             try
             {
                 Type clazz = LoadFSDirectoryClass(clazzName);
-                return NewFSDirectory(clazz, file);
+                return NewFSDirectory(clazz, dir);
 
             }
             catch (TypeLoadException e)


[26/50] [abbrv] IT NOW BUILDS!!!

Posted by mh...@apache.org.
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/733dc181/src/core/Search/Spans/SpanNearPayloadCheckQuery.cs
----------------------------------------------------------------------
diff --git a/src/core/Search/Spans/SpanNearPayloadCheckQuery.cs b/src/core/Search/Spans/SpanNearPayloadCheckQuery.cs
index f6224e3..020ffa2 100644
--- a/src/core/Search/Spans/SpanNearPayloadCheckQuery.cs
+++ b/src/core/Search/Spans/SpanNearPayloadCheckQuery.cs
@@ -3,6 +3,7 @@ using System.Linq;
 using System.Text;
 using Lucene.Net.Support;
 using Lucene.Net.Util;
+using System;
 
 namespace Lucene.Net.Search.Spans
 {
@@ -50,11 +51,11 @@ namespace Lucene.Net.Search.Spans
         {
             var buffer = new StringBuilder();
             buffer.Append("spanPayCheck(");
-            buffer.Append(match.toString(field));
+            buffer.Append(match.ToString(field));
             buffer.Append(", payloadRef: ");
             foreach (var bytes in payloadToMatch)
             {
-                ToStringUtils.ByteArray(buffer, bytes);
+                ToStringUtils.ByteArray(buffer, (sbyte[])(Array)bytes);
                 buffer.Append(';');
             }
             buffer.Append(")");
@@ -64,7 +65,7 @@ namespace Lucene.Net.Search.Spans
 
         public override object Clone()
         {
-            var result = new SpanNearPayloadCheckQuery((SpanNearQuery) match.clone(), payloadToMatch);
+            var result = new SpanNearPayloadCheckQuery((SpanNearQuery) match.Clone(), payloadToMatch);
             result.Boost = Boost;
             return result;
         }
@@ -76,13 +77,13 @@ namespace Lucene.Net.Search.Spans
 
             var other = (SpanNearPayloadCheckQuery) o;
             return this.payloadToMatch.Equals(other.payloadToMatch)
-                   && this.match.equals(other.match)
+                   && this.match.Equals(other.match)
                    && this.Boost == other.Boost;
         }
 
         public override int GetHashCode()
         {
-            int h = match.hashCode();
+            int h = match.GetHashCode();
             h ^= (h << 8) | Support.Number.URShift(h, 25); // reversible
             //TODO: is this right?
             h ^= payloadToMatch.GetHashCode();
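
[Editor's context] The (sbyte[])(Array)bytes double cast above is a deliberate reinterpretation, not a copy. A small demo of the trick, assuming nothing beyond the BCL:

    using System;

    class SignedBytesDemo
    {
        static void Main()
        {
            byte[] payload = { 1, 2, 255 };

            // A direct (sbyte[])payload cast is rejected by the C# compiler,
            // but the CLR treats same-width signed/unsigned element arrays as
            // compatible, so the detour through Array reinterprets the same
            // object at runtime; no copy is made.
            sbyte[] signed = (sbyte[])(Array)payload;

            Console.WriteLine(signed[2]);   // prints -1 (255 reinterpreted)
        }
    }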

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/733dc181/src/core/Search/Spans/SpanNearQuery.cs
----------------------------------------------------------------------
diff --git a/src/core/Search/Spans/SpanNearQuery.cs b/src/core/Search/Spans/SpanNearQuery.cs
index b945f29..1aa8ca8 100644
--- a/src/core/Search/Spans/SpanNearQuery.cs
+++ b/src/core/Search/Spans/SpanNearQuery.cs
@@ -139,7 +139,7 @@ namespace Lucene.Net.Search.Spans
                 // optimize 1-clause case
                 return clauses[0].GetSpans(context, acceptDocs, termContexts);
 
-            return inOrder ? (SpansBase)new NearSpansOrdered(this, context, collectPayloads) : (SpansBase)new NearSpansUnordered(this, context);
+            return inOrder ? (SpansBase)new NearSpansOrdered(this, context, acceptDocs, termContexts, collectPayloads) : (SpansBase)new NearSpansUnordered(this, context, acceptDocs, termContexts);
         }
 
         public override Query Rewrite(IndexReader reader)

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/733dc181/src/core/Search/Spans/SpanPositionCheckQuery.cs
----------------------------------------------------------------------
diff --git a/src/core/Search/Spans/SpanPositionCheckQuery.cs b/src/core/Search/Spans/SpanPositionCheckQuery.cs
index b24c9ef..bf55a94 100644
--- a/src/core/Search/Spans/SpanPositionCheckQuery.cs
+++ b/src/core/Search/Spans/SpanPositionCheckQuery.cs
@@ -49,7 +49,7 @@ namespace Lucene.Net.Search.Spans
 
         public override SpansBase GetSpans(AtomicReaderContext context, IBits acceptDocs, IDictionary<Term, TermContext> termContexts)
         {
-            return new PositionCheckSpan(context, acceptDocs, termContexts);
+            return new PositionCheckSpan(context, acceptDocs, termContexts, this);
         }
 
         public override Query Rewrite(IndexReader reader)

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/733dc181/src/core/Search/Spans/SpanScorer.cs
----------------------------------------------------------------------
diff --git a/src/core/Search/Spans/SpanScorer.cs b/src/core/Search/Spans/SpanScorer.cs
index 2657542..0c7a730 100644
--- a/src/core/Search/Spans/SpanScorer.cs
+++ b/src/core/Search/Spans/SpanScorer.cs
@@ -114,7 +114,7 @@ namespace Lucene.Net.Search.Spans
 
         public override long Cost
         {
-            get { return spans.Cost(); }
+            get { return spans.Cost; }
         }
     }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/733dc181/src/core/Search/Spans/SpanTermQuery.cs
----------------------------------------------------------------------
diff --git a/src/core/Search/Spans/SpanTermQuery.cs b/src/core/Search/Spans/SpanTermQuery.cs
index 0c6fd7f..c3db355 100644
--- a/src/core/Search/Spans/SpanTermQuery.cs
+++ b/src/core/Search/Spans/SpanTermQuery.cs
@@ -30,38 +30,38 @@ namespace Lucene.Net.Search.Spans
     [Serializable]
     public class SpanTermQuery : SpanQuery
     {
-        protected Term internalTerm;
+        protected Term term;
 
         /// <summary>Construct a SpanTermQuery matching the named term's spans. </summary>
         public SpanTermQuery(Term term)
         {
-            this.internalTerm = term;
+            this.term = term;
         }
 
         /// <summary>Return the term whose spans are matched. </summary>
         public virtual Term Term
         {
-            get { return internalTerm; }
+            get { return term; }
         }
 
         public override string Field
         {
-            get { return internalTerm.Field; }
+            get { return term.Field; }
         }
 
         public override void ExtractTerms(ISet<Term> terms)
         {
-            terms.Add(internalTerm);
+            terms.Add(term);
         }
 
         public override string ToString(string field)
         {
             var buffer = new StringBuilder();
-            if (internalTerm.Field.Equals(field))
-                buffer.Append(internalTerm.Text);
+            if (term.Field.Equals(field))
+                buffer.Append(term.Text);
             else
             {
-                buffer.Append(internalTerm.ToString());
+                buffer.Append(term.ToString());
             }
             buffer.Append(ToStringUtils.Boost(Boost));
             return buffer.ToString();

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/733dc181/src/core/Search/Spans/SpanWeight.cs
----------------------------------------------------------------------
diff --git a/src/core/Search/Spans/SpanWeight.cs b/src/core/Search/Spans/SpanWeight.cs
index c4c693b..9506a94 100644
--- a/src/core/Search/Spans/SpanWeight.cs
+++ b/src/core/Search/Spans/SpanWeight.cs
@@ -96,7 +96,7 @@ namespace Lucene.Net.Search.Spans
 
         public override Explanation Explain(AtomicReaderContext context, int doc)
         {
-            var scorer = (SpanScorer)Scorer(context, true, false, context.Reader.LiveDocs);
+            var scorer = (SpanScorer)Scorer(context, true, false, context.AtomicReader.LiveDocs);
             if (scorer != null)
             {
                 var newDoc = scorer.Advance(doc);

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/733dc181/src/core/Search/Spans/TermSpans.cs
----------------------------------------------------------------------
diff --git a/src/core/Search/Spans/TermSpans.cs b/src/core/Search/Spans/TermSpans.cs
index fddd161..367907f 100644
--- a/src/core/Search/Spans/TermSpans.cs
+++ b/src/core/Search/Spans/TermSpans.cs
@@ -184,8 +184,8 @@ namespace Lucene.Net.Search.Spans
             {
                 get { return 0; }
             }
-
-            private static readonly TermSpans EMPTY_TERM_SPANS = new EmptyTermSpans();
         }
+
+        public static readonly TermSpans EMPTY_TERM_SPANS = new EmptyTermSpans();
     }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/733dc181/src/core/Search/TermQuery.cs
----------------------------------------------------------------------
diff --git a/src/core/Search/TermQuery.cs b/src/core/Search/TermQuery.cs
index 553beb8..969b355 100644
--- a/src/core/Search/TermQuery.cs
+++ b/src/core/Search/TermQuery.cs
@@ -101,7 +101,7 @@ namespace Lucene.Net.Search
                     // assert termNotInReader(context.reader(), term) : "no termstate found but term exists in reader term=" + term;
                     return null;
                 }
-                var termsEnum = context.Reader.Terms(parent.term.Field).Iterator(null);
+                var termsEnum = context.AtomicReader.Terms(parent.term.Field).Iterator(null);
                 termsEnum.SeekExact(parent.term.Bytes, state);
                 return termsEnum;
             }
@@ -114,7 +114,7 @@ namespace Lucene.Net.Search
 
             public override Explanation Explain(AtomicReaderContext context, int doc)
             {
-                var scorer = Scorer(context, true, false, context.Reader.LiveDocs);
+                var scorer = Scorer(context, true, false, context.AtomicReader.LiveDocs);
                 if (scorer != null)
                 {
                     int newDoc = scorer.Advance(doc);
@@ -163,7 +163,7 @@ namespace Lucene.Net.Search
         {
             var context = searcher.TopReaderContext;
             TermContext termState;
-            if (perReaderTermState == null || perReaderTermState.TopReaderContext != context)
+            if (perReaderTermState == null || perReaderTermState.topReaderContext != context)
             {
                 // make TermQuery single-pass if we don't have a PRTS or if the context differs!
                 termState = TermContext.Build(context, term, true); // cache term lookups!

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/733dc181/src/core/Search/TermRangeFilter.cs
----------------------------------------------------------------------
diff --git a/src/core/Search/TermRangeFilter.cs b/src/core/Search/TermRangeFilter.cs
index 61f3cc3..8ecc938 100644
--- a/src/core/Search/TermRangeFilter.cs
+++ b/src/core/Search/TermRangeFilter.cs
@@ -81,13 +81,13 @@ namespace Lucene.Net.Search
 		}
 
 	    /// <summary>Returns the lower value of this range filter </summary>
-	    public virtual string LowerTerm
+	    public virtual BytesRef LowerTerm
 	    {
 	        get { return query.LowerTerm; }
 	    }
 
 	    /// <summary>Returns the upper value of this range filter </summary>
-	    public virtual string UpperTerm
+	    public virtual BytesRef UpperTerm
 	    {
 	        get { return query.UpperTerm; }
 	    }

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/733dc181/src/core/Search/TermRangeQuery.cs
----------------------------------------------------------------------
diff --git a/src/core/Search/TermRangeQuery.cs b/src/core/Search/TermRangeQuery.cs
index ed5824e..9203d8e 100644
--- a/src/core/Search/TermRangeQuery.cs
+++ b/src/core/Search/TermRangeQuery.cs
@@ -40,8 +40,8 @@ namespace Lucene.Net.Search
     [Serializable]
     public class TermRangeQuery : MultiTermQuery
     {
-        private string lowerTerm;
-        private string upperTerm;
+        private BytesRef lowerTerm;
+        private BytesRef upperTerm;
         private bool includeLower;
         private bool includeUpper;
 
@@ -85,13 +85,13 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>Returns the lower value of this range query </summary>
-        public virtual string LowerTerm
+        public virtual BytesRef LowerTerm
         {
             get { return lowerTerm; }
         }
 
         /// <summary>Returns the upper value of this range query </summary>
-        public virtual string UpperTerm
+        public virtual BytesRef UpperTerm
         {
             get { return upperTerm; }
         }
@@ -110,14 +110,14 @@ namespace Lucene.Net.Search
 
         protected internal override TermsEnum GetTermsEnum(Terms terms, AttributeSource atts)
 		{
-			if (lowerTerm != null && upperTerm != null && lowerTerm.CompareTo(upperTerm) > )
+			if (lowerTerm != null && upperTerm != null && lowerTerm.CompareTo(upperTerm) > 0)
             {
                 return TermsEnum.EMPTY;
             }
 
             var tenum = terms.Iterator(null);
 
-            if ((lowerTerm == null || (includeLower && lowerTerm.Length == 0)) && uperTerm == null) 
+            if ((lowerTerm == null || (includeLower && lowerTerm.length == 0)) && upperTerm == null) 
             {
                 return tenum;
             }
@@ -134,10 +134,10 @@ namespace Lucene.Net.Search
                 buffer.Append(":");
             }
             buffer.Append(includeLower ? '[' : '{');
-            buffer.append(lowerTerm != null ? ("*".Equals(Term.ToString(lowerTerm)) ? "\\*" : Term.ToString(lowerTerm)) : "*");
-            buffer.append(" TO ");
-            buffer.append(upperTerm != null ? ("*".Equals(Term.ToString(upperTerm)) ? "\\*" : Term.ToString(upperTerm)) : "*");
-            buffer.append(includeUpper ? ']' : '}');
+            buffer.Append(lowerTerm != null ? ("*".Equals(Term.ToString(lowerTerm)) ? "\\*" : Term.ToString(lowerTerm)) : "*");
+            buffer.Append(" TO ");
+            buffer.Append(upperTerm != null ? ("*".Equals(Term.ToString(upperTerm)) ? "\\*" : Term.ToString(upperTerm)) : "*");
+            buffer.Append(includeUpper ? ']' : '}');
             buffer.Append(ToStringUtils.Boost(Boost));
             return buffer.ToString();
         }
@@ -162,9 +162,9 @@ namespace Lucene.Net.Search
             if (GetType() != obj.GetType())
                 return false;
             var other = (TermRangeQuery)obj;
-            if (includeLower != other.IncludeLower)
+            if (includeLower != other.includeLower)
                 return false;
-            if (includeUpper != other.IncludeUpper)
+            if (includeUpper != other.includeUpper)
                 return false;
             if (lowerTerm == null)
             {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/733dc181/src/core/Search/TopDocs.cs
----------------------------------------------------------------------
diff --git a/src/core/Search/TopDocs.cs b/src/core/Search/TopDocs.cs
index 7dd08b2..183ab88 100644
--- a/src/core/Search/TopDocs.cs
+++ b/src/core/Search/TopDocs.cs
@@ -187,7 +187,7 @@ namespace Lucene.Net.Search
 
                 for (var compIDX = 0; compIDX < comparators.Length; compIDX++)
                 {
-                    var comp = comparators[compIDX];
+                    FieldComparator comp = comparators[compIDX];
 
                     var cmp = reverseMul[compIDX] * comp.CompareValues(firstFD.fields[compIDX], secondFD.fields[compIDX]);
 

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/733dc181/src/core/Search/TopFieldCollector.cs
----------------------------------------------------------------------
diff --git a/src/core/Search/TopFieldCollector.cs b/src/core/Search/TopFieldCollector.cs
index 5ea70e5..4a18202 100644
--- a/src/core/Search/TopFieldCollector.cs
+++ b/src/core/Search/TopFieldCollector.cs
@@ -101,7 +101,7 @@ namespace Lucene.Net.Search
             {
                 this.docBase = context.docBase;
                 queue.SetComparator(0, comparator.SetNextReader(context));
-                comparator = queue.FirstComparator;
+                comparator = queue.firstComparator;
             }
 
             public override void SetScorer(Scorer scorer)
@@ -493,7 +493,7 @@ namespace Lucene.Net.Search
                 this.docBase = docBase;
                 for (var i = 0; i < comparators.Length; i++)
                 {
-                    queue.SetComparators(i, comparators[i].SetNextReader(context));
+                    queue.SetComparator(i, comparators[i].SetNextReader(context));
                 }
             }
 
@@ -1005,7 +1005,7 @@ namespace Lucene.Net.Search
                 {
                     var comp = comparators[compIDX];
 
-                    var cmp = reverseMul[compIDX] * comp.CompareDocToValue(doc, after.fields[compIDX]);
+                    var cmp = reverseMul[compIDX] * comp.CompareDocToObjectValue(doc, after.fields[compIDX]);
                     if (cmp < 0)
                     {
                         return;
@@ -1208,7 +1208,7 @@ namespace Lucene.Net.Search
             if (numHits <= 0) throw new ArgumentException("numHits must be > 0; please use TotalHitCountCollector if you just need the total hit count");
 
 
-            FieldValueHitQueue<Entry> queue = FieldValueHitQueue<Entry>.Create(sort.fields, numHits);
+            FieldValueHitQueue<Entry> queue = FieldValueHitQueue.Create<Entry>(sort.fields, numHits);
             if (after == null)
             {
                 if (queue.Comparators.Length == 1)
@@ -1335,7 +1335,7 @@ namespace Lucene.Net.Search
             }
 
             // If this is a maxScoring tracking collector and there were no results, 
-            return new TopFieldDocs(totalHits, results, ((FieldValueHitQueue<Entry>)pq).GetFields(), maxScore);
+            return new TopFieldDocs(totalHits, results, ((FieldValueHitQueue<Entry>)pq).Fields, maxScore);
         }
 
         public override bool AcceptsDocsOutOfOrder
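
[Editor's context] The change to FieldValueHitQueue.Create<Entry>(...) reflects a common C# layout: statics declared on a generic type exist once per constructed type, so a shared factory is better placed on a non-generic companion class of the same name. A simplified sketch of that shape (hypothetical types):

    using System;

    public class FieldValueHitQueueSketch<T>
    {
        internal FieldValueHitQueueSketch(int numHits) { /* ... */ }
    }

    // Non-generic companion: the call site reads
    // FieldValueHitQueueSketch.Create<Entry>(...), as in the diff above,
    // and generic type inference stays available.
    public static class FieldValueHitQueueSketch
    {
        public static FieldValueHitQueueSketch<T> Create<T>(int numHits)
        {
            if (numHits <= 0) throw new ArgumentException("numHits must be > 0");
            return new FieldValueHitQueueSketch<T>(numHits);
        }
    }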

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/733dc181/src/core/Store/NIOFSDirectory.cs
----------------------------------------------------------------------
diff --git a/src/core/Store/NIOFSDirectory.cs b/src/core/Store/NIOFSDirectory.cs
index e7d8984..b6af63f 100644
--- a/src/core/Store/NIOFSDirectory.cs
+++ b/src/core/Store/NIOFSDirectory.cs
@@ -66,7 +66,7 @@ namespace Lucene.Net.Store
             public override IndexInput OpenSlice(string sliceDescription, long offset, long length)
             {
                 return new NIOFSIndexInput(sliceDescription, path, descriptor, /*descriptor.getChannel(),*/ offset,
-                    length, BufferedIndexInput.BufferSize(context), parent.ReadChunkSize);
+                    length, BufferedIndexInput.GetBufferSize(context), parent.ReadChunkSize);
             }
 
             public override IndexInput OpenFullSlice()

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/733dc181/src/core/Store/SimpleFSDirectory.cs
----------------------------------------------------------------------
diff --git a/src/core/Store/SimpleFSDirectory.cs b/src/core/Store/SimpleFSDirectory.cs
index 7053d80..74810da 100644
--- a/src/core/Store/SimpleFSDirectory.cs
+++ b/src/core/Store/SimpleFSDirectory.cs
@@ -90,7 +90,7 @@ namespace Lucene.Net.Store
             public override IndexInput OpenSlice(string sliceDescription, long offset, long length)
             {
                 return new SimpleFSIndexInput("SimpleFSIndexInput(" + sliceDescription + " in path=\"" + file.FullName + "\" slice=" + offset + ":" + (offset + length) + ")", descriptor, offset,
-                    length, BufferedIndexInput.BufferSize(context), parent.ReadChunkSize);
+                    length, BufferedIndexInput.GetBufferSize(context), parent.ReadChunkSize);
             }
 
             public override IndexInput OpenFullSlice()

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/733dc181/src/core/Support/Deflater.cs
----------------------------------------------------------------------
diff --git a/src/core/Support/Deflater.cs b/src/core/Support/Deflater.cs
index 03473de..7db9d76 100644
--- a/src/core/Support/Deflater.cs
+++ b/src/core/Support/Deflater.cs
@@ -30,12 +30,18 @@ namespace Lucene.Net.Support
         delegate void FinishDelegate();
         delegate bool GetIsFinishedDelegate();
         delegate int DeflateDelegate(byte[] output);
+        delegate void ResetDelegate();
+        delegate bool GetIsNeedingInputDelegate();
+        delegate int DeflateDelegate3(byte[] output, int offset, int length);
 
         SetLevelDelegate setLevelMethod;
         SetInputDelegate setInputMethod;
         FinishDelegate finishMethod;
         GetIsFinishedDelegate getIsFinishedMethod;
         DeflateDelegate deflateMethod;
+        ResetDelegate resetMethod;
+        GetIsNeedingInputDelegate getIsNeedingInputMethod;
+        DeflateDelegate3 deflate3Method;
 
         public const int BEST_COMPRESSION = 9;
 
@@ -67,6 +73,21 @@ namespace Lucene.Net.Support
                 typeof(DeflateDelegate),
                 deflaterInstance,
                 type.GetMethod("Deflate", new Type[] { typeof(byte[]) }));
+
+            resetMethod = (ResetDelegate)Delegate.CreateDelegate(
+                typeof(ResetDelegate),
+                deflaterInstance,
+                type.GetMethod("Reset", Type.EmptyTypes));
+
+            getIsNeedingInputMethod = (GetIsNeedingInputDelegate)Delegate.CreateDelegate(
+                typeof(GetIsNeedingInputDelegate),
+                deflaterInstance,
+                type.GetMethod("get_IsNeedingInput", Type.EmptyTypes));
+
+            deflate3Method = (DeflateDelegate3)Delegate.CreateDelegate(
+                typeof(DeflateDelegate3),
+                deflaterInstance,
+                type.GetMethod("Deflate", new Type[] { typeof(byte[]), typeof(int), typeof(int) }));
         }
 
         public void SetLevel(int level)
@@ -93,5 +114,20 @@ namespace Lucene.Net.Support
         {
             return deflateMethod(output);
         }
+
+        public int Deflate(byte[] output, int offset, int length)
+        {
+            return deflate3Method(output, offset, length);
+        }
+
+        public void Reset()
+        {
+            resetMethod();
+        }
+
+        public bool IsNeedingInput
+        {
+            get { return getIsNeedingInputMethod(); }
+        }
     }
 }
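
[Editor's context] Both this Deflater hunk and the Inflater hunk that follows extend the same late-binding pattern: each SharpZipLib method is looked up once by reflection and wrapped in a delegate, so subsequent calls avoid per-invocation MethodInfo.Invoke overhead. Property accessors are ordinary methods named with a get_/set_ prefix under the hood, which is why "get_IsNeedingInput" is bound the same way. A minimal sketch of the pattern with a hypothetical Add method:

    using System;

    class LateBoundSketch
    {
        delegate int AddDelegate(int x, int y);
        readonly AddDelegate add;

        public LateBoundSketch(object target)
        {
            // Bind once at construction; later calls run at delegate speed
            // instead of paying reflection cost on every invocation.
            add = (AddDelegate)Delegate.CreateDelegate(
                typeof(AddDelegate),
                target,
                target.GetType().GetMethod("Add", new[] { typeof(int), typeof(int) }));
        }

        public int Add(int x, int y) { return add(x, y); }
    }

    class Adder
    {
        public int Add(int x, int y) { return x + y; }
    }

    class Demo
    {
        static void Main()
        {
            Console.WriteLine(new LateBoundSketch(new Adder()).Add(2, 3));   // 5
        }
    }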

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/733dc181/src/core/Support/Inflater.cs
----------------------------------------------------------------------
diff --git a/src/core/Support/Inflater.cs b/src/core/Support/Inflater.cs
index a67add0..5b16fc8 100644
--- a/src/core/Support/Inflater.cs
+++ b/src/core/Support/Inflater.cs
@@ -28,10 +28,16 @@ namespace Lucene.Net.Support
         delegate void SetInputDelegate(byte[] buffer);
         delegate bool GetIsFinishedDelegate();
         delegate int InflateDelegate(byte[] buffer);
+        delegate void ResetDelegate();
+        delegate void SetInputDelegate3(byte[] buffer, int index, int count);
+        delegate int InflateDelegate3(byte[] buffer, int offset, int count);
 
         SetInputDelegate setInputMethod;
         GetIsFinishedDelegate getIsFinishedMethod;
         InflateDelegate inflateMethod;
+        ResetDelegate resetMethod;
+        SetInputDelegate3 setInput3Method;
+        InflateDelegate3 inflate3Method;
 
         internal Inflater(object inflaterInstance)
         {
@@ -51,6 +57,21 @@ namespace Lucene.Net.Support
                 typeof(InflateDelegate),
                 inflaterInstance,
                 type.GetMethod("Inflate", new Type[] { typeof(byte[]) }));
+
+            resetMethod = (ResetDelegate)Delegate.CreateDelegate(
+                typeof(ResetDelegate),
+                inflaterInstance,
+                type.GetMethod("Reset", Type.EmptyTypes));
+
+            setInput3Method = (SetInputDelegate3)Delegate.CreateDelegate(
+                typeof(SetInputDelegate3),
+                inflaterInstance,
+                type.GetMethod("SetInput", new Type[] { typeof(byte[]), typeof(int), typeof(int) }));
+
+            inflate3Method = (InflateDelegate3)Delegate.CreateDelegate(
+                typeof(InflateDelegate3),
+                inflaterInstance,
+                type.GetMethod("Inflate", new Type[] { typeof(byte[]), typeof(int), typeof(int) }));
         }
 
         public void SetInput(byte[] buffer)
@@ -58,6 +79,11 @@ namespace Lucene.Net.Support
             setInputMethod(buffer);
         }
 
+        public void SetInput(byte[] buffer, int index, int count)
+        {
+            setInput3Method(buffer, index, count);
+        }
+
         public bool IsFinished
         {
             get { return getIsFinishedMethod(); }
@@ -67,5 +93,15 @@ namespace Lucene.Net.Support
         {
             return inflateMethod(buffer);
         }
+
+        public int Inflate(byte[] buffer, int offset, int count)
+        {
+            return inflate3Method(buffer, offset, count);
+        }
+
+        public void Reset()
+        {
+            resetMethod();
+        }
     }
 }

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/733dc181/src/core/Support/StringBuilderExtensions.cs
----------------------------------------------------------------------
diff --git a/src/core/Support/StringBuilderExtensions.cs b/src/core/Support/StringBuilderExtensions.cs
new file mode 100644
index 0000000..769dde1
--- /dev/null
+++ b/src/core/Support/StringBuilderExtensions.cs
@@ -0,0 +1,28 @@
+using System;
+using System.Collections.Generic;
+using System.Linq;
+using System.Text;
+
+namespace Lucene.Net.Support
+{
+    public static class StringBuilderExtensions
+    {
+        public static StringBuilder Reverse(this StringBuilder text)
+        {
+            if (text.Length > 1)
+            {
+                int pivotPos = text.Length / 2;
+                for (int i = 0; i < pivotPos; i++)
+                {
+                    int iRight = text.Length - (i + 1);
+                    char rightChar = text[i];
+                    char leftChar = text[iRight];
+                    text[i] = leftChar;
+                    text[iRight] = rightChar;
+                }
+            }
+
+            return text;
+        }
+    }
+}
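
[Editor's context] A short usage sketch for the new extension method, assuming the Lucene.Net.Support namespace is referenced; Reverse mutates the builder in place and returns the same instance, so calls can chain:

    using System;
    using System.Text;
    using Lucene.Net.Support;

    class ReverseDemo
    {
        static void Main()
        {
            var sb = new StringBuilder("abcdef");
            sb.Reverse();                      // swaps characters in place
            Console.WriteLine(sb.ToString());  // prints "fedcba"
        }
    }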

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/733dc181/src/core/Util/Fst/ReverseBytesReader.cs
----------------------------------------------------------------------
diff --git a/src/core/Util/Fst/ReverseBytesReader.cs b/src/core/Util/Fst/ReverseBytesReader.cs
index ad9c4d1..2df7b8c 100644
--- a/src/core/Util/Fst/ReverseBytesReader.cs
+++ b/src/core/Util/Fst/ReverseBytesReader.cs
@@ -12,14 +12,14 @@
 
         public override byte ReadByte()
         {
-            return bytes[Position--];
+            return (byte)bytes[Position--];
         }
 
         public override void ReadBytes(byte[] b, int offset, int len)
         {
             for (var i = 0; i < len; i++)
             {
-                b[offset + i] = bytes[Position--];
+                b[offset + i] = (byte)bytes[Position--];
             }
         }
 


[18/50] [abbrv] Massive cleanup, reducing compiler errors

Posted by mh...@apache.org.
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/80561f72/src/core/Index/MultiDocValues.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/MultiDocValues.cs b/src/core/Index/MultiDocValues.cs
index 54ff6ec..1c732d8 100644
--- a/src/core/Index/MultiDocValues.cs
+++ b/src/core/Index/MultiDocValues.cs
@@ -39,7 +39,7 @@ namespace Lucene.Net.Index
             }
             else if (size == 1)
             {
-                return leaves[0].Reader.GetNormValues(field);
+                return ((AtomicReader)leaves[0].Reader).GetNormValues(field);
             }
             FieldInfo fi = MultiFields.GetMergedFieldInfos(r).FieldInfo(field);
             if (fi == null || fi.HasNorms == false)
@@ -53,7 +53,7 @@ namespace Lucene.Net.Index
             for (int i = 0; i < size; i++)
             {
                 AtomicReaderContext context = leaves[i];
-                NumericDocValues v = context.Reader.GetNormValues(field);
+                NumericDocValues v = ((AtomicReader)context.Reader).GetNormValues(field);
                 if (v == null)
                 {
                     v = NumericDocValues.EMPTY;
@@ -100,7 +100,7 @@ namespace Lucene.Net.Index
             }
             else if (size == 1)
             {
-                return leaves[0].Reader.GetNumericDocValues(field);
+                return ((AtomicReader)leaves[0].Reader).GetNumericDocValues(field);
             }
 
             bool anyReal = false;
@@ -109,7 +109,7 @@ namespace Lucene.Net.Index
             for (int i = 0; i < size; i++)
             {
                 AtomicReaderContext context = leaves[i];
-                NumericDocValues v = context.Reader.GetNumericDocValues(field);
+                NumericDocValues v = ((AtomicReader)context.Reader).GetNumericDocValues(field);
                 if (v == null)
                 {
                     v = NumericDocValues.EMPTY;
@@ -162,7 +162,7 @@ namespace Lucene.Net.Index
             }
             else if (size == 1)
             {
-                return leaves[0].Reader.GetBinaryDocValues(field);
+                return ((AtomicReader)leaves[0].Reader).GetBinaryDocValues(field);
             }
 
             bool anyReal = false;
@@ -171,7 +171,7 @@ namespace Lucene.Net.Index
             for (int i = 0; i < size; i++)
             {
                 AtomicReaderContext context = leaves[i];
-                BinaryDocValues v = context.Reader.GetBinaryDocValues(field);
+                BinaryDocValues v = ((AtomicReader)context.Reader).GetBinaryDocValues(field);
                 if (v == null)
                 {
                     v = BinaryDocValues.EMPTY;
@@ -206,7 +206,7 @@ namespace Lucene.Net.Index
             }
             else if (size == 1)
             {
-                return leaves[0].Reader.GetSortedDocValues(field);
+                return ((AtomicReader)leaves[0].Reader).GetSortedDocValues(field);
             }
 
             bool anyReal = false;
@@ -215,7 +215,7 @@ namespace Lucene.Net.Index
             for (int i = 0; i < size; i++)
             {
                 AtomicReaderContext context = leaves[i];
-                SortedDocValues v = context.Reader.GetSortedDocValues(field);
+                SortedDocValues v = ((AtomicReader)context.Reader).GetSortedDocValues(field);
                 if (v == null)
                 {
                     v = SortedDocValues.EMPTY;
@@ -256,7 +256,7 @@ namespace Lucene.Net.Index
             }
             else if (size == 1)
             {
-                return leaves[0].Reader.GetSortedSetDocValues(field);
+                return ((AtomicReader)leaves[0].Reader).GetSortedSetDocValues(field);
             }
 
             bool anyReal = false;
@@ -265,7 +265,7 @@ namespace Lucene.Net.Index
             for (int i = 0; i < size; i++)
             {
                 AtomicReaderContext context = leaves[i];
-                SortedSetDocValues v = context.Reader.GetSortedSetDocValues(field);
+                SortedSetDocValues v = ((AtomicReader)context.Reader).GetSortedSetDocValues(field);
                 if (v == null)
                 {
                     v = SortedSetDocValues.EMPTY;

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/80561f72/src/core/Index/MultiReader.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/MultiReader.cs b/src/core/Index/MultiReader.cs
index c6c3778..f1c31cc 100644
--- a/src/core/Index/MultiReader.cs
+++ b/src/core/Index/MultiReader.cs
@@ -64,7 +64,7 @@ namespace Lucene.Net.Index
             }
         }
 
-        protected internal override void DoClose()
+        protected override void DoClose()
         {
             System.IO.IOException ioe = null;
             foreach (IndexReader r in GetSequentialSubReaders())

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/80561f72/src/core/Index/NormsConsumerPerField.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/NormsConsumerPerField.cs b/src/core/Index/NormsConsumerPerField.cs
index 7371f57..40df7ac 100644
--- a/src/core/Index/NormsConsumerPerField.cs
+++ b/src/core/Index/NormsConsumerPerField.cs
@@ -58,5 +58,9 @@ namespace Lucene.Net.Index
         {
             get { return consumer == null; }
         }
+
+        public override void Abort()
+        {
+        }
     }
 }

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/80561f72/src/core/Index/ParallelCompositeReader.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/ParallelCompositeReader.cs b/src/core/Index/ParallelCompositeReader.cs
index 6168ab2..c3452b0 100644
--- a/src/core/Index/ParallelCompositeReader.cs
+++ b/src/core/Index/ParallelCompositeReader.cs
@@ -125,7 +125,7 @@ namespace Lucene.Net.Index
             {
             }
 
-            protected internal override void DoClose()
+            protected override void DoClose()
             {
             }
         }
@@ -160,7 +160,7 @@ namespace Lucene.Net.Index
             }
         }
 
-        protected internal override void DoClose()
+        protected override void DoClose()
         {
             System.IO.IOException ioe = null;
             foreach (IndexReader reader in completeReaderSet)

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/80561f72/src/core/Index/SegmentInfos.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/SegmentInfos.cs b/src/core/Index/SegmentInfos.cs
index cc47b2f..569b19e 100644
--- a/src/core/Index/SegmentInfos.cs
+++ b/src/core/Index/SegmentInfos.cs
@@ -286,7 +286,7 @@ namespace Lucene.Net.Index
             }
 
 
-            public override object DoBody(string segmentFileName)
+            protected override object DoBody(string segmentFileName)
             {
                 enclosingInstance.Read(directory, segmentFileName);
                 return null;

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/80561f72/src/core/Index/SlowCompositeReaderWrapper.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/SlowCompositeReaderWrapper.cs b/src/core/Index/SlowCompositeReaderWrapper.cs
index 10dc3f6..6542b13 100644
--- a/src/core/Index/SlowCompositeReaderWrapper.cs
+++ b/src/core/Index/SlowCompositeReaderWrapper.cs
@@ -97,7 +97,7 @@ namespace Lucene.Net.Index
             for (int i = 0; i < size; i++)
             {
                 AtomicReaderContext context = in_renamed.Leaves[i];
-                SortedDocValues v = context.Reader.GetSortedDocValues(field);
+                SortedDocValues v = ((AtomicReader)context.Reader).GetSortedDocValues(field);
                 if (v == null)
                 {
                     v = SortedDocValues.EMPTY;
@@ -109,6 +109,52 @@ namespace Lucene.Net.Index
             return new MultiSortedDocValues(values, starts, map);
         }
 
+        public override SortedSetDocValues GetSortedSetDocValues(string field)
+        {
+            EnsureOpen();
+            OrdinalMap map = null;
+            lock (cachedOrdMaps)
+            {
+                map = cachedOrdMaps[field];
+                if (map == null)
+                {
+                    // uncached, or not a multi dv
+                    SortedSetDocValues dv = MultiDocValues.GetSortedSetValues(in_renamed, field);
+                    if (dv is MultiDocValues.MultiSortedSetDocValues)
+                    {
+                        map = ((MultiDocValues.MultiSortedSetDocValues)dv).mapping;
+                        if (map.owner == CoreCacheKey)
+                        {
+                            cachedOrdMaps[field] = map;
+                        }
+                    }
+                    return dv;
+                }
+            }
+            // cached ordinal map
+            if (FieldInfos.FieldInfo(field).DocValuesTypeValue != DocValuesType.SORTED_SET)
+            {
+                return null;
+            }
+            //assert map != null;
+            int size = in_renamed.Leaves.Count;
+            SortedSetDocValues[] values = new SortedSetDocValues[size];
+            int[] starts = new int[size + 1];
+            for (int i = 0; i < size; i++)
+            {
+                AtomicReaderContext context = in_renamed.Leaves[i];
+                SortedSetDocValues v = ((AtomicReader)context.Reader).GetSortedSetDocValues(field);
+                if (v == null)
+                {
+                    v = SortedSetDocValues.EMPTY;
+                }
+                values[i] = v;
+                starts[i] = context.docBase;
+            }
+            starts[size] = MaxDoc;
+            return new MultiDocValues.MultiSortedSetDocValues(values, starts, map);
+        }
+
         // TODO: this could really be a weak map somewhere else on the coreCacheKey,
         // but do we really need to optimize slow-wrapper any more?
         private readonly IDictionary<String, OrdinalMap> cachedOrdMaps = new HashMap<String, OrdinalMap>();

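Port note: the new GetSortedSetDocValues mirrors GetSortedDocValues above: look the OrdinalMap up under a lock, build the merged view when absent, and cache the mapping only when it is owned by this reader's core cache key. The guard-then-build shape, reduced to a self-contained sketch (names are illustrative, not the Lucene.Net API):

    using System;
    using System.Collections.Generic;

    class OrdinalMapCache<TKey, TValue> where TValue : class
    {
        private readonly Dictionary<TKey, TValue> cache = new Dictionary<TKey, TValue>();

        public TValue GetOrBuild(TKey key, Func<TKey, TValue> build, Func<TValue, bool> cacheable)
        {
            lock (cache)
            {
                TValue v;
                if (cache.TryGetValue(key, out v))
                    return v;            // cached: reuse the merged map
                v = build(key);          // uncached, or not a multi dv
                if (cacheable(v))        // e.g. map.owner == CoreCacheKey
                    cache[key] = v;
                return v;
            }
        }
    }
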
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/80561f72/src/core/Index/StandardDirectoryReader.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/StandardDirectoryReader.cs b/src/core/Index/StandardDirectoryReader.cs
index fb3e0c8..dd9c006 100644
--- a/src/core/Index/StandardDirectoryReader.cs
+++ b/src/core/Index/StandardDirectoryReader.cs
@@ -33,7 +33,7 @@ namespace Lucene.Net.Index
             {
             }
 
-            public override object DoBody(string segmentFileName)
+            protected override object DoBody(string segmentFileName)
             {
                 SegmentInfos sis = new SegmentInfos();
                 sis.Read(directory, segmentFileName);
@@ -271,12 +271,12 @@ namespace Lucene.Net.Index
             return buffer.ToString();
         }
 
-        protected override DirectoryReader DoOpenIfChanged()
+        protected internal override DirectoryReader DoOpenIfChanged()
         {
             return DoOpenIfChanged((IndexCommit)null);
         }
 
-        protected override DirectoryReader DoOpenIfChanged(IndexCommit commit)
+        protected internal override DirectoryReader DoOpenIfChanged(IndexCommit commit)
         {
             EnsureOpen();
 
@@ -292,7 +292,7 @@ namespace Lucene.Net.Index
             }
         }
 
-        protected override DirectoryReader DoOpenIfChanged(IndexWriter writer, bool applyAllDeletes)
+        protected internal override DirectoryReader DoOpenIfChanged(IndexWriter writer, bool applyAllDeletes)
         {
             EnsureOpen();
             if (writer == this.writer && applyAllDeletes == this.applyAllDeletes)
@@ -363,7 +363,7 @@ namespace Lucene.Net.Index
                 this.parent = parent;
             }
 
-            public override object DoBody(string segmentFileName)
+            protected override object DoBody(string segmentFileName)
             {
                 SegmentInfos infos = new SegmentInfos();
                 infos.Read(directory, segmentFileName);

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/80561f72/src/core/Index/TermVectorsConsumerPerField.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/TermVectorsConsumerPerField.cs b/src/core/Index/TermVectorsConsumerPerField.cs
index a6aaec3..adda8ec 100644
--- a/src/core/Index/TermVectorsConsumerPerField.cs
+++ b/src/core/Index/TermVectorsConsumerPerField.cs
@@ -51,18 +51,18 @@ namespace Lucene.Net.Index
             for (int i = 0; i < count; i++)
             {
                 IIndexableField field = fields[i];
-                if (field.FieldType.Indexed)
+                if (field.FieldTypeValue.Indexed)
                 {
-                    if (field.FieldType.StoreTermVectors)
+                    if (field.FieldTypeValue.StoreTermVectors)
                     {
                         doVectors = true;
-                        doVectorPositions |= field.FieldType.StoreTermVectorPositions;
-                        doVectorOffsets |= field.FieldType.StoreTermVectorOffsets;
+                        doVectorPositions |= field.FieldTypeValue.StoreTermVectorPositions;
+                        doVectorOffsets |= field.FieldTypeValue.StoreTermVectorOffsets;
                         if (doVectorPositions)
                         {
-                            doVectorPayloads |= field.FieldType.StoreTermVectorPayloads;
+                            doVectorPayloads |= field.FieldTypeValue.StoreTermVectorPayloads;
                         }
-                        else if (field.FieldType.StoreTermVectorPayloads)
+                        else if (field.FieldTypeValue.StoreTermVectorPayloads)
                         {
                             // TODO: move this check somewhere else, and impl the other missing ones
                             throw new ArgumentException("cannot index term vector payloads for field: " + field + " without term vector positions");
@@ -125,7 +125,7 @@ namespace Lucene.Net.Index
 
         public void Abort() { }
 
-        internal void Finish()
+        public override void Finish()
         {
             if (!doVectors || termsHashPerField.bytesHash.Size == 0)
             {

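Port note: Start() above reads the per-field term-vector flags and rejects payloads unless positions are also stored. A hedged example of a field type that passes the check (property names as they appear in this branch_4x port; later versions may differ):

    var ft = new Lucene.Net.Documents.FieldType();
    ft.Indexed = true;
    ft.StoreTermVectors = true;
    ft.StoreTermVectorPositions = true;   // must be on before payloads...
    ft.StoreTermVectorPayloads = true;    // ...otherwise Start() throws ArgumentException
    ft.Freeze();
    var field = new Lucene.Net.Documents.Field("body", "some text", ft);
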
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/80561f72/src/core/Index/TermsEnum.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/TermsEnum.cs b/src/core/Index/TermsEnum.cs
index 49b003b..47fa599 100644
--- a/src/core/Index/TermsEnum.cs
+++ b/src/core/Index/TermsEnum.cs
@@ -107,7 +107,7 @@ namespace Lucene.Net.Index
                 get { throw new InvalidOperationException("this property should never be called."); }
             }
 
-            public IComparer<BytesRef> Comparator
+            public override IComparer<BytesRef> Comparator
             {
                 get { return null; }
             }
@@ -137,7 +137,7 @@ namespace Lucene.Net.Index
                 throw new InvalidOperationException("this method should never be called.");
             }
 
-            public BytesRef Next()
+            public override BytesRef Next()
             {
                 return null;
             }

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/80561f72/src/core/Index/TermsHash.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/TermsHash.cs b/src/core/Index/TermsHash.cs
index 5799ed4..e6e2d00 100644
--- a/src/core/Index/TermsHash.cs
+++ b/src/core/Index/TermsHash.cs
@@ -101,7 +101,7 @@ namespace Lucene.Net.Index
             bytePool.Reset(false, false);
         }
 
-        internal override void Flush(IDictionary<string, InvertedDocConsumerPerField> fieldsToFlush, SegmentWriteState state)
+        public override void Flush(IDictionary<string, InvertedDocConsumerPerField> fieldsToFlush, SegmentWriteState state)
         {
             IDictionary<String, TermsHashConsumerPerField> childFields = new HashMap<String, TermsHashConsumerPerField>();
             IDictionary<String, InvertedDocConsumerPerField> nextChildFields;

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/80561f72/src/core/Index/TermsHashPerField.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/TermsHashPerField.cs b/src/core/Index/TermsHashPerField.cs
index a9428c8..b4c3ebc 100644
--- a/src/core/Index/TermsHashPerField.cs
+++ b/src/core/Index/TermsHashPerField.cs
@@ -118,7 +118,7 @@ namespace Lucene.Net.Index
         private bool doCall;
         private bool doNextCall;
 
-        internal override void Start(IIndexableField f)
+        public override void Start(IIndexableField f)
         {
             termAtt = fieldState.attributeSource.AddAttribute<ITermToBytesRefAttribute>();
             termBytesRef = termAtt.BytesRef;
@@ -129,7 +129,7 @@ namespace Lucene.Net.Index
             }
         }
 
-        internal override bool Start(IIndexableField[] fields, int count)
+        public override bool Start(IIndexableField[] fields, int count)
         {
             doCall = consumer.Start(fields, count);
             bytesHash.Reinit();
@@ -186,7 +186,7 @@ namespace Lucene.Net.Index
         }
 
         // Primary entry point (for first TermsHash)
-        internal override void Add()
+        public override void Add()
         {
             // We are first in the chain so we must "intern" the
             // term text into textStart address
@@ -302,7 +302,7 @@ namespace Lucene.Net.Index
             WriteByte(stream, (byte)i);
         }
 
-        internal override void Finish()
+        public override void Finish()
         {
             consumer.Finish();
             if (nextPerField != null)

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/80561f72/src/core/Lucene.Net.csproj
----------------------------------------------------------------------
diff --git a/src/core/Lucene.Net.csproj b/src/core/Lucene.Net.csproj
index 306396c..de68b1d 100644
--- a/src/core/Lucene.Net.csproj
+++ b/src/core/Lucene.Net.csproj
@@ -193,6 +193,7 @@
     <Compile Include="Codecs\Compressing\CompressingStoredFieldsWriter.cs" />
     <Compile Include="Codecs\Compressing\CompressingTermVectorsFormat.cs" />
     <Compile Include="Codecs\Compressing\CompressingTermVectorsReader.cs" />
+    <Compile Include="Codecs\Compressing\CompressingTermVectorsWriter.cs" />
     <Compile Include="Codecs\Compressing\CompressionMode.cs" />
     <Compile Include="Codecs\Compressing\Compressor.cs" />
     <Compile Include="Codecs\Compressing\Decompressor.cs" />

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/80561f72/src/core/Search/AutomatonQuery.cs
----------------------------------------------------------------------
diff --git a/src/core/Search/AutomatonQuery.cs b/src/core/Search/AutomatonQuery.cs
index 91da42f..6a5440f 100644
--- a/src/core/Search/AutomatonQuery.cs
+++ b/src/core/Search/AutomatonQuery.cs
@@ -23,7 +23,7 @@ namespace Lucene.Net.Search
             this.compiled = new CompiledAutomaton(automaton);
         }
 
-        protected override TermsEnum GetTermsEnum(Terms terms, AttributeSource atts)
+        protected internal override TermsEnum GetTermsEnum(Terms terms, AttributeSource atts)
         {
             return compiled.GetTermsEnum(terms);
         }

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/80561f72/src/core/Search/BitsFilteredDocIdSet.cs
----------------------------------------------------------------------
diff --git a/src/core/Search/BitsFilteredDocIdSet.cs b/src/core/Search/BitsFilteredDocIdSet.cs
index d3f607f..6db4a39 100644
--- a/src/core/Search/BitsFilteredDocIdSet.cs
+++ b/src/core/Search/BitsFilteredDocIdSet.cs
@@ -34,7 +34,7 @@ namespace Lucene.Net.Search
 			this.acceptDocs = acceptDocs;
 		}
         
-		protected override bool Match(int docid)
+		public override bool Match(int docid)
 		{
 			return acceptDocs[docid];
 		}

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/80561f72/src/core/Search/ConstantScoreAutoRewrite.cs
----------------------------------------------------------------------
diff --git a/src/core/Search/ConstantScoreAutoRewrite.cs b/src/core/Search/ConstantScoreAutoRewrite.cs
index 28478e9..f12f920 100644
--- a/src/core/Search/ConstantScoreAutoRewrite.cs
+++ b/src/core/Search/ConstantScoreAutoRewrite.cs
@@ -59,11 +59,11 @@ namespace Lucene.Net.Search
             }
             else if (size == 0)
             {
-                return GetTopLevelQuery();
+                return TopLevelQuery;
             }
             else
             {
-                BooleanQuery bq = GetTopLevelQuery();
+                BooleanQuery bq = TopLevelQuery;
                 BytesRefHash pendingTerms = col.pendingTerms;
                 int[] sort = pendingTerms.Sort(col.termsEnum.Comparator);
                 for (int i = 0; i < size; i++)
@@ -86,6 +86,9 @@ namespace Lucene.Net.Search
             {
                 this.docCountCutoff = docCountCutoff;
                 this.termCountLimit = termCountLimit;
+
+                // .NET Port: moved from inline here
+                this.pendingTerms = new BytesRefHash(new ByteBlockPool(new ByteBlockPool.DirectAllocator()), 16, array);
             }
 
             public override void SetNextEnum(TermsEnum termsEnum)
@@ -108,7 +111,7 @@ namespace Lucene.Net.Search
                 if (pos < 0)
                 {
                     pos = (-pos) - 1;
-                    array.termState[pos].register(termState, readerContext.ord, termsEnum.DocFreq, termsEnum.TotalTermFreq);
+                    array.termState[pos].Register(termState, readerContext.ord, termsEnum.DocFreq, termsEnum.TotalTermFreq);
                 }
                 else
                 {
@@ -123,7 +126,7 @@ namespace Lucene.Net.Search
 
             internal int docCountCutoff, termCountLimit;
             internal TermStateByteStart array = new TermStateByteStart(16);
-            internal BytesRefHash pendingTerms = new BytesRefHash(new ByteBlockPool(new ByteBlockPool.DirectAllocator()), 16, array);
+            internal BytesRefHash pendingTerms; // .NET port: initialization moved to ctor
         }
 
         public override int GetHashCode()

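Port note on the moved initialization: a C# field initializer may not reference another instance member ('array' here), which Java's inline initialization allows, so the BytesRefHash construction has to live in the constructor. Reduced sketch of the working shape (constructor parameters omitted):

    internal class CutOffTermCollector
    {
        internal TermStateByteStart array = new TermStateByteStart(16);
        internal BytesRefHash pendingTerms;   // inline init would be error CS0236

        internal CutOffTermCollector()
        {
            // Safe here: 'array' is assigned by its initializer before
            // any constructor body runs.
            pendingTerms = new BytesRefHash(
                new ByteBlockPool(new ByteBlockPool.DirectAllocator()), 16, array);
        }
    }
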
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/80561f72/src/core/Search/DocTermOrdsRangeFilter.cs
----------------------------------------------------------------------
diff --git a/src/core/Search/DocTermOrdsRangeFilter.cs b/src/core/Search/DocTermOrdsRangeFilter.cs
index 1963348..1f2a2ea 100644
--- a/src/core/Search/DocTermOrdsRangeFilter.cs
+++ b/src/core/Search/DocTermOrdsRangeFilter.cs
@@ -25,7 +25,7 @@ namespace Lucene.Net.Search
             this.includeUpper = includeUpper;
         }
 
-        public abstract DocIdSet GetDocIdSet(AtomicReaderContext context, IBits acceptDocs);
+        public abstract override DocIdSet GetDocIdSet(AtomicReaderContext context, IBits acceptDocs);
 
         public static DocTermOrdsRangeFilter NewBytesRefRange(string field, BytesRef lowerVal, BytesRef upperVal, bool includeLower, bool includeUpper)
         {
@@ -41,7 +41,7 @@ namespace Lucene.Net.Search
 
             public override DocIdSet GetDocIdSet(AtomicReaderContext context, IBits acceptDocs)
             {
-                SortedSetDocValues docTermOrds = FieldCache.DEFAULT.GetDocTermOrds(context.Reader, field);
+                SortedSetDocValues docTermOrds = FieldCache.DEFAULT.GetDocTermOrds((AtomicReader)context.Reader, field);
                 long lowerPoint = lowerVal == null ? -1 : docTermOrds.LookupTerm(lowerVal);
                 long upperPoint = upperVal == null ? -1 : docTermOrds.LookupTerm(upperVal);
 
@@ -92,7 +92,7 @@ namespace Lucene.Net.Search
 
                 //assert inclusiveLowerPoint >= 0 && inclusiveUpperPoint >= 0;
 
-                return new AnonymousFieldCacheDocIdSet(context.Reader.MaxDoc, acceptDocs);
+                return new AnonymousFieldCacheDocIdSet(context.Reader.MaxDoc, acceptDocs, docTermOrds, inclusiveLowerPoint, inclusiveUpperPoint);
             }
 
             private sealed class AnonymousFieldCacheDocIdSet : FieldCacheDocIdSet

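Port note: the (AtomicReader) casts here and in the FieldCache hunks below exist because this port types AtomicReaderContext.Reader as the base IndexReader, while the FieldCache APIs want an AtomicReader. A small sketch of the recurring pattern:

    static SortedSetDocValues LeafDocTermOrds(AtomicReaderContext context, string field)
    {
        // A leaf context always wraps an AtomicReader, so the downcast holds.
        var reader = (AtomicReader)context.Reader;
        return FieldCache.DEFAULT.GetDocTermOrds(reader, field);
    }
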
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/80561f72/src/core/Search/FieldCacheRangeFilter.cs
----------------------------------------------------------------------
diff --git a/src/core/Search/FieldCacheRangeFilter.cs b/src/core/Search/FieldCacheRangeFilter.cs
index bcb82b3..21b842a 100644
--- a/src/core/Search/FieldCacheRangeFilter.cs
+++ b/src/core/Search/FieldCacheRangeFilter.cs
@@ -73,7 +73,7 @@ namespace Lucene.Net.Search
                     this.inclusiveUpperPoint = inclusiveUpperPoint;
                 }
 
-                internal override bool MatchDoc(int doc)
+                protected override bool MatchDoc(int doc)
                 {
                     int docOrd = fcsi.GetOrd(doc);
                     return docOrd >= inclusiveLowerPoint && docOrd <= inclusiveUpperPoint;
@@ -87,7 +87,7 @@ namespace Lucene.Net.Search
 
             public override DocIdSet GetDocIdSet(AtomicReaderContext context, IBits acceptDocs)
             {
-                SortedDocValues fcsi = FieldCache.DEFAULT.GetTermsIndex(context.Reader, field);
+                SortedDocValues fcsi = FieldCache.DEFAULT.GetTermsIndex((AtomicReader)context.Reader, field);
                 int lowerPoint = lowerVal == null ? -1 : fcsi.LookupTerm(new BytesRef(lowerVal));
                 int upperPoint = upperVal == null ? -1 : fcsi.LookupTerm(new BytesRef(upperVal));
 
@@ -161,7 +161,7 @@ namespace Lucene.Net.Search
                     this.inclusiveUpperPoint = inclusiveUpperPoint;
                 }
 
-                internal override bool MatchDoc(int doc)
+                protected override bool MatchDoc(int doc)
                 {
                     int docOrd = fcsi.GetOrd(doc);
                     return docOrd >= inclusiveLowerPoint && docOrd <= inclusiveUpperPoint;
@@ -175,7 +175,7 @@ namespace Lucene.Net.Search
 
             public override DocIdSet GetDocIdSet(AtomicReaderContext context, IBits acceptDocs)
             {
-                SortedDocValues fcsi = FieldCache.DEFAULT.GetTermsIndex(context.Reader, field);
+                SortedDocValues fcsi = FieldCache.DEFAULT.GetTermsIndex((AtomicReader)context.Reader, field);
                 int lowerPoint = lowerVal == null ? -1 : fcsi.LookupTerm(lowerVal);
                 int upperPoint = upperVal == null ? -1 : fcsi.LookupTerm(upperVal);
 
@@ -246,7 +246,7 @@ namespace Lucene.Net.Search
                     this.inclusiveUpperPoint = inclusiveUpperPoint;
                 }
 
-                internal override bool MatchDoc(int doc)
+                protected override bool MatchDoc(int doc)
                 {
                     sbyte value = values.Get(doc);
                     return value >= inclusiveLowerPoint && value <= inclusiveUpperPoint;
@@ -287,7 +287,7 @@ namespace Lucene.Net.Search
                 if (inclusiveLowerPoint > inclusiveUpperPoint)
                     return DocIdSet.EMPTY_DOCIDSET;
 
-                FieldCache.Bytes values = FieldCache.DEFAULT.GetBytes(context.Reader, field, (FieldCache.IByteParser)parser, false);
+                FieldCache.Bytes values = FieldCache.DEFAULT.GetBytes((AtomicReader)context.Reader, field, (FieldCache.IByteParser)parser, false);
 
                 // we only request the usage of termDocs, if the range contains 0
                 return new AnonymousClassFieldCacheDocIdSet(values, inclusiveLowerPoint, inclusiveUpperPoint, context.Reader.MaxDoc, acceptDocs);
@@ -311,7 +311,7 @@ namespace Lucene.Net.Search
                     this.inclusiveUpperPoint = inclusiveUpperPoint;
                 }
 
-                internal override bool MatchDoc(int doc)
+                protected override bool MatchDoc(int doc)
                 {
                     short value = values.Get(doc);
                     return value >= inclusiveLowerPoint && value <= inclusiveUpperPoint;
@@ -353,7 +353,7 @@ namespace Lucene.Net.Search
                 if (inclusiveLowerPoint > inclusiveUpperPoint)
                     return DocIdSet.EMPTY_DOCIDSET;
 
-                FieldCache.Shorts values = FieldCache.DEFAULT.GetShorts(context.Reader, field, (FieldCache.IShortParser)parser, false);
+                FieldCache.Shorts values = FieldCache.DEFAULT.GetShorts((AtomicReader)context.Reader, field, (FieldCache.IShortParser)parser, false);
 
                 // we only request the usage of termDocs, if the range contains 0
                 return new AnonymousClassFieldCacheDocIdSet(values, inclusiveLowerPoint, inclusiveUpperPoint, context.Reader.MaxDoc, acceptDocs);
@@ -378,7 +378,7 @@ namespace Lucene.Net.Search
                     this.inclusiveUpperPoint = inclusiveUpperPoint;
                 }
 
-                internal override bool MatchDoc(int doc)
+                protected override bool MatchDoc(int doc)
                 {
                     int value = values.Get(doc);
                     return value >= inclusiveLowerPoint && value <= inclusiveUpperPoint;
@@ -420,7 +420,7 @@ namespace Lucene.Net.Search
                 if (inclusiveLowerPoint > inclusiveUpperPoint)
                     return DocIdSet.EMPTY_DOCIDSET;
 
-                FieldCache.Ints values = FieldCache.DEFAULT.GetInts(context.Reader, field, (FieldCache.IIntParser)parser, false);
+                FieldCache.Ints values = FieldCache.DEFAULT.GetInts((AtomicReader)context.Reader, field, (FieldCache.IIntParser)parser, false);
                 // we only request the usage of termDocs, if the range contains 0
                 return new AnonymousClassFieldCacheDocIdSet(values, inclusiveLowerPoint, inclusiveUpperPoint, context.Reader.MaxDoc, acceptDocs);
             }
@@ -444,7 +444,7 @@ namespace Lucene.Net.Search
                     this.inclusiveUpperPoint = inclusiveUpperPoint;
                 }
 
-                internal override bool MatchDoc(int doc)
+                protected override bool MatchDoc(int doc)
                 {
                     long value = values.Get(doc);
                     return value >= inclusiveLowerPoint && value <= inclusiveUpperPoint;
@@ -486,7 +486,7 @@ namespace Lucene.Net.Search
                 if (inclusiveLowerPoint > inclusiveUpperPoint)
                     return DocIdSet.EMPTY_DOCIDSET;
 
-                FieldCache.Longs values = FieldCache.DEFAULT.GetLongs(context.Reader, field, (FieldCache.ILongParser)parser, false);
+                FieldCache.Longs values = FieldCache.DEFAULT.GetLongs((AtomicReader)context.Reader, field, (FieldCache.ILongParser)parser, false);
                 // we only request the usage of termDocs, if the range contains 0
                 return new AnonymousClassFieldCacheDocIdSet(values, inclusiveLowerPoint, inclusiveUpperPoint, context.Reader.MaxDoc, acceptDocs);
             }
@@ -510,7 +510,7 @@ namespace Lucene.Net.Search
                     this.inclusiveUpperPoint = inclusiveUpperPoint;
                 }
 
-                internal override bool MatchDoc(int doc)
+                protected override bool MatchDoc(int doc)
                 {
                     float value = values.Get(doc);
                     return value >= inclusiveLowerPoint && value <= inclusiveUpperPoint;
@@ -556,7 +556,7 @@ namespace Lucene.Net.Search
                 if (inclusiveLowerPoint > inclusiveUpperPoint)
                     return DocIdSet.EMPTY_DOCIDSET;
 
-                FieldCache.Floats values = FieldCache.DEFAULT.GetFloats(context.Reader, field, (FieldCache.IFloatParser)parser, false);
+                FieldCache.Floats values = FieldCache.DEFAULT.GetFloats((AtomicReader)context.Reader, field, (FieldCache.IFloatParser)parser, false);
 
                 // we only request the usage of termDocs, if the range contains 0
                 return new AnonymousClassFieldCacheDocIdSet(values, inclusiveLowerPoint, inclusiveUpperPoint, context.Reader.MaxDoc, acceptDocs);
@@ -581,7 +581,7 @@ namespace Lucene.Net.Search
                     this.inclusiveUpperPoint = inclusiveUpperPoint;
                 }
 
-                internal override bool MatchDoc(int doc)
+                protected override bool MatchDoc(int doc)
                 {
                     double value = values.Get(doc);
                     return value >= inclusiveLowerPoint && value <= inclusiveUpperPoint;
@@ -627,7 +627,7 @@ namespace Lucene.Net.Search
                 if (inclusiveLowerPoint > inclusiveUpperPoint)
                     return DocIdSet.EMPTY_DOCIDSET;
 
-                FieldCache.Doubles values = FieldCache.DEFAULT.GetDoubles(context.Reader, field, (FieldCache.IDoubleParser)parser, false);
+                FieldCache.Doubles values = FieldCache.DEFAULT.GetDoubles((AtomicReader)context.Reader, field, (FieldCache.IDoubleParser)parser, false);
 
                 // we only request the usage of termDocs, if the range contains 0
                 return new AnonymousClassFieldCacheDocIdSet(values, inclusiveLowerPoint, inclusiveUpperPoint, context.Reader.MaxDoc, acceptDocs);
@@ -779,7 +779,7 @@ namespace Lucene.Net.Search
         }
 
         /// <summary>This method is implemented for each data type </summary>
-        public abstract DocIdSet GetDocIdSet(AtomicReaderContext context, IBits acceptDocs);
+        public abstract override DocIdSet GetDocIdSet(AtomicReaderContext context, IBits acceptDocs);
 
         public override string ToString()
         {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/80561f72/src/core/Search/FieldComparator.cs
----------------------------------------------------------------------
diff --git a/src/core/Search/FieldComparator.cs b/src/core/Search/FieldComparator.cs
index 7bdf4be..59ab91e 100644
--- a/src/core/Search/FieldComparator.cs
+++ b/src/core/Search/FieldComparator.cs
@@ -98,11 +98,11 @@ namespace Lucene.Net.Search
         /// </param>
         /// <returns> value in this slot upgraded to Comparable
         /// </returns>
-        public abstract override T Value(int slot);
+        public abstract override object Value(int slot);
 
         public T this[int slot]
         {
-            get { return Value(slot); }
+            get { return (T)Value(slot); }
         }
 
         public virtual int CompareValues(T first, T second)
@@ -298,7 +298,7 @@ namespace Lucene.Net.Search
                 this.bottom = values[bottom];
             }
 
-            public override sbyte Value(int slot)
+            public override object Value(int slot)
             {
                 return values[slot];
             }
@@ -378,7 +378,7 @@ namespace Lucene.Net.Search
                 this.bottom = values[bottom];
             }
 
-            public override double Value(int slot)
+            public override object Value(int slot)
             {
                 return values[slot];
             }
@@ -459,7 +459,7 @@ namespace Lucene.Net.Search
                 this.bottom = values[bottom];
             }
 
-            public override float Value(int slot)
+            public override object Value(int slot)
             {
                 return values[slot];
             }
@@ -539,7 +539,7 @@ namespace Lucene.Net.Search
                 this.bottom = values[bottom];
             }
 
-            public override short Value(int slot)
+            public override object Value(int slot)
             {
                 return values[slot];
             }
@@ -651,7 +651,7 @@ namespace Lucene.Net.Search
                 this.bottom = values[bottom];
             }
 
-            public override int Value(int slot)
+            public override object Value(int slot)
             {
                 return values[slot];
             }
@@ -770,7 +770,7 @@ namespace Lucene.Net.Search
                 this.bottom = values[bottom];
             }
 
-            public override long Value(int slot)
+            public override object Value(int slot)
             {
                 return values[slot];
             }
@@ -861,7 +861,7 @@ namespace Lucene.Net.Search
                 }
             }
 
-            public override float Value(int slot)
+            public override object Value(int slot)
             {
                 return scores[slot];
             }
@@ -925,7 +925,7 @@ namespace Lucene.Net.Search
                 this.bottom = docIDs[bottom];
             }
 
-            public override int Value(int slot)
+            public override object Value(int slot)
             {
                 return docIDs[slot];
             }
@@ -1048,7 +1048,7 @@ namespace Lucene.Net.Search
                     parent.SetBottom(slot);
                 }
 
-                public override BytesRef Value(int slot)
+                public override object Value(int slot)
                 {
                     return parent.Value(slot);
                 }
@@ -1185,7 +1185,7 @@ namespace Lucene.Net.Search
                 }
             }
 
-            public override BytesRef Value(int slot)
+            public override object Value(int slot)
             {
                 return values[slot];
             }
@@ -1263,7 +1263,7 @@ namespace Lucene.Net.Search
                 this.bottom = values[bottom];
             }
 
-            public override BytesRef Value(int slot)
+            public override object Value(int slot)
             {
                 return values[slot];
             }

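Port note: Java's FieldComparator<T> relies on covariant return types for value(int), which C# (before 9.0) does not allow on overrides. The port therefore widens every Value(int) override to object and recovers the typed view through the this[int] indexer. The pattern in isolation:

    abstract class SlotComparator<T>
    {
        public abstract object Value(int slot);

        public T this[int slot]
        {
            get { return (T)Value(slot); }   // typed access recovers T
        }
    }

    class IntSlotComparator : SlotComparator<int>
    {
        private readonly int[] values = new int[32];

        public override object Value(int slot)
        {
            return values[slot];   // boxes the int; the indexer unboxes it
        }
    }
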
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/80561f72/src/core/Search/FieldValueHitQueue.cs
----------------------------------------------------------------------
diff --git a/src/core/Search/FieldValueHitQueue.cs b/src/core/Search/FieldValueHitQueue.cs
index 1fea37e..fc162ae 100644
--- a/src/core/Search/FieldValueHitQueue.cs
+++ b/src/core/Search/FieldValueHitQueue.cs
@@ -70,7 +70,7 @@ namespace Lucene.Net.Search
             /// <param name="hitA">ScoreDoc</param>
             /// <param name="hitB">ScoreDoc</param>
             /// <returns><c>true</c> if document <c>a</c> should be sorted after document <c>b</c>.</returns>
-            public override bool LessThan(Entry hitA, Entry hitB)
+            public override bool LessThan(T hitA, T hitB)
             {
                 Debug.Assert(hitA != hitB);
                 Debug.Assert(hitA.slot != hitB.slot);
@@ -84,6 +84,11 @@ namespace Lucene.Net.Search
                 // avoid random sort order that could lead to duplicates (bug #31241):
                 return hitA.Doc > hitB.Doc;
             }
+
+            public override bool LessThan(Entry a, Entry b)
+            {
+                return LessThan((T)a, (T)b);
+            }
         }
 
         /// <summary> An implementation of <see cref="FieldValueHitQueue" /> which is optimized in case
@@ -106,7 +111,7 @@ namespace Lucene.Net.Search
                 }
             }
 
-            public override bool LessThan(Entry hitA, Entry hitB)
+            public override bool LessThan(T hitA, T hitB)
             {
                 Debug.Assert(hitA != hitB);
                 Debug.Assert(hitA.slot != hitB.slot);
@@ -125,6 +130,11 @@ namespace Lucene.Net.Search
                 // avoid random sort order that could lead to duplicates (bug #31241):
                 return hitA.Doc > hitB.Doc;
             }
+
+            public override bool LessThan(Entry a, Entry b)
+            {
+                return LessThan((T)a, (T)b);
+            }
         }
 
 
@@ -211,7 +221,7 @@ namespace Lucene.Net.Search
         protected internal FieldComparator firstComparator;
         protected internal int[] reverseMul;
 
-        public abstract override bool LessThan(FieldValueHitQueue.Entry a, FieldValueHitQueue.Entry b);
+        public abstract bool LessThan(FieldValueHitQueue.Entry a, FieldValueHitQueue.Entry b);
 
         /// <summary> Given a queue Entry, creates a corresponding FieldDoc
         /// that contains the values used to sort the given document.

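Port note: the non-generic LessThan(Entry, Entry) overloads added above exist only to satisfy the abstract declaration on the non-generic base; they must cast before forwarding (as fixed above), because a cast-free call is an exact match for the Entry overload itself and recurses without end. The overload-resolution trap in isolation:

    class Entry { public int Slot; }

    abstract class QueueBase
    {
        public abstract bool LessThan(Entry a, Entry b);
    }

    class Queue<T> : QueueBase where T : Entry
    {
        public bool LessThan(T a, T b)
        {
            return a.Slot < b.Slot;          // the real comparison
        }

        public override bool LessThan(Entry a, Entry b)
        {
            // 'return LessThan(a, b);' would bind right back to this
            // method; the casts select the strongly typed overload.
            return LessThan((T)a, (T)b);
        }
    }
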
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/80561f72/src/core/Search/NumericRangeQuery.cs
----------------------------------------------------------------------
diff --git a/src/core/Search/NumericRangeQuery.cs b/src/core/Search/NumericRangeQuery.cs
index a5269bb..1b4e90c 100644
--- a/src/core/Search/NumericRangeQuery.cs
+++ b/src/core/Search/NumericRangeQuery.cs
@@ -171,7 +171,7 @@ namespace Lucene.Net.Search
             this.maxInclusive = maxInclusive;
         }
 
-        protected override TermsEnum GetTermsEnum(Terms terms, Util.AttributeSource atts)
+        protected internal override TermsEnum GetTermsEnum(Terms terms, Util.AttributeSource atts)
         {
             if (min.HasValue && max.HasValue && (min.Value).CompareTo(max.Value) > 0)
             {
@@ -181,7 +181,7 @@ namespace Lucene.Net.Search
         }
 
 	    /// <summary>Returns the field name for this query </summary>
-	    public string Field
+	    public override string Field
 	    {
 	        get { return field; }
 	    }
@@ -258,7 +258,7 @@ namespace Lucene.Net.Search
         [System.Runtime.Serialization.OnDeserialized]
         internal void OnDeserialized(System.Runtime.Serialization.StreamingContext context)
         {
-            field = StringHelper.Intern(field);
+            field = string.Intern(field);
         }
 		
 		// members (package private, to be also fast accessible by NumericRangeTermsEnum)

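Port note: string.Intern replaces StringHelper.Intern after deserialization, so the field name is the canonical pooled instance and can later be compared by reference. For illustration:

    string a = string.Intern(new string(new[] { 'b', 'o', 'd', 'y' }));
    string b = string.Intern("body");
    bool same = object.ReferenceEquals(a, b);   // true: one pooled instance
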
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/80561f72/src/core/Search/Payloads/PayloadNearQuery.cs
----------------------------------------------------------------------
diff --git a/src/core/Search/Payloads/PayloadNearQuery.cs b/src/core/Search/Payloads/PayloadNearQuery.cs
index 08b201c..ddef9cb 100644
--- a/src/core/Search/Payloads/PayloadNearQuery.cs
+++ b/src/core/Search/Payloads/PayloadNearQuery.cs
@@ -8,232 +8,237 @@ using Lucene.Net.Search.Similarities;
 using Lucene.Net.Search.Spans;
 using Lucene.Net.Util;
 
-public class PayloadNearQuery : SpanNearQuery
+namespace Lucene.Net.Search.Payloads
 {
-    protected string fieldName;
-    protected PayloadFunction function;
-
-    public PayloadNearQuery(SpanQuery[] clauses, int slop, bool inOrder)
-        : this(clauses, slop, inOrder, new AveragePayloadFunction())
+    public class PayloadNearQuery : SpanNearQuery
     {
-    }
-
-    public PayloadNearQuery(SpanQuery[] clauses, int slop, bool inOrder,
-                            PayloadFunction function)
-        : base(clauses, slop, inOrder)
-    {
-        fieldName = clauses[0].Field; // all clauses must have same field
-        this.function = function;
-    }
+        protected string fieldName;
+        protected PayloadFunction function;
 
-    public override Weight CreateWeight(IndexSearcher searcher)
-    {
-        return new PayloadNearSpanWeight(this, searcher);
-    }
+        public PayloadNearQuery(SpanQuery[] clauses, int slop, bool inOrder)
+            : this(clauses, slop, inOrder, new AveragePayloadFunction())
+        {
+        }
 
-    public override PayloadNearQuery Clone()
-    {
-        int sz = clauses.Count;
-        var newClauses = new SpanQuery[sz];
+        public PayloadNearQuery(SpanQuery[] clauses, int slop, bool inOrder,
+                                PayloadFunction function)
+            : base(clauses, slop, inOrder)
+        {
+            fieldName = clauses[0].Field; // all clauses must have same field
+            this.function = function;
+        }
 
-        for (int i = 0; i < sz; i++)
+        public override Weight CreateWeight(IndexSearcher searcher)
         {
-            newClauses[i] = (SpanQuery) clauses.[i].clone();
+            return new PayloadNearSpanWeight(this, searcher);
         }
-        var boostingNearQuery = new PayloadNearQuery(newClauses, Slop,
-                                                     inOrder, function);
-        boostingNearQuery.Boost = Boost)
-        ;
-        return boostingNearQuery;
-    }
 
-    public override string ToString(string field)
-    {
-        var buffer = new StringBuilder();
-        buffer.Append("payloadNear([");
+        public override object Clone()
+        {
+            int sz = clauses.Count;
+            var newClauses = new SpanQuery[sz];
+
+            for (int i = 0; i < sz; i++)
+            {
+                newClauses[i] = (SpanQuery)clauses[i].Clone();
+            }
+            var boostingNearQuery = new PayloadNearQuery(newClauses, Slop,
+                                                         inOrder, function);
+            boostingNearQuery.Boost = Boost;
+            return boostingNearQuery;
+        }
 
-        IEnumerator<SpanQuery> i = clauses.GetEnumerator();
-        while (i.MoveNext())
+        public override string ToString(string field)
         {
-            SpanQuery clause = i.Current;
-            buffer.Append(clause.ToString(field));
-            if (i.hasNext())
+            var buffer = new StringBuilder();
+            buffer.Append("payloadNear([");
+
+            IEnumerator<SpanQuery> i = clauses.GetEnumerator();
+            bool hasCommaSpace = false;
+            while (i.MoveNext())
             {
+                SpanQuery clause = i.Current;
+                buffer.Append(clause.ToString(field));
                 buffer.Append(", ");
+                hasCommaSpace = true;
             }
-        }
-        buffer.Append("], ");
-        buffer.Append(Slop);
-        buffer.Append(", ");
-        buffer.Append(inOrder);
-        buffer.Append(")");
-        buffer.Append(ToStringUtils.Boost(Boost));
-        return buffer.ToString();
-    }
 
-    public override int GetHashCode()
-    {
-        int prime = 31;
-        int result = base.GetHashCode();
-        result = prime * result + ((fieldName == null) ? 0 : fieldName.GetHashCode());
-        result = prime * result + ((function == null) ? 0 : function.GetHashCode());
-        return result;
-    }
+            if (hasCommaSpace)
+                buffer.Remove(buffer.Length - 2, 2);
 
-    public override bool Equals(Object obj)
-    {
-        if (this == obj)
-            return true;
-        if (!base.Equals(obj))
-            return false;
-        if (GetType() != obj.GetType())
-            return false;
-        var other = (PayloadNearQuery)obj;
-        if (fieldName == null)
-        {
-            if (other.fieldName != null)
-                return false;
+            buffer.Append("], ");
+            buffer.Append(Slop);
+            buffer.Append(", ");
+            buffer.Append(inOrder);
+            buffer.Append(")");
+            buffer.Append(ToStringUtils.Boost(Boost));
+            return buffer.ToString();
         }
-        else if (!fieldName.Equals(other.fieldName))
-            return false;
-        if (function == null)
+
+        public override int GetHashCode()
         {
-            if (other.function != null)
-                return false;
+            int prime = 31;
+            int result = base.GetHashCode();
+            result = prime * result + ((fieldName == null) ? 0 : fieldName.GetHashCode());
+            result = prime * result + ((function == null) ? 0 : function.GetHashCode());
+            return result;
         }
-        else if (!function.Equals(other.function))
-            return false;
-        return true;
-    }
 
-    public class PayloadNearSpanScorer : SpanScorer
-    {
-        private readonly BytesRef scratch = new BytesRef();
-        protected float payloadScore;
-        private int payloadsSeen;
-        private Spans spans;
-
-        protected PayloadNearSpanScorer(Spans spans, Weight weight,
-                                        Similarity similarity, Similarity.SloppySimScorer docScorer)
-            : base(spans, weight, docScorer)
+        public override bool Equals(Object obj)
         {
-            this.spans = spans;
+            if (this == obj)
+                return true;
+            if (!base.Equals(obj))
+                return false;
+            if (GetType() != obj.GetType())
+                return false;
+            var other = (PayloadNearQuery)obj;
+            if (fieldName == null)
+            {
+                if (other.fieldName != null)
+                    return false;
+            }
+            else if (!fieldName.Equals(other.fieldName))
+                return false;
+            if (function == null)
+            {
+                if (other.function != null)
+                    return false;
+            }
+            else if (!function.Equals(other.function))
+                return false;
+            return true;
         }
 
-        // Get the payloads associated with all underlying subspans
-        public void GetPayloads(Spans[] subSpans)
+        public class PayloadNearSpanScorer : SpanScorer
         {
-            for (int i = 0; i < subSpans.Length; i++)
+            private readonly BytesRef scratch = new BytesRef();
+            internal float payloadScore;
+            internal int payloadsSeen;
+            private SpansBase spans;
+
+            internal PayloadNearSpanScorer(SpansBase spans, Weight weight,
+                                            Similarity similarity, Similarity.SloppySimScorer docScorer)
+                : base(spans, weight, docScorer)
+            {
+                this.spans = spans;
+            }
+
+            // Get the payloads associated with all underlying subspans
+            public void GetPayloads(SpansBase[] subSpans)
             {
-                if (subSpans[i] is NearSpansOrdered)
+                for (int i = 0; i < subSpans.Length; i++)
                 {
-                    if ((subSpans[i]).IsPayloadAvailable())
+                    if (subSpans[i] is NearSpansOrdered)
                     {
-                        ProcessPayloads((subSpans[i]).GetPayload(),
-                                        subSpans[i].Start, subSpans[i].End);
+                        if ((subSpans[i]).IsPayloadAvailable())
+                        {
+                            ProcessPayloads((subSpans[i]).GetPayload(),
+                                            subSpans[i].Start, subSpans[i].End);
+                        }
+                        GetPayloads(((NearSpansOrdered)subSpans[i]).GetSubSpans());
                     }
-                    GetPayloads(((NearSpansOrdered)subSpans[i]).GetSubSpans());
-                }
-                else if (subSpans[i] is NearSpansUnordered)
-                {
-                    if ((subSpans[i]).IsPayloadAvailable())
+                    else if (subSpans[i] is NearSpansUnordered)
                     {
-                        ProcessPayloads((subSpans[i]).GetPayload(),
-                                        subSpans[i].Start, subSpans[i].End);
+                        if ((subSpans[i]).IsPayloadAvailable())
+                        {
+                            ProcessPayloads((subSpans[i]).GetPayload(),
+                                            subSpans[i].Start, subSpans[i].End);
+                        }
+                        GetPayloads(((NearSpansUnordered)subSpans[i]).GetSubSpans());
                     }
-                    GetPayloads(((NearSpansUnordered)subSpans[i]).GetSubSpans());
                 }
             }
-        }
 
-        // TODO change the whole spans api to use bytesRef, or nuke spans
+            // TODO change the whole spans api to use bytesRef, or nuke spans
 
-        protected void ProcessPayloads(ICollection<sbyte[]> payLoads, int start, int end)
-        {
-            foreach (var thePayload in payLoads)
+            protected void ProcessPayloads(ICollection<sbyte[]> payLoads, int start, int end)
             {
-                scratch.bytes = thePayload;
-                scratch.offset = 0;
-                scratch.length = thePayload.Length;
-                payloadScore = function.CurrentScore(doc, fieldName, start, end,
-                                                     payloadsSeen, payloadScore, docScorer.ComputePayloadFactor(doc, spans.Start, spans.End, scratch));
-                ++payloadsSeen;
+                foreach (var thePayload in payLoads)
+                {
+                    scratch.bytes = thePayload;
+                    scratch.offset = 0;
+                    scratch.length = thePayload.Length;
+                    payloadScore = function.CurrentScore(doc, fieldName, start, end,
+                                                         payloadsSeen, payloadScore, docScorer.ComputePayloadFactor(doc, spans.Start, spans.End, scratch));
+                    ++payloadsSeen;
+                }
             }
-        }
 
-        protected override bool SetFreqCurrentDoc()
-        {
-            if (!more)
+            protected override bool SetFreqCurrentDoc()
             {
-                return false;
+                if (!more)
+                {
+                    return false;
+                }
+                doc = spans.Doc;
+                freq = 0.0f;
+                payloadScore = 0;
+                payloadsSeen = 0;
+                do
+                {
+                    int matchLength = spans.End - spans.Start;
+                    freq += docScorer.ComputeSlopFactor(matchLength);
+                    var spansArr = new SpansBase[1];
+                    spansArr[0] = spans;
+                    GetPayloads(spansArr);
+                    more = spans.Next();
+                } while (more && (doc == spans.Doc));
+                return true;
             }
-            doc = spans.Doc;
-            freq = 0.0f;
-            payloadScore = 0;
-            payloadsSeen = 0;
-            do
-            {
-                int matchLength = spans.End - spans.Start;
-                freq += docScorer.ComputeSlopFactor(matchLength);
-                var spansArr = new Spans[1];
-                spansArr[0] = spans;
-                GetPayloads(spansArr);
-                more = spans.Next();
-            } while (more && (doc == spans.Doc));
-            return true;
-        }
 
-        public float Score()
-        {
-            return base.Score()
-                   * function.DocScore(doc, fieldName, payloadsSeen, payloadScore);
+            public override float Score()
+            {
+                return base.Score()
+                       * function.DocScore(doc, fieldName, payloadsSeen, payloadScore);
+            }
         }
-    }
 
-    public class PayloadNearSpanWeight : SpanWeight
-    {
-        public PayloadNearSpanWeight(SpanQuery query, IndexSearcher searcher)
-            : base(query, searcher)
+        public class PayloadNearSpanWeight : SpanWeight
         {
-        }
+            public PayloadNearSpanWeight(SpanQuery query, IndexSearcher searcher)
+                : base(query, searcher)
+            {
+            }
 
-        public override Scorer Scorer(AtomicReaderContext context, bool scoreDocsInOrder,
-                                      bool topScorer, IBits acceptDocs)
-        {
-            return new PayloadNearSpanScorer(query.GetSpans(context, acceptDocs, termContexts), this,
-                                             similarity, similarity.GetSloppySimScorer(stats, context));
-        }
+            public override Scorer Scorer(AtomicReaderContext context, bool scoreDocsInOrder,
+                                          bool topScorer, IBits acceptDocs)
+            {
+                return new PayloadNearSpanScorer(query.GetSpans(context, acceptDocs, termContexts), this,
+                                                 similarity, similarity.GetSloppySimScorer(stats, context));
+            }
 
-        public override Explanation Explain(AtomicReaderContext context, int doc)
-        {
-            var scorer = (PayloadNearSpanScorer)Scorer(context, true, false, context.Reader.LiveDocs);
-            if (scorer != null)
+            public override Explanation Explain(AtomicReaderContext context, int doc)
             {
-                int newDoc = scorer.Advance(doc);
-                if (newDoc == doc)
+                var scorer = (PayloadNearSpanScorer)Scorer(context, true, false, ((AtomicReader)context.Reader).LiveDocs);
+                if (scorer != null)
                 {
-                    float freq = scorer.freq();
-                    Similarity.SloppySimScorer docScorer = similarity.GetSloppySimScorer(stats, context);
-                    var expl = new Explanation();
-                    expl.Description = "weight(" + Query + " in " + doc + ") [" + similarity.GetType().Name +
-                                       "], result of:";
-                    Explanation scoreExplanation = docScorer.Explain(doc, new Explanation(freq, "phraseFreq=" + freq));
-                    expl.AddDetail(scoreExplanation);
-                    expl.Value = scoreExplanation.Value;
-                    String field = ((SpanQuery)Query).Field;
-                    // now the payloads part
-                    Explanation payloadExpl = function.Explain(doc, field, scorer.payloadsSeen, scorer.payloadScore);
-                    // combined
-                    var result = new ComplexExplanation();
-                    result.AddDetail(expl);
-                    result.AddDetail(payloadExpl);
-                    result.Value = expl.Value * payloadExpl.Value;
-                    result.Description = "PayloadNearQuery, product of:";
-                    return result;
+                    int newDoc = scorer.Advance(doc);
+                    if (newDoc == doc)
+                    {
+                        float freq = scorer.Freq;
+                        Similarity.SloppySimScorer docScorer = similarity.GetSloppySimScorer(stats, context);
+                        var expl = new Explanation();
+                        expl.Description = "weight(" + Query + " in " + doc + ") [" + similarity.GetType().Name +
+                                           "], result of:";
+                        Explanation scoreExplanation = docScorer.Explain(doc, new Explanation(freq, "phraseFreq=" + freq));
+                        expl.AddDetail(scoreExplanation);
+                        expl.Value = scoreExplanation.Value;
+                        String field = ((SpanQuery)Query).Field;
+                        // now the payloads part
+                        Explanation payloadExpl = function.Explain(doc, field, scorer.payloadsSeen, scorer.payloadScore);
+                        // combined
+                        var result = new ComplexExplanation();
+                        result.AddDetail(expl);
+                        result.AddDetail(payloadExpl);
+                        result.Value = expl.Value * payloadExpl.Value;
+                        result.Description = "PayloadNearQuery, product of:";
+                        return result;
+                    }
                 }
-            }
 
-            return new ComplexExplanation(false, 0.0f, "no matching term");
+                return new ComplexExplanation(false, 0.0f, "no matching term");
+            }
         }
     }
 }
\ No newline at end of file

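Port note: the rewritten ToString appends ", " after every clause and trims the trailing separator afterwards, staying close to the Java original. A simpler equivalent shape, if preferred (hedged drop-in for the enumerator loop above; needs System.Collections.Generic):

    var parts = new List<string>();
    foreach (SpanQuery clause in clauses)
    {
        parts.Add(clause.ToString(field));
    }
    buffer.Append(string.Join(", ", parts));   // .NET 4.0+ overload
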
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/80561f72/src/core/Search/Payloads/PayloadSpanUtil.cs
----------------------------------------------------------------------
diff --git a/src/core/Search/Payloads/PayloadSpanUtil.cs b/src/core/Search/Payloads/PayloadSpanUtil.cs
index 097f110..11f5249 100644
--- a/src/core/Search/Payloads/PayloadSpanUtil.cs
+++ b/src/core/Search/Payloads/PayloadSpanUtil.cs
@@ -137,7 +137,7 @@ public class PayloadSpanUtil
     private void GetPayloads(ICollection<sbyte[]> payloads, SpanQuery query)
     {
         var termContexts = new HashMap<Term, TermContext>();
-        var terms = new TreeSet<Term>();
+        var terms = new SortedSet<Term>();
         query.ExtractTerms(terms);
         foreach (var term in terms)
         {
@@ -145,7 +145,7 @@ public class PayloadSpanUtil
         }
         foreach (AtomicReaderContext atomicReaderContext in context.Leaves)
         {
-            Spans spans = query.GetSpans(atomicReaderContext, atomicReaderContext.Reader.LiveDocs, termContexts);
+            SpansBase spans = query.GetSpans(atomicReaderContext, atomicReaderContext.Reader.LiveDocs, termContexts);
             while (spans.Next())
             {
                 if (spans.IsPayloadAvailable())

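Port note: Java's TreeSet<Term> becomes SortedSet<Term> (.NET 4+), which likewise iterates in ascending order without duplicates, which is what ExtractTerms callers rely on. Sketch:

    static IEnumerable<Term> OrderedTerms(SpanQuery query)
    {
        var terms = new SortedSet<Term>();   // sorted, duplicate-free, like TreeSet
        query.ExtractTerms(terms);
        return terms;                        // ascending term order
    }
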
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/80561f72/src/core/Search/PhraseQuery.cs
----------------------------------------------------------------------
diff --git a/src/core/Search/PhraseQuery.cs b/src/core/Search/PhraseQuery.cs
index 6e9f641..5e1340f 100644
--- a/src/core/Search/PhraseQuery.cs
+++ b/src/core/Search/PhraseQuery.cs
@@ -260,9 +260,12 @@ namespace Lucene.Net.Search
                 get { return parent; }
             }
 
-            public override float GetValueForNormalization()
+            public override float ValueForNormalization
             {
-                return stats.GetValueForNormalization();
+                get
+                {
+                    return stats.ValueForNormalization;
+                }
             }
 
             public override void Normalize(float queryNorm, float topLevelBoost)
@@ -338,7 +341,7 @@ namespace Lucene.Net.Search
 
             public override Explanation Explain(AtomicReaderContext context, int doc)
             {
-                var scorer = Scorer(context, true, false, context.Reader.LiveDocs);
+                var scorer = Scorer(context, true, false, ((AtomicReader)context.Reader).LiveDocs);
                 if (scorer != null)
                 {
                     var newDoc = scorer.Advance(doc);

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/80561f72/src/core/Search/PositiveScoresOnlyCollector.cs
----------------------------------------------------------------------
diff --git a/src/core/Search/PositiveScoresOnlyCollector.cs b/src/core/Search/PositiveScoresOnlyCollector.cs
index ac91d21..a490a08 100644
--- a/src/core/Search/PositiveScoresOnlyCollector.cs
+++ b/src/core/Search/PositiveScoresOnlyCollector.cs
@@ -20,46 +20,46 @@ using Lucene.Net.Index;
 
 namespace Lucene.Net.Search
 {
-	
-	/// <summary> A <see cref="Collector" /> implementation which wraps another
-	/// <see cref="Collector" /> and makes sure only documents with
-	/// scores &gt; 0 are collected.
-	/// </summary>
-	public class PositiveScoresOnlyCollector : Collector
-	{
-		
-		private readonly Collector c;
-		private Scorer scorer;
-		
-		public PositiveScoresOnlyCollector(Collector c)
-		{
-			this.c = c;
-		}
-		
-		public override void  Collect(int doc)
-		{
-			if (scorer.Score() > 0)
-			{
-				c.Collect(doc);
-			}
-		}
-		
-		public override void  SetNextReader(AtomicReaderContext context, int docBase)
-		{
-			c.SetNextReader(context);
-		}
-		
-		public override void  SetScorer(Scorer scorer)
-		{
-			// Set a ScoreCachingWrappingScorer in case the wrapped Collector will call
-			// score() also.
-			this.scorer = new ScoreCachingWrappingScorer(scorer);
-			c.SetScorer(this.scorer);
-		}
 
-	    public override bool AcceptsDocsOutOfOrder
-	    {
-	        get { return c.AcceptsDocsOutOfOrder; }
-	    }
-	}
+    /// <summary> A <see cref="Collector" /> implementation which wraps another
+    /// <see cref="Collector" /> and makes sure only documents with
+    /// scores &gt; 0 are collected.
+    /// </summary>
+    public class PositiveScoresOnlyCollector : Collector
+    {
+
+        private readonly Collector c;
+        private Scorer scorer;
+
+        public PositiveScoresOnlyCollector(Collector c)
+        {
+            this.c = c;
+        }
+
+        public override void Collect(int doc)
+        {
+            if (scorer.Score() > 0)
+            {
+                c.Collect(doc);
+            }
+        }
+
+        public override void SetNextReader(AtomicReaderContext context)
+        {
+            c.SetNextReader(context);
+        }
+
+        public override void SetScorer(Scorer scorer)
+        {
+            // Set a ScoreCachingWrappingScorer in case the wrapped Collector will call
+            // score() also.
+            this.scorer = new ScoreCachingWrappingScorer(scorer);
+            c.SetScorer(this.scorer);
+        }
+
+        public override bool AcceptsDocsOutOfOrder
+        {
+            get { return c.AcceptsDocsOutOfOrder; }
+        }
+    }
 }
\ No newline at end of file

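Port note: SetScorer wraps the incoming scorer so that Score() is computed at most once per document, even though both this collector and the wrapped one may ask for it. The caching idea in isolation (illustrative, not the port's ScoreCachingWrappingScorer):

    using System;

    class CachingScore
    {
        private readonly Func<int> currentDoc;   // e.g. () => scorer.DocID
        private readonly Func<float> compute;    // e.g. () => scorer.Score()
        private int cachedDoc = -1;
        private float cachedScore;

        public CachingScore(Func<int> currentDoc, Func<float> compute)
        {
            this.currentDoc = currentDoc;
            this.compute = compute;
        }

        public float Score()
        {
            int doc = currentDoc();
            if (doc != cachedDoc)            // first request for this doc
            {
                cachedScore = compute();     // compute once...
                cachedDoc = doc;
            }
            return cachedScore;              // ...then serve from cache
        }
    }
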
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/80561f72/src/core/Search/PrefixQuery.cs
----------------------------------------------------------------------
diff --git a/src/core/Search/PrefixQuery.cs b/src/core/Search/PrefixQuery.cs
index 84bb767..108a2cc 100644
--- a/src/core/Search/PrefixQuery.cs
+++ b/src/core/Search/PrefixQuery.cs
@@ -48,7 +48,7 @@ namespace Lucene.Net.Search
 	        get { return prefix; }
 	    }
 
-        protected override TermsEnum GetTermsEnum(Terms terms, AttributeSource atts)
+        protected internal override TermsEnum GetTermsEnum(Terms terms, AttributeSource atts)
         {
             var tenum = terms.Iterator(null);
 

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/80561f72/src/core/Search/PrefixTermsEnum.cs
----------------------------------------------------------------------
diff --git a/src/core/Search/PrefixTermsEnum.cs b/src/core/Search/PrefixTermsEnum.cs
index c6d311b..854b992 100644
--- a/src/core/Search/PrefixTermsEnum.cs
+++ b/src/core/Search/PrefixTermsEnum.cs
@@ -38,7 +38,7 @@ namespace Lucene.Net.Search
             SetInitialSeekTerm(this.prefixRef = prefixText);
         }
 
-        protected override AcceptStatus accept(BytesRef term)
+        protected override AcceptStatus Accept(BytesRef term)
         {
             if (StringHelper.StartsWith(term, prefixRef))
             {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/80561f72/src/core/Search/ReqExclScorer.cs
----------------------------------------------------------------------
diff --git a/src/core/Search/ReqExclScorer.cs b/src/core/Search/ReqExclScorer.cs
index 2c5fa20..4c8fe5e 100644
--- a/src/core/Search/ReqExclScorer.cs
+++ b/src/core/Search/ReqExclScorer.cs
@@ -120,14 +120,20 @@ namespace Lucene.Net.Search
 			return reqScorer.Score(); // reqScorer may be null when next() or skipTo() already return false
 		}
 		
-        public override int Freq()
+        public override int Freq
         {
-            return reqScorer.Freq();
+            get
+            {
+                return reqScorer.Freq();
+            }
         }
 
-        public override ICollection<ChildScorer> GetChildren()
+        public override ICollection<ChildScorer> Children
         {
-            return new Collection<ChildScorer>(new [] {new ChildScorer(reqScorer, "FILTERED") } );
+            get
+            {
+                return new Collection<ChildScorer>(new[] { new ChildScorer(reqScorer, "FILTERED") });
+            }
         }  
 
 		public override int Advance(int target)
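
A porting convention worth flagging once, since it repeats through the rest of this commit (Freq and Children here, ValueForNormalization and TopLevelQuery in the diffs below): parameterless Java-style getter methods become read-only C# properties, and call sites drop the parentheses. A toy sketch of the pattern, not code from this commit:

    // before port: int freq = scorer.Freq();
    // after port:  int freq = scorer.Freq;
    abstract class ScorerSketch
    {
        public abstract int Freq { get; }   // was: public abstract int Freq();
    }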

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/80561f72/src/core/Search/ReqOptSumScorer.cs
----------------------------------------------------------------------
diff --git a/src/core/Search/ReqOptSumScorer.cs b/src/core/Search/ReqOptSumScorer.cs
index 9815bdb..632e90c 100644
--- a/src/core/Search/ReqOptSumScorer.cs
+++ b/src/core/Search/ReqOptSumScorer.cs
@@ -19,75 +19,79 @@ using System.Collections.Generic;
 
 namespace Lucene.Net.Search
 {
-	
-	/// <summary>A Scorer for queries with a required part and an optional part.
-	/// Delays skipTo() on the optional part until a score() is needed.
-	/// <br/>
-	/// This <c>Scorer</c> implements <see cref="DocIdSetIterator.Advance(int)" />.
-	/// </summary>
-	class ReqOptSumScorer:Scorer
-	{
-		/// <summary>The scorers passed from the constructor.
-		/// These are set to null as soon as their next() or skipTo() returns false.
-		/// </summary>
-		private Scorer reqScorer;
-		private Scorer optScorer;
-		
-		/// <summary>Construct a <c>ReqOptScorer</c>.</summary>
-		/// <param name="reqScorer">The required scorer. This must match.
-		/// </param>
-		/// <param name="optScorer">The optional scorer. This is used for scoring only.
-		/// </param>
-		public ReqOptSumScorer(Scorer reqScorer, Scorer optScorer):base(reqScorer.Weight)
-		{ // No similarity used.
-			this.reqScorer = reqScorer;
-			this.optScorer = optScorer;
-		}
-		
-		public override int NextDoc()
-		{
-			return reqScorer.NextDoc();
-		}
-		
-		public override int Advance(int target)
-		{
-			return reqScorer.Advance(target);
-		}
-		
-		public override int DocID
-		{
-		    get { return reqScorer.DocID; }
-		}
-		
-		/// <summary>Returns the score of the current document matching the query.
-		/// Initially invalid, until <see cref="NextDoc()" /> is called the first time.
-		/// </summary>
-		/// <returns> The score of the required scorer, eventually increased by the score
-		/// of the optional scorer when it also matches the current document.
-		/// </returns>
-		public override float Score()
-		{
-			int curDoc = reqScorer.DocID;
-			float reqScore = reqScorer.Score();
-			if (optScorer == null)
-			{
-				return reqScore;
-			}
-			
-			int optScorerDoc = optScorer.DocID;
-			if (optScorerDoc < curDoc && (optScorerDoc = optScorer.Advance(curDoc)) == NO_MORE_DOCS)
-			{
-				optScorer = null;
-				return reqScore;
-			}
-			
-			return optScorerDoc == curDoc?reqScore + optScorer.Score():reqScore;
-		}
 
-        public override int Freq()
+    /// <summary>A Scorer for queries with a required part and an optional part.
+    /// Delays skipTo() on the optional part until a score() is needed.
+    /// <br/>
+    /// This <c>Scorer</c> implements <see cref="DocIdSetIterator.Advance(int)" />.
+    /// </summary>
+    class ReqOptSumScorer : Scorer
+    {
+        /// <summary>The scorers passed from the constructor.
+        /// These are set to null as soon as their next() or skipTo() returns false.
+        /// </summary>
+        private Scorer reqScorer;
+        private Scorer optScorer;
+
+        /// <summary>Construct a <c>ReqOptSumScorer</c>.</summary>
+        /// <param name="reqScorer">The required scorer. This must match.
+        /// </param>
+        /// <param name="optScorer">The optional scorer. This is used for scoring only.
+        /// </param>
+        public ReqOptSumScorer(Scorer reqScorer, Scorer optScorer)
+            : base(reqScorer.Weight)
+        { // No similarity used.
+            this.reqScorer = reqScorer;
+            this.optScorer = optScorer;
+        }
+
+        public override int NextDoc()
+        {
+            return reqScorer.NextDoc();
+        }
+
+        public override int Advance(int target)
+        {
+            return reqScorer.Advance(target);
+        }
+
+        public override int DocID
+        {
+            get { return reqScorer.DocID; }
+        }
+
+        /// <summary>Returns the score of the current document matching the query.
+        /// Initially invalid, until <see cref="NextDoc()" /> is called the first time.
+        /// </summary>
+        /// <returns> The score of the required scorer, possibly increased by the score
+        /// of the optional scorer when it also matches the current document.
+        /// </returns>
+        public override float Score()
+        {
+            int curDoc = reqScorer.DocID;
+            float reqScore = reqScorer.Score();
+            if (optScorer == null)
+            {
+                return reqScore;
+            }
+
+            int optScorerDoc = optScorer.DocID;
+            if (optScorerDoc < curDoc && (optScorerDoc = optScorer.Advance(curDoc)) == NO_MORE_DOCS)
+            {
+                optScorer = null;
+                return reqScore;
+            }
+
+            return optScorerDoc == curDoc ? reqScore + optScorer.Score() : reqScore;
+        }
+
+        public override int Freq
         {
-            Score();
-            return (optScorer != null && optScorer.DocID == reqScorer.DocID) ? 2 : 1;
+            get
+            {
+                Score();
+                return (optScorer != null && optScorer.DocID == reqScorer.DocID) ? 2 : 1;
+            }
         }
 
         public ICollection<ChildScorer> GetChildren()
@@ -102,5 +106,5 @@ namespace Lucene.Net.Search
         {
             get { return reqScorer.Cost; }
         }
-	}
+    }
 }
\ No newline at end of file
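
To make the lazy-advance contract above concrete: NextDoc() and Advance() are driven entirely by the required scorer, and the optional scorer is only moved forward inside Score(), at most up to the current required doc. A standalone toy model of that combination logic on plain arrays (illustration only, not the Lucene API):

    static class ReqOptSumModel
    {
        // reqScore is the required scorer's score at curDoc; optDocs is the
        // sorted doc-id list of the "optional scorer" and optPos its cursor.
        public static float Score(int curDoc, float reqScore,
                                  int[] optDocs, float optScore, ref int optPos)
        {
            // advance the optional side lazily, and only up to curDoc
            while (optPos < optDocs.Length && optDocs[optPos] < curDoc) optPos++;
            if (optPos == optDocs.Length) return reqScore;  // optional side exhausted
            return optDocs[optPos] == curDoc ? reqScore + optScore : reqScore;
        }
    }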

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/80561f72/src/core/Search/ScoreCachingWrappingScorer.cs
----------------------------------------------------------------------
diff --git a/src/core/Search/ScoreCachingWrappingScorer.cs b/src/core/Search/ScoreCachingWrappingScorer.cs
index 6d9bfd6..aecfa92 100644
--- a/src/core/Search/ScoreCachingWrappingScorer.cs
+++ b/src/core/Search/ScoreCachingWrappingScorer.cs
@@ -61,9 +61,12 @@ namespace Lucene.Net.Search
 			return curScore;
 		}
 		
-        public override int Freq()
+        public override int Freq
         {
-            return scorer.Freq();
+            get
+            {
+                return scorer.Freq();
+            }
         }
 
 		public override int DocID
@@ -86,11 +89,14 @@ namespace Lucene.Net.Search
 			return scorer.Advance(target);
 		}
 
-        public override ICollection<ChildScorer> GetChildren()
+        public override ICollection<ChildScorer> Children
         {
-            var list = new List<ChildScorer>(1);
-            list.Add(new ChildScorer(scorer, "CACHED"));
-            return list;
+            get
+            {
+                var list = new List<ChildScorer>(1);
+                list.Add(new ChildScorer(scorer, "CACHED"));
+                return list;
+            }
         }
 
 	    public override long Cost

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/80561f72/src/core/Search/ScoringRewrite.cs
----------------------------------------------------------------------
diff --git a/src/core/Search/ScoringRewrite.cs b/src/core/Search/ScoringRewrite.cs
index 4f7f1a8..ae5c45a 100644
--- a/src/core/Search/ScoringRewrite.cs
+++ b/src/core/Search/ScoringRewrite.cs
@@ -11,9 +11,12 @@ namespace Lucene.Net.Search
 
         private class AnonymounsScoringBooleanQueryRewrite : ScoringRewrite<BooleanQuery>
         {
-            protected override BooleanQuery GetTopLevelQuery()
+            protected override BooleanQuery TopLevelQuery
             {
-                return new BooleanQuery(true);
+                get
+                {
+                    return new BooleanQuery(true);
+                }
             }
 
             protected override void AddClause(BooleanQuery topLevel, Term term, int docCount,

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/80561f72/src/core/Search/Similarities/BM25Similarity.cs
----------------------------------------------------------------------
diff --git a/src/core/Search/Similarities/BM25Similarity.cs b/src/core/Search/Similarities/BM25Similarity.cs
index bb4267b..7cfd013 100644
--- a/src/core/Search/Similarities/BM25Similarity.cs
+++ b/src/core/Search/Similarities/BM25Similarity.cs
@@ -15,8 +15,8 @@ namespace Lucene.Net.Search.Similarities
         {
             for (int i = 0; i < 256; i++)
             {
-                float f = SmallFloat.Byte315ToFloat((sbyte) i);
-                NORM_TABLE[i] = 1.0f/(f*f);
+                float f = SmallFloat.Byte315ToFloat((sbyte)i);
+                NORM_TABLE[i] = 1.0f / (f * f);
             }
         }
 
@@ -50,12 +50,12 @@ namespace Lucene.Net.Search.Similarities
 
         protected virtual float Idf(long docFreq, long numDocs)
         {
-            return (float) Math.Log(1 + (numDocs - docFreq + 0.5D)/(docFreq + 0.5D));
+            return (float)Math.Log(1 + (numDocs - docFreq + 0.5D) / (docFreq + 0.5D));
         }
 
         protected virtual float SloppyFreq(int distance)
         {
-            return 1.0f/(distance + 1);
+            return 1.0f / (distance + 1);
         }
 
         protected virtual float ScorePayload(int doc, int start, int end, BytesRef payload)
@@ -69,12 +69,12 @@ namespace Lucene.Net.Search.Similarities
             if (sumTotalTermFreq <= 0)
                 return 1f;
             else
-                return (float) (sumTotalTermFreq/(double) collectionStats.MaxDoc);
+                return (float)(sumTotalTermFreq / (double)collectionStats.MaxDoc);
         }
 
         protected virtual sbyte EncodeNormValue(float boost, int fieldLength)
         {
-            return SmallFloat.FloatToByte315(boost/(float) Math.Sqrt(fieldLength));
+            return SmallFloat.FloatToByte315(boost / (float)Math.Sqrt(fieldLength));
         }
 
         protected virtual float DecodeNormValue(sbyte b)
@@ -125,15 +125,15 @@ namespace Lucene.Net.Search.Similarities
             var cache = new float[256];
             for (int i = 0; i < cache.Length; i++)
             {
-                cache[i] = k1*((1 - b) + b*DecodeNormValue((sbyte) i)/avgdl);
+                cache[i] = k1 * ((1 - b) + b * DecodeNormValue((sbyte)i) / avgdl);
             }
             return new BM25Stats(collectionStats.Field, idf, queryBoost, avgdl, cache);
         }
 
         public override sealed ExactSimScorer GetExactSimScorer(SimWeight stats, AtomicReaderContext context)
         {
-            var bm25stats = (BM25Stats) stats;
-            NumericDocValues norms = context.Reader.GetNormValues(bm25stats.Field);
+            var bm25stats = (BM25Stats)stats;
+            NumericDocValues norms = ((AtomicReader)context.Reader).GetNormValues(bm25stats.Field);
             return norms == null
                        ? new ExactBM25DocScorerNoNorms(bm25stats, this)
                        : new ExactBM25DocScorer(bm25stats, norms, this) as ExactSimScorer;
@@ -141,38 +141,38 @@ namespace Lucene.Net.Search.Similarities
 
         public override sealed SloppySimScorer GetSloppySimScorer(SimWeight stats, AtomicReaderContext context)
         {
-            var bm25stats = (BM25Stats) stats;
-            return new SloppyBM25DocScorer(bm25stats, context.Reader.GetNormValues(bm25stats.Field), this);
+            var bm25stats = (BM25Stats)stats;
+            return new SloppyBM25DocScorer(bm25stats, ((AtomicReader)context.Reader).GetNormValues(bm25stats.Field), this);
         }
 
         private Explanation ExplainScore(int doc, Explanation freq, BM25Stats stats, NumericDocValues norms)
         {
-            var result = new Explanation {Description = "score(doc=" + doc + ",freq=" + freq + "), product of:"};
+            var result = new Explanation { Description = "score(doc=" + doc + ",freq=" + freq + "), product of:" };
 
-            var boostExpl = new Explanation(stats.QueryBoost*stats.TopLevelBoost, "boost");
+            var boostExpl = new Explanation(stats.QueryBoost * stats.TopLevelBoost, "boost");
             if (boostExpl.Value != 1.0f)
                 result.AddDetail(boostExpl);
 
             result.AddDetail(stats.Idf);
 
-            var tfNormExpl = new Explanation {Description = "tfNorm, computed from:"};
+            var tfNormExpl = new Explanation { Description = "tfNorm, computed from:" };
             tfNormExpl.AddDetail(freq);
             tfNormExpl.AddDetail(new Explanation(k1, "parameter k1"));
             if (norms == null)
             {
                 tfNormExpl.AddDetail(new Explanation(0, "parameter b (norms omitted for field)"));
-                tfNormExpl.Value = (freq.Value*(k1 + 1))/(freq.Value + k1);
+                tfNormExpl.Value = (freq.Value * (k1 + 1)) / (freq.Value + k1);
             }
             else
             {
-                float doclen = DecodeNormValue((sbyte) norms.Get(doc));
+                float doclen = DecodeNormValue((sbyte)norms.Get(doc));
                 tfNormExpl.AddDetail(new Explanation(b, "parameter b"));
                 tfNormExpl.AddDetail(new Explanation(stats.Avgdl, "avgFieldLength"));
                 tfNormExpl.AddDetail(new Explanation(doclen, "fieldLength"));
-                tfNormExpl.Value = (freq.Value*(k1 + 1))/(freq.Value + k1*(1 - b + b*doclen/stats.Avgdl));
+                tfNormExpl.Value = (freq.Value * (k1 + 1)) / (freq.Value + k1 * (1 - b + b * doclen / stats.Avgdl));
             }
             result.AddDetail(tfNormExpl);
-            result.Value = boostExpl.Value*stats.Idf.Value*tfNormExpl.Value;
+            result.Value = boostExpl.Value * stats.Idf.Value * tfNormExpl.Value;
             return result;
         }
 
@@ -248,18 +248,21 @@ namespace Lucene.Net.Search.Similarities
                 get { return cache; }
             }
 
-            public override float GetValueForNormalization()
+            public override float ValueForNormalization
             {
-                // we return a TF-IDF like normalization to be nice, but we don't actually normalize ourselves.
-                float queryWeight = idf.Value*queryBoost;
-                return queryWeight*queryWeight;
+                get
+                {
+                    // we return a TF-IDF like normalization to be nice, but we don't actually normalize ourselves.
+                    float queryWeight = idf.Value * queryBoost;
+                    return queryWeight * queryWeight;
+                }
             }
 
             public override void Normalize(float queryNorm, float topLevelBoost)
             {
                 // we don't normalize with queryNorm at all, we just capture the top-level boost
                 this.topLevelBoost = topLevelBoost;
-                weight = idf.Value*queryBoost*topLevelBoost;
+                weight = idf.Value * queryBoost * topLevelBoost;
             }
         }
 
@@ -275,7 +278,7 @@ namespace Lucene.Net.Search.Similarities
             {
                 //assert norms != null;
                 this.stats = stats;
-                weightValue = stats.Weight*(parent.k1 + 1); // boost * idf * (k1 + 1)
+                weightValue = stats.Weight * (parent.k1 + 1); // boost * idf * (k1 + 1)
                 cache = stats.Cache;
                 this.norms = norms;
                 this.parent = parent;
@@ -283,7 +286,7 @@ namespace Lucene.Net.Search.Similarities
 
             public override float Score(int doc, int freq)
             {
-                return weightValue*freq/(freq + cache[(byte) norms.Get(doc) & 0xFF]);
+                return weightValue * freq / (freq + cache[(byte)norms.Get(doc) & 0xFF]);
             }
 
             public override Explanation Explain(int doc, Explanation freq)
@@ -303,9 +306,9 @@ namespace Lucene.Net.Search.Similarities
             public ExactBM25DocScorerNoNorms(BM25Stats stats, BM25Similarity parent)
             {
                 this.stats = stats;
-                weightValue = stats.Weight*(parent.k1 + 1); // boost * idf * (k1 + 1)
+                weightValue = stats.Weight * (parent.k1 + 1); // boost * idf * (k1 + 1)
                 for (int i = 0; i < SCORE_CACHE_SIZE; i++)
-                    scoreCache[i] = weightValue*i/(i + parent.k1);
+                    scoreCache[i] = weightValue * i / (i + parent.k1);
                 this.parent = parent;
             }
 
@@ -314,7 +317,7 @@ namespace Lucene.Net.Search.Similarities
                 // TODO: maybe score cache is more trouble than it's worth?
                 return freq < SCORE_CACHE_SIZE // check cache
                            ? scoreCache[freq] // cache hit
-                           : weightValue*freq/(freq + parent.k1); // cache miss
+                           : weightValue * freq / (freq + parent.k1); // cache miss
             }
 
             public override Explanation Explain(int doc, Explanation freq)
@@ -334,7 +337,7 @@ namespace Lucene.Net.Search.Similarities
             public SloppyBM25DocScorer(BM25Stats stats, NumericDocValues norms, BM25Similarity parent)
             {
                 this.stats = stats;
-                weightValue = stats.Weight*(parent.k1 + 1);
+                weightValue = stats.Weight * (parent.k1 + 1);
                 cache = stats.Cache;
                 this.norms = norms;
                 this.parent = parent;
@@ -343,8 +346,8 @@ namespace Lucene.Net.Search.Similarities
             public override float Score(int doc, float freq)
             {
                 // if there are no norms, we act as if b=0
-                float norm = norms == null ? parent.k1 : cache[(byte) norms.Get(doc) & 0xFF];
-                return weightValue*freq/(freq + norm);
+                float norm = norms == null ? parent.k1 : cache[(byte)norms.Get(doc) & 0xFF];
+                return weightValue * freq / (freq + norm);
             }
 
             public override Explanation Explain(int doc, Explanation freq)
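
The tfNorm expression in ExplainScore is the heart of the BM25 formula, so a standalone arithmetic check may help reviewers. k1 = 1.2f and b = 0.75f are BM25Similarity's defaults; the freq/doclen/avgdl values are made up for illustration:

    using System;

    static class Bm25Sketch
    {
        static void Main()
        {
            float k1 = 1.2f, b = 0.75f;
            float freq = 3f, doclen = 28f, avgdl = 21f;
            // same shape as: (freq.Value * (k1 + 1)) / (freq.Value + k1 * (1 - b + b * doclen / stats.Avgdl))
            float tfNorm = (freq * (k1 + 1)) / (freq + k1 * (1 - b + b * doclen / avgdl));
            Console.WriteLine(tfNorm); // ~1.47; with doclen == avgdl it would be ~1.57
        }
    }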

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/80561f72/src/core/Search/Similarities/BasicStats.cs
----------------------------------------------------------------------
diff --git a/src/core/Search/Similarities/BasicStats.cs b/src/core/Search/Similarities/BasicStats.cs
index 4739c7a..9b2e6bc 100644
--- a/src/core/Search/Similarities/BasicStats.cs
+++ b/src/core/Search/Similarities/BasicStats.cs
@@ -37,9 +37,12 @@ namespace Lucene.Net.Search.Similarities
             get { return queryBoost; }
         }
 
-        public override float GetValueForNormalization()
+        public override float ValueForNormalization
         {
-            return RawNormalizationValue*RawNormalizationValue;
+            get
+            {
+                return RawNormalizationValue * RawNormalizationValue;
+            }
         }
 
         public override void Normalize(float queryNorm, float topLevelBoost)

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/80561f72/src/core/Search/Similarities/MultiSimilarity.cs
----------------------------------------------------------------------
diff --git a/src/core/Search/Similarities/MultiSimilarity.cs b/src/core/Search/Similarities/MultiSimilarity.cs
index 4e7ef5a..9ec6c63 100644
--- a/src/core/Search/Similarities/MultiSimilarity.cs
+++ b/src/core/Search/Similarities/MultiSimilarity.cs
@@ -118,10 +118,13 @@ namespace Lucene.Net.Search.Similarities
                 this.subStats = subStats;
             }
 
-            public override float GetValueForNormalization()
+            public override float ValueForNormalization
             {
-                float sum = subStats.Sum(stat => stat.GetValueForNormalization());
-                return sum/subStats.Length;
+                get
+                {
+                    float sum = subStats.Sum(stat => stat.ValueForNormalization);
+                    return sum / subStats.Length;
+                }
             }
 
             public override void Normalize(float queryNorm, float topLevelBoost)

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/80561f72/src/core/Search/Similarities/PerFieldSimilarityWrapper.cs
----------------------------------------------------------------------
diff --git a/src/core/Search/Similarities/PerFieldSimilarityWrapper.cs b/src/core/Search/Similarities/PerFieldSimilarityWrapper.cs
index 8caa8f9..35f85b5 100644
--- a/src/core/Search/Similarities/PerFieldSimilarityWrapper.cs
+++ b/src/core/Search/Similarities/PerFieldSimilarityWrapper.cs
@@ -37,9 +37,12 @@ namespace Lucene.Net.Search.Similarities
             internal Similarity Delegate;
             internal SimWeight DelegateWeight;
 
-            public override float GetValueForNormalization()
+            public override float ValueForNormalization
             {
-                return DelegateWeight.GetValueForNormalization();
+                get
+                {
+                    return DelegateWeight.ValueForNormalization;
+                }
             }
 
             public override void Normalize(float queryNorm, float topLevelBoost)

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/80561f72/src/core/Search/Similarities/TFIDFSimilarity.cs
----------------------------------------------------------------------
diff --git a/src/core/Search/Similarities/TFIDFSimilarity.cs b/src/core/Search/Similarities/TFIDFSimilarity.cs
index 275c25f..232742a 100644
--- a/src/core/Search/Similarities/TFIDFSimilarity.cs
+++ b/src/core/Search/Similarities/TFIDFSimilarity.cs
@@ -227,10 +227,13 @@ namespace Lucene.Net.Search.Similarities
                 get { return value; }
             }
 
-            public override float GetValueForNormalization()
+            public override float ValueForNormalization
             {
-                // TODO: (sorta LUCENE-1907) make non-static class and expose this squaring via a nice method to subclasses?
-                return queryWeight*queryWeight; // sum of squared weights
+                get
+                {
+                    // TODO: (sorta LUCENE-1907) make non-static class and expose this squaring via a nice method to subclasses?
+                    return queryWeight * queryWeight; // sum of squared weights
+                }
             }
 
             public override void Normalize(float queryNorm, float topLevelBoost)


[47/50] [abbrv] git commit: Fix byte count logic

Posted by mh...@apache.org.
Fix byte count logic


Project: http://git-wip-us.apache.org/repos/asf/lucenenet/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucenenet/commit/96a95e35
Tree: http://git-wip-us.apache.org/repos/asf/lucenenet/tree/96a95e35
Diff: http://git-wip-us.apache.org/repos/asf/lucenenet/diff/96a95e35

Branch: refs/heads/branch_4x
Commit: 96a95e35bf40ee9a3d9a98484f08e7c9ffaca2f0
Parents: 86087f1
Author: Paul Irwin <pa...@gmail.com>
Authored: Mon Aug 12 11:30:57 2013 -0400
Committer: Paul Irwin <pa...@gmail.com>
Committed: Mon Aug 12 11:30:57 2013 -0400

----------------------------------------------------------------------
 src/contrib/Analyzers/Core/KeywordTokenizer.cs | 2 +-
 src/contrib/Analyzers/Util/CharacterUtils.cs   | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucenenet/blob/96a95e35/src/contrib/Analyzers/Core/KeywordTokenizer.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Analyzers/Core/KeywordTokenizer.cs b/src/contrib/Analyzers/Core/KeywordTokenizer.cs
index 83ba155..ecf12df 100644
--- a/src/contrib/Analyzers/Core/KeywordTokenizer.cs
+++ b/src/contrib/Analyzers/Core/KeywordTokenizer.cs
@@ -60,7 +60,7 @@ namespace Lucene.Net.Analysis.Core
                 while (true)
                 {
                     int length = input.Read(buffer, upto, buffer.Length - upto);
-                    if (length == -1) break;
+                    if (length <= 0) break;
                     upto += length;
                     if (upto == buffer.Length)
                         buffer = termAtt.ResizeBuffer(1 + buffer.Length);

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/96a95e35/src/contrib/Analyzers/Util/CharacterUtils.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Analyzers/Util/CharacterUtils.cs b/src/contrib/Analyzers/Util/CharacterUtils.cs
index 5fdc78f..ecb32ba 100644
--- a/src/contrib/Analyzers/Util/CharacterUtils.cs
+++ b/src/contrib/Analyzers/Util/CharacterUtils.cs
@@ -72,7 +72,7 @@ namespace Lucene.Net.Analysis.Util
             {
                 buffer.offset = 0;
                 int read = reader.Read(buffer.buffer, 0, buffer.length);
-                if (read == -1)
+                if (read <= 0)
                     return false;
                 buffer.length = read;
                 return true;
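
Context for the "<= 0" change: Java's Reader.read(char[], int, int) signals end of stream with -1, but .NET's TextReader.Read(char[], int, int) returns 0 there and never -1, so the old "== -1" test could spin forever on an exhausted reader. A self-contained sketch of the corrected loop shape:

    using System.IO;

    static class ReadHelper
    {
        public static int ReadFully(TextReader input, char[] buffer)
        {
            int upto = 0;
            while (upto < buffer.Length)
            {
                int length = input.Read(buffer, upto, buffer.Length - upto);
                if (length <= 0) break;   // 0 signals end of stream in .NET
                upto += length;
            }
            return upto;
        }
    }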


[38/50] [abbrv] Implement Standard and Classic Analyzers

Posted by mh...@apache.org.
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/7a4b442f/src/contrib/Analyzers/Standard/Std34/StandardTokenizerImpl34.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Analyzers/Standard/Std34/StandardTokenizerImpl34.cs b/src/contrib/Analyzers/Standard/Std34/StandardTokenizerImpl34.cs
new file mode 100644
index 0000000..bdaf2aa
--- /dev/null
+++ b/src/contrib/Analyzers/Standard/Std34/StandardTokenizerImpl34.cs
@@ -0,0 +1,1134 @@
+using Lucene.Net.Analysis.Tokenattributes;
+using System;
+using System.Collections.Generic;
+using System.IO;
+using System.Linq;
+using System.Text;
+
+namespace Lucene.Net.Analysis.Standard.Std34
+{
+    public sealed class StandardTokenizerImpl34 : IStandardTokenizerInterface
+    {
+        /** This character denotes the end of file */
+        public const int YYEOF = -1;
+
+        /** initial size of the lookahead buffer */
+        private const int ZZ_BUFFERSIZE = 4096;
+
+        /** lexical states */
+        public const int YYINITIAL = 0;
+
+        /**
+         * ZZ_LEXSTATE[l] is the state in the DFA for the lexical state l
+         * ZZ_LEXSTATE[l+1] is the state in the DFA for the lexical state l
+         *                  at the beginning of a line
+         * l is of the form l = 2*k, k a non-negative integer
+         */
+        private static readonly int[] ZZ_LEXSTATE = { 
+     0, 0
+  };
+
+        /** 
+         * Translates characters to character classes
+         */
+        private const String ZZ_CMAP_PACKED =
+          "\u0027\0\u0001\u0060\u0004\0\u0001\u005f\u0001\0\u0001\u0060\u0001\0\u000a\u005c\u0001\u005e\u0001\u005f" +
+          "\u0005\0\u001a\u005a\u0004\0\u0001\u0061\u0001\0\u001a\u005a\u002f\0\u0001\u005a\u0002\0\u0001\u005b" +
+          "\u0007\0\u0001\u005a\u0001\0\u0001\u005e\u0002\0\u0001\u005a\u0005\0\u0017\u005a\u0001\0\u001f\u005a" +
+          "\u0001\0\u01ca\u005a\u0004\0\u000c\u005a\u000e\0\u0005\u005a\u0007\0\u0001\u005a\u0001\0\u0001\u005a" +
+          "\u0011\0\u0070\u005b\u0005\u005a\u0001\0\u0002\u005a\u0002\0\u0004\u005a\u0001\u005f\u0007\0\u0001\u005a" +
+          "\u0001\u005e\u0003\u005a\u0001\0\u0001\u005a\u0001\0\u0014\u005a\u0001\0\u0053\u005a\u0001\0\u008b\u005a" +
+          "\u0001\0\u0007\u005b\u009e\u005a\u0009\0\u0026\u005a\u0002\0\u0001\u005a\u0007\0\u0027\u005a\u0001\0" +
+          "\u0001\u005f\u0007\0\u002d\u005b\u0001\0\u0001\u005b\u0001\0\u0002\u005b\u0001\0\u0002\u005b\u0001\0" +
+          "\u0001\u005b\u0008\0\u001b\u005a\u0005\0\u0004\u005a\u0001\u005e\u000b\0\u0004\u005b\u0008\0\u0002\u005f" +
+          "\u0002\0\u000b\u005b\u0005\0\u002b\u005a\u0015\u005b\u000a\u005c\u0001\0\u0001\u005c\u0001\u005f\u0001\0" +
+          "\u0002\u005a\u0001\u005b\u0063\u005a\u0001\0\u0001\u005a\u0007\u005b\u0001\u005b\u0001\0\u0006\u005b\u0002\u005a" +
+          "\u0002\u005b\u0001\0\u0004\u005b\u0002\u005a\u000a\u005c\u0003\u005a\u0002\0\u0001\u005a\u000f\0\u0001\u005b" +
+          "\u0001\u005a\u0001\u005b\u001e\u005a\u001b\u005b\u0002\0\u0059\u005a\u000b\u005b\u0001\u005a\u000e\0\u000a\u005c" +
+          "\u0021\u005a\u0009\u005b\u0002\u005a\u0002\0\u0001\u005f\u0001\0\u0001\u005a\u0005\0\u0016\u005a\u0004\u005b" +
+          "\u0001\u005a\u0009\u005b\u0001\u005a\u0003\u005b\u0001\u005a\u0005\u005b\u0012\0\u0019\u005a\u0003\u005b\u00a4\0" +
+          "\u0004\u005b\u0036\u005a\u0003\u005b\u0001\u005a\u0012\u005b\u0001\u005a\u0007\u005b\u000a\u005a\u0002\u005b\u0002\0" +
+          "\u000a\u005c\u0001\0\u0007\u005a\u0001\0\u0007\u005a\u0001\0\u0003\u005b\u0001\0\u0008\u005a\u0002\0" +
+          "\u0002\u005a\u0002\0\u0016\u005a\u0001\0\u0007\u005a\u0001\0\u0001\u005a\u0003\0\u0004\u005a\u0002\0" +
+          "\u0001\u005b\u0001\u005a\u0007\u005b\u0002\0\u0002\u005b\u0002\0\u0003\u005b\u0001\u005a\u0008\0\u0001\u005b" +
+          "\u0004\0\u0002\u005a\u0001\0\u0003\u005a\u0002\u005b\u0002\0\u000a\u005c\u0002\u005a\u000f\0\u0003\u005b" +
+          "\u0001\0\u0006\u005a\u0004\0\u0002\u005a\u0002\0\u0016\u005a\u0001\0\u0007\u005a\u0001\0\u0002\u005a" +
+          "\u0001\0\u0002\u005a\u0001\0\u0002\u005a\u0002\0\u0001\u005b\u0001\0\u0005\u005b\u0004\0\u0002\u005b" +
+          "\u0002\0\u0003\u005b\u0003\0\u0001\u005b\u0007\0\u0004\u005a\u0001\0\u0001\u005a\u0007\0\u000a\u005c" +
+          "\u0002\u005b\u0003\u005a\u0001\u005b\u000b\0\u0003\u005b\u0001\0\u0009\u005a\u0001\0\u0003\u005a\u0001\0" +
+          "\u0016\u005a\u0001\0\u0007\u005a\u0001\0\u0002\u005a\u0001\0\u0005\u005a\u0002\0\u0001\u005b\u0001\u005a" +
+          "\u0008\u005b\u0001\0\u0003\u005b\u0001\0\u0003\u005b\u0002\0\u0001\u005a\u000f\0\u0002\u005a\u0002\u005b" +
+          "\u0002\0\u000a\u005c\u0011\0\u0003\u005b\u0001\0\u0008\u005a\u0002\0\u0002\u005a\u0002\0\u0016\u005a" +
+          "\u0001\0\u0007\u005a\u0001\0\u0002\u005a\u0001\0\u0005\u005a\u0002\0\u0001\u005b\u0001\u005a\u0007\u005b" +
+          "\u0002\0\u0002\u005b\u0002\0\u0003\u005b\u0008\0\u0002\u005b\u0004\0\u0002\u005a\u0001\0\u0003\u005a" +
+          "\u0002\u005b\u0002\0\u000a\u005c\u0001\0\u0001\u005a\u0010\0\u0001\u005b\u0001\u005a\u0001\0\u0006\u005a" +
+          "\u0003\0\u0003\u005a\u0001\0\u0004\u005a\u0003\0\u0002\u005a\u0001\0\u0001\u005a\u0001\0\u0002\u005a" +
+          "\u0003\0\u0002\u005a\u0003\0\u0003\u005a\u0003\0\u000c\u005a\u0004\0\u0005\u005b\u0003\0\u0003\u005b" +
+          "\u0001\0\u0004\u005b\u0002\0\u0001\u005a\u0006\0\u0001\u005b\u000e\0\u000a\u005c\u0011\0\u0003\u005b" +
+          "\u0001\0\u0008\u005a\u0001\0\u0003\u005a\u0001\0\u0017\u005a\u0001\0\u000a\u005a\u0001\0\u0005\u005a" +
+          "\u0003\0\u0001\u005a\u0007\u005b\u0001\0\u0003\u005b\u0001\0\u0004\u005b\u0007\0\u0002\u005b\u0001\0" +
+          "\u0002\u005a\u0006\0\u0002\u005a\u0002\u005b\u0002\0\u000a\u005c\u0012\0\u0002\u005b\u0001\0\u0008\u005a" +
+          "\u0001\0\u0003\u005a\u0001\0\u0017\u005a\u0001\0\u000a\u005a\u0001\0\u0005\u005a\u0002\0\u0001\u005b" +
+          "\u0001\u005a\u0007\u005b\u0001\0\u0003\u005b\u0001\0\u0004\u005b\u0007\0\u0002\u005b\u0007\0\u0001\u005a" +
+          "\u0001\0\u0002\u005a\u0002\u005b\u0002\0\u000a\u005c\u0001\0\u0002\u005a\u000f\0\u0002\u005b\u0001\0" +
+          "\u0008\u005a\u0001\0\u0003\u005a\u0001\0\u0029\u005a\u0002\0\u0001\u005a\u0007\u005b\u0001\0\u0003\u005b" +
+          "\u0001\0\u0004\u005b\u0001\u005a\u0008\0\u0001\u005b\u0008\0\u0002\u005a\u0002\u005b\u0002\0\u000a\u005c" +
+          "\u000a\0\u0006\u005a\u0002\0\u0002\u005b\u0001\0\u0012\u005a\u0003\0\u0018\u005a\u0001\0\u0009\u005a" +
+          "\u0001\0\u0001\u005a\u0002\0\u0007\u005a\u0003\0\u0001\u005b\u0004\0\u0006\u005b\u0001\0\u0001\u005b" +
+          "\u0001\0\u0008\u005b\u0012\0\u0002\u005b\u000d\0\u0030\u0062\u0001\u0063\u0002\u0062\u0007\u0063\u0005\0" +
+          "\u0007\u0062\u0008\u0063\u0001\0\u000a\u005c\u0027\0\u0002\u0062\u0001\0\u0001\u0062\u0002\0\u0002\u0062" +
+          "\u0001\0\u0001\u0062\u0002\0\u0001\u0062\u0006\0\u0004\u0062\u0001\0\u0007\u0062\u0001\0\u0003\u0062" +
+          "\u0001\0\u0001\u0062\u0001\0\u0001\u0062\u0002\0\u0002\u0062\u0001\0\u0004\u0062\u0001\u0063\u0002\u0062" +
+          "\u0006\u0063\u0001\0\u0002\u0063\u0001\u0062\u0002\0\u0005\u0062\u0001\0\u0001\u0062\u0001\0\u0006\u0063" +
+          "\u0002\0\u000a\u005c\u0002\0\u0002\u0062\u0022\0\u0001\u005a\u0017\0\u0002\u005b\u0006\0\u000a\u005c" +
+          "\u000b\0\u0001\u005b\u0001\0\u0001\u005b\u0001\0\u0001\u005b\u0004\0\u0002\u005b\u0008\u005a\u0001\0" +
+          "\u0024\u005a\u0004\0\u0014\u005b\u0001\0\u0002\u005b\u0005\u005a\u000b\u005b\u0001\0\u0024\u005b\u0009\0" +
+          "\u0001\u005b\u0039\0\u002b\u0062\u0014\u0063\u0001\u0062\u000a\u005c\u0006\0\u0006\u0062\u0004\u0063\u0004\u0062" +
+          "\u0003\u0063\u0001\u0062\u0003\u0063\u0002\u0062\u0007\u0063\u0003\u0062\u0004\u0063\u000d\u0062\u000c\u0063\u0001\u0062" +
+          "\u0001\u0063\u000a\u005c\u0004\u0063\u0002\u0062\u0026\u005a\u000a\0\u002b\u005a\u0001\0\u0001\u005a\u0003\0" +
+          "\u0100\u0066\u0049\u005a\u0001\0\u0004\u005a\u0002\0\u0007\u005a\u0001\0\u0001\u005a\u0001\0\u0004\u005a" +
+          "\u0002\0\u0029\u005a\u0001\0\u0004\u005a\u0002\0\u0021\u005a\u0001\0\u0004\u005a\u0002\0\u0007\u005a" +
+          "\u0001\0\u0001\u005a\u0001\0\u0004\u005a\u0002\0\u000f\u005a\u0001\0\u0039\u005a\u0001\0\u0004\u005a" +
+          "\u0002\0\u0043\u005a\u0002\0\u0003\u005b\u0020\0\u0010\u005a\u0010\0\u0055\u005a\u000c\0\u026c\u005a" +
+          "\u0002\0\u0011\u005a\u0001\0\u001a\u005a\u0005\0\u004b\u005a\u0003\0\u0003\u005a\u000f\0\u000d\u005a" +
+          "\u0001\0\u0004\u005a\u0003\u005b\u000b\0\u0012\u005a\u0003\u005b\u000b\0\u0012\u005a\u0002\u005b\u000c\0" +
+          "\u000d\u005a\u0001\0\u0003\u005a\u0001\0\u0002\u005b\u000c\0\u0034\u0062\u0002\u0063\u001e\u0063\u0003\0" +
+          "\u0001\u0062\u0004\0\u0001\u0062\u0001\u0063\u0002\0\u000a\u005c\u0021\0\u0003\u005b\u0002\0\u000a\u005c" +
+          "\u0006\0\u0058\u005a\u0008\0\u0029\u005a\u0001\u005b\u0001\u005a\u0005\0\u0046\u005a\u000a\0\u001d\u005a" +
+          "\u0003\0\u000c\u005b\u0004\0\u000c\u005b\u000a\0\u000a\u005c\u001e\u0062\u0002\0\u0005\u0062\u000b\0" +
+          "\u002c\u0062\u0004\0\u0011\u0063\u0007\u0062\u0002\u0063\u0006\0\u000a\u005c\u0001\u0062\u0003\0\u0002\u0062" +
+          "\u0020\0\u0017\u005a\u0005\u005b\u0004\0\u0035\u0062\u000a\u0063\u0001\0\u001d\u0063\u0002\0\u0001\u005b" +
+          "\u000a\u005c\u0006\0\u000a\u005c\u0006\0\u000e\u0062\u0052\0\u0005\u005b\u002f\u005a\u0011\u005b\u0007\u005a" +
+          "\u0004\0\u000a\u005c\u0011\0\u0009\u005b\u000c\0\u0003\u005b\u001e\u005a\u000a\u005b\u0003\0\u0002\u005a" +
+          "\u000a\u005c\u0006\0\u0026\u005a\u000e\u005b\u000c\0\u0024\u005a\u0014\u005b\u0008\0\u000a\u005c\u0003\0" +
+          "\u0003\u005a\u000a\u005c\u0024\u005a\u0052\0\u0003\u005b\u0001\0\u0015\u005b\u0004\u005a\u0001\u005b\u0004\u005a" +
+          "\u0001\u005b\u000d\0\u00c0\u005a\u0027\u005b\u0015\0\u0004\u005b\u0116\u005a\u0002\0\u0006\u005a\u0002\0" +
+          "\u0026\u005a\u0002\0\u0006\u005a\u0002\0\u0008\u005a\u0001\0\u0001\u005a\u0001\0\u0001\u005a\u0001\0" +
+          "\u0001\u005a\u0001\0\u001f\u005a\u0002\0\u0035\u005a\u0001\0\u0007\u005a\u0001\0\u0001\u005a\u0003\0" +
+          "\u0003\u005a\u0001\0\u0007\u005a\u0003\0\u0004\u005a\u0002\0\u0006\u005a\u0004\0\u000d\u005a\u0005\0" +
+          "\u0003\u005a\u0001\0\u0007\u005a\u000f\0\u0002\u005b\u0002\u005b\u0008\0\u0002\u0060\u000a\0\u0001\u0060" +
+          "\u0002\0\u0001\u005e\u0002\0\u0005\u005b\u0010\0\u0002\u0061\u0003\0\u0001\u005f\u000f\0\u0001\u0061" +
+          "\u000b\0\u0005\u005b\u0005\0\u0006\u005b\u0001\0\u0001\u005a\u000d\0\u0001\u005a\u0010\0\u000d\u005a" +
+          "\u0033\0\u0021\u005b\u0011\0\u0001\u005a\u0004\0\u0001\u005a\u0002\0\u000a\u005a\u0001\0\u0001\u005a" +
+          "\u0003\0\u0005\u005a\u0006\0\u0001\u005a\u0001\0\u0001\u005a\u0001\0\u0001\u005a\u0001\0\u0004\u005a" +
+          "\u0001\0\u000b\u005a\u0002\0\u0004\u005a\u0005\0\u0005\u005a\u0004\0\u0001\u005a\u0011\0\u0029\u005a" +
+          "\u032d\0\u0034\u005a\u0716\0\u002f\u005a\u0001\0\u002f\u005a\u0001\0\u0085\u005a\u0006\0\u0004\u005a" +
+          "\u0003\u005b\u000e\0\u0026\u005a\u000a\0\u0036\u005a\u0009\0\u0001\u005a\u000f\0\u0001\u005b\u0017\u005a" +
+          "\u0009\0\u0007\u005a\u0001\0\u0007\u005a\u0001\0\u0007\u005a\u0001\0\u0007\u005a\u0001\0\u0007\u005a" +
+          "\u0001\0\u0007\u005a\u0001\0\u0007\u005a\u0001\0\u0007\u005a\u0001\0\u0020\u005b\u002f\0\u0001\u005a" +
+          "\u0050\0\u001a\u0064\u0001\0\u0059\u0064\u000c\0\u00d6\u0064\u002f\0\u0001\u005a\u0001\0\u0001\u0064" +
+          "\u0019\0\u0009\u0064\u0004\u005b\u0002\u005b\u0001\0\u0005\u005d\u0002\0\u0003\u0064\u0001\u005a\u0001\u005a" +
+          "\u0004\0\u0056\u0065\u0002\0\u0002\u005b\u0002\u005d\u0003\u0065\u005b\u005d\u0001\0\u0004\u005d\u0005\0" +
+          "\u0029\u005a\u0003\0\u005e\u0066\u0011\0\u001b\u005a\u0035\0\u0010\u005d\u001f\0\u0041\0\u001f\0" +
+          "\u0051\0\u002f\u005d\u0001\0\u0058\u005d\u00a8\0\u19b6\u0064\u004a\0\u51cc\u0064\u0034\0\u048d\u005a" +
+          "\u0043\0\u002e\u005a\u0002\0\u010d\u005a\u0003\0\u0010\u005a\u000a\u005c\u0002\u005a\u0014\0\u002f\u005a" +
+          "\u0004\u005b\u0009\0\u0002\u005b\u0001\0\u0019\u005a\u0008\0\u0050\u005a\u0002\u005b\u0025\0\u0009\u005a" +
+          "\u0002\0\u0067\u005a\u0002\0\u0004\u005a\u0001\0\u0002\u005a\u000e\0\u000a\u005a\u0050\0\u0008\u005a" +
+          "\u0001\u005b\u0003\u005a\u0001\u005b\u0004\u005a\u0001\u005b\u0017\u005a\u0005\u005b\u0018\0\u0034\u005a\u000c\0" +
+          "\u0002\u005b\u0032\u005a\u0011\u005b\u000b\0\u000a\u005c\u0006\0\u0012\u005b\u0006\u005a\u0003\0\u0001\u005a" +
+          "\u0004\0\u000a\u005c\u001c\u005a\u0008\u005b\u0002\0\u0017\u005a\u000d\u005b\u000c\0\u001d\u0066\u0003\0" +
+          "\u0004\u005b\u002f\u005a\u000e\u005b\u000e\0\u0001\u005a\u000a\u005c\u0026\0\u0029\u005a\u000e\u005b\u0009\0" +
+          "\u0003\u005a\u0001\u005b\u0008\u005a\u0002\u005b\u0002\0\u000a\u005c\u0006\0\u001b\u0062\u0001\u0063\u0004\0" +
+          "\u0030\u0062\u0001\u0063\u0001\u0062\u0003\u0063\u0002\u0062\u0002\u0063\u0005\u0062\u0002\u0063\u0001\u0062\u0001\u0063" +
+          "\u0001\u0062\u0018\0\u0005\u0062\u0021\0\u0006\u005a\u0002\0\u0006\u005a\u0002\0\u0006\u005a\u0009\0" +
+          "\u0007\u005a\u0001\0\u0007\u005a\u0091\0\u0023\u005a\u0008\u005b\u0001\0\u0002\u005b\u0002\0\u000a\u005c" +
+          "\u0006\0\u2ba4\u0066\u000c\0\u0017\u0066\u0004\0\u0031\u0066\u0004\0\u0001\u0019\u0001\u0015\u0001\u0026" +
+          "\u0001\u0023\u0001\u000b\u0003\0\u0001\u0007\u0001\u0005\u0002\0\u0001\u0003\u0001\u0001\u000c\0\u0001\u0009" +
+          "\u0011\0\u0001\u004a\u0007\0\u0001\u0035\u0001\u000f\u0006\0\u0001\u0058\u0003\0\u0001\u0050\u0001\u0050" +
+          "\u0001\u0050\u0001\u0050\u0001\u0050\u0001\u0050\u0001\u0050\u0001\u0050\u0001\u0050\u0001\u0050\u0001\u0050\u0001\u0050" +
+          "\u0001\u0050\u0001\u0050\u0001\u0050\u0001\u0050\u0001\u0050\u0001\u0050\u0001\u0050\u0001\u0050\u0001\u0050\u0001\u0050" +
+          "\u0001\u0050\u0001\u0050\u0001\u0050\u0001\u0050\u0001\u0050\u0001\u0050\u0001\u0050\u0001\u0050\u0001\u0050\u0001\u0050" +
+          "\u0001\u0050\u0001\u0050\u0001\u0050\u0001\u0050\u0001\u0050\u0001\u0050\u0001\u0050\u0001\u0050\u0001\u0050\u0001\u0051" +
+          "\u0001\u0050\u0001\u0050\u0001\u0050\u0001\u0055\u0001\u0053\u000f\0\u0001\u004c\u02c1\0\u0001\u0038\u00bf\0" +
+          "\u0001\u004b\u0001\u0039\u0001\u0002\u0003\u0054\u0002\u001d\u0001\u0054\u0001\u001d\u0002\u0054\u0001\u000c\u0011\u0054" +
+          "\u0002\u0030\u0007\u003b\u0001\u003a\u0007\u003b\u0007\u002a\u0001\u000d\u0001\u002a\u0001\u003d\u0002\u0025\u0001\u0024" +
+          "\u0001\u003d\u0001\u0025\u0001\u0024\u0008\u003d\u0002\u0033\u0005\u0031\u0002\u002c\u0005\u0031\u0001\u0006\u0008\u001f" +
+          "\u0005\u0011\u0003\u0017\u000a\u0046\u0010\u0017\u0003\u0022\u001a\u0018\u0001\u0016\u0002\u0014\u0002\u0048\u0001\u0049" +
+          "\u0002\u0048\u0002\u0049\u0002\u0048\u0001\u0049\u0003\u0014\u0001\u000e\u0002\u0014\u000a\u0034\u0001\u003c\u0001\u0021" +
+          "\u0001\u001c\u0001\u0034\u0006\u0021\u0001\u001c\u0036\u0021\u0005\u004d\u0006\u0043\u0001\u0029\u0004\u0043\u0002\u0029" +
+          "\u0008\u0043\u0001\u0029\u0007\u0040\u0001\u000a\u0002\u0040\u001a\u0043\u0001\u000a\u0004\u0040\u0001\u000a\u0005\u0042" +
+          "\u0001\u0041\u0001\u0042\u0003\u0041\u0007\u0042\u0001\u0041\u0013\u0042\u0005\u0037\u0003\u0042\u0006\u0037\u0002\u0037" +
+          "\u0006\u0036\u0008\u0036\u0002\u0040\u0007\u0036\u001e\u0040\u0004\u0036\u0042\u0040\u000d\u004d\u0001\u003f\u0002\u004d" +
+          "\u0001\u0059\u0003\u004f\u0001\u004d\u0002\u004f\u0005\u004d\u0004\u004f\u0004\u004e\u0001\u004d\u0003\u004e\u0001\u004d" +
+          "\u0005\u004e\u0016\u002e\u0004\u0013\u0001\u0045\u0002\u0044\u0004\u0052\u0001\u0044\u0002\u0052\u0003\u003e\u001b\u0052" +
+          "\u001d\u002d\u0003\u0052\u001d\u0056\u0003\u0052\u0006\u0056\u0002\u001b\u0019\u0056\u0001\u001b\u000f\u0056\u0006\u0052" +
+          "\u0004\u0012\u0001\u0008\u001f\u0012\u0001\u0008\u0004\u0012\u0015\u0032\u0001\u0057\u0009\u0032\u0011\u002d\u0005\u0032" +
+          "\u0001\u002f\u000a\u0020\u000b\u0032\u0004\u002d\u0001\u0028\u0006\u002d\u000a\u0052\u000f\u002d\u0001\u0027\u0003\u002b" +
+          "\u000d\u0010\u0009\u001e\u0001\u001a\u0014\u001e\u0002\u0010\u0009\u001e\u0001\u001a\u0019\u001e\u0001\u001a\u0004\u0010" +
+          "\u0004\u001e\u0002\u001a\u0002\u0047\u0001\u0004\u0005\u0047\u002a\u0004\u1900\0\u012e\u0064\u0002\0\u003e\u0064" +
+          "\u0002\0\u006a\u0064\u0026\0\u0007\u005a\u000c\0\u0005\u005a\u0005\0\u0001\u005a\u0001\u005b\u000a\u005a" +
+          "\u0001\0\u000d\u005a\u0001\0\u0005\u005a\u0001\0\u0001\u005a\u0001\0\u0002\u005a\u0001\0\u0002\u005a" +
+          "\u0001\0\u006c\u005a\u0021\0\u016b\u005a\u0012\0\u0040\u005a\u0002\0\u0036\u005a\u0028\0\u000c\u005a" +
+          "\u0004\0\u0010\u005b\u0001\u005f\u0002\0\u0001\u005e\u0001\u005f\u000b\0\u0007\u005b\u000c\0\u0002\u0061" +
+          "\u0018\0\u0003\u0061\u0001\u005f\u0001\0\u0001\u0060\u0001\0\u0001\u005f\u0001\u005e\u001a\0\u0005\u005a" +
+          "\u0001\0\u0087\u005a\u0002\0\u0001\u005b\u0007\0\u0001\u0060\u0004\0\u0001\u005f\u0001\0\u0001\u0060" +
+          "\u0001\0\u000a\u005c\u0001\u005e\u0001\u005f\u0005\0\u001a\u005a\u0004\0\u0001\u0061\u0001\0\u001a\u005a" +
+          "\u000b\0\u0038\u005d\u0002\u005b\u001f\u0066\u0003\0\u0006\u0066\u0002\0\u0006\u0066\u0002\0\u0006\u0066" +
+          "\u0002\0\u0003\u0066\u001c\0\u0003\u005b\u0004\0";
+
+        /** 
+         * Translates characters to character classes
+         */
+        private static readonly char[] ZZ_CMAP = zzUnpackCMap(ZZ_CMAP_PACKED);
+
+        /** 
+         * Translates DFA states to action switch labels.
+         */
+        private static readonly int[] ZZ_ACTION = zzUnpackAction();
+
+        private const String ZZ_ACTION_PACKED_0 =
+          "\u0001\0\u0013\u0001\u0001\u0002\u0001\u0003\u0001\u0004\u0001\u0001\u0001\u0005\u0001\u0006" +
+          "\u0001\u0007\u0001\u0008\u000d\0\u0001\u0002\u0001\0\u0001\u0002\u0008\0\u0001\u0003" +
+          "\u000d\0\u0001\u0002\u0039\0";
+
+        private static int[] zzUnpackAction()
+        {
+            int[] result = new int[124];
+            int offset = 0;
+            offset = zzUnpackAction(ZZ_ACTION_PACKED_0, offset, result);
+            return result;
+        }
+
+        private static int zzUnpackAction(String packed, int offset, int[] result)
+        {
+            int i = 0;       /* index in packed string  */
+            int j = offset;  /* index in unpacked array */
+            int l = packed.Length;
+            while (i < l)
+            {
+                int count = packed[i++];
+                int value = packed[i++];
+                do result[j++] = value; while (--count > 0);
+            }
+            return j;
+        }
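+
+        // Porting note, added for clarity (not JFlex output): ZZ_ACTION_PACKED_0
+        // is run-length encoded in pairs, where packed[i] is a repeat count and
+        // packed[i+1] the value to repeat, so "\u0013\u0001" expands to nineteen
+        // entries of 1. zzUnpackRowMap below pairs differently: each pair holds
+        // the high and low 16 bits of one int.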
+
+
+        /** 
+         * Translates a state to a row index in the transition table
+         */
+        private static readonly int[] ZZ_ROWMAP = zzUnpackRowMap();
+
+        private const String ZZ_ROWMAP_PACKED_0 =
+          "\0\0\0\u0067\0\u00ce\0\u0135\0\u019c\0\u0203\0\u026a\0\u02d1" +
+          "\0\u0338\0\u039f\0\u0406\0\u046d\0\u04d4\0\u053b\0\u05a2\0\u0609" +
+          "\0\u0670\0\u06d7\0\u073e\0\u07a5\0\u080c\0\u0873\0\u08da\0\u0941" +
+          "\0\u09a8\0\u0a0f\0\u0a76\0\u0add\0\u00ce\0\u0135\0\u019c\0\u0203" +
+          "\0\u026a\0\u0b44\0\u0bab\0\u0c12\0\u0c79\0\u046d\0\u0ce0\0\u0d47" +
+          "\0\u0dae\0\u0e15\0\u0e7c\0\u0ee3\0\u0f4a\0\u0338\0\u039f\0\u0fb1" +
+          "\0\u1018\0\u107f\0\u10e6\0\u114d\0\u11b4\0\u121b\0\u1282\0\u12e9" +
+          "\0\u1350\0\u13b7\0\u141e\0\u1485\0\u14ec\0\u1553\0\u15ba\0\u1621" +
+          "\0\u1688\0\u0941\0\u16ef\0\u1756\0\u17bd\0\u1824\0\u188b\0\u18f2" +
+          "\0\u1959\0\u19c0\0\u1a27\0\u1a8e\0\u1af5\0\u1b5c\0\u1bc3\0\u1c2a" +
+          "\0\u1c91\0\u1cf8\0\u1d5f\0\u1dc6\0\u1e2d\0\u1e94\0\u1efb\0\u1f62" +
+          "\0\u1fc9\0\u2030\0\u2097\0\u20fe\0\u2165\0\u21cc\0\u2233\0\u229a" +
+          "\0\u2301\0\u2368\0\u23cf\0\u2436\0\u249d\0\u2504\0\u256b\0\u25d2" +
+          "\0\u2639\0\u26a0\0\u2707\0\u276e\0\u27d5\0\u283c\0\u28a3\0\u290a" +
+          "\0\u2971\0\u29d8\0\u2a3f\0\u2aa6\0\u2b0d\0\u2b74\0\u2bdb\0\u2c42" +
+          "\0\u2ca9\0\u2d10\0\u2d77\0\u2dde";
+
+        private static int[] zzUnpackRowMap()
+        {
+            int[] result = new int[124];
+            int offset = 0;
+            offset = zzUnpackRowMap(ZZ_ROWMAP_PACKED_0, offset, result);
+            return result;
+        }
+
+        private static int zzUnpackRowMap(String packed, int offset, int[] result)
+        {
+            int i = 0;  /* index in packed string  */
+            int j = offset;  /* index in unpacked array */
+            int l = packed.Length;
+            while (i < l)
+            {
+                int high = packed[i++] << 16;
+                result[j++] = high | packed[i++];
+            }
+            return j;
+        }
+
+        /** 
+         * The transition table of the DFA
+         */
+        private static readonly int[] ZZ_TRANS = zzUnpackTrans();
+
+        private const String ZZ_TRANS_PACKED_0 =
+          "\u0001\u0002\u0001\u0003\u0001\u0002\u0001\u0004\u0001\u0002\u0001\u0005\u0001\u0002\u0001\u0006" +
+          "\u0001\u0002\u0001\u0007\u0001\u0002\u0001\u0008\u0003\u0002\u0001\u0009\u0005\u0002\u0001\u000a" +
+          "\u0003\u0002\u0001\u000b\u0009\u0002\u0001\u000c\u0002\u0002\u0001\u000d\u0023\u0002\u0001\u000e" +
+          "\u0001\u0002\u0001\u000f\u0003\u0002\u0001\u0010\u0001\u0011\u0001\u0002\u0001\u0012\u0001\u0002" +
+          "\u0001\u0013\u0002\u0002\u0001\u0014\u0001\u0002\u0001\u0015\u0001\u0002\u0001\u0016\u0001\u0017" +
+          "\u0003\u0002\u0001\u0018\u0002\u0019\u0001\u001a\u0001\u001b\u0001\u001c\u0069\0\u0001\u0015" +
+          "\u0009\0\u0001\u0015\u0010\0\u0001\u0015\u0012\0\u0001\u0015\u0008\0\u0003\u0015" +
+          "\u000f\0\u0001\u0015\u0008\0\u0001\u0015\u0014\0\u0001\u0015\u0001\0\u0001\u0015" +
+          "\u0001\0\u0001\u0015\u0001\0\u0001\u0015\u0001\0\u0001\u0015\u0001\0\u0003\u0015" +
+          "\u0001\0\u0005\u0015\u0001\0\u0003\u0015\u0001\0\u0009\u0015\u0001\0\u0002\u0015" +
+          "\u0001\0\u000e\u0015\u0001\0\u0002\u0015\u0001\0\u0011\u0015\u0001\0\u0001\u0015" +
+          "\u0001\0\u0003\u0015\u0002\0\u0001\u0015\u0001\0\u0001\u0015\u0001\0\u0002\u0015" +
+          "\u0001\0\u0001\u0015\u000f\0\u0001\u0015\u0003\0\u0001\u0015\u0005\0\u0002\u0015" +
+          "\u0003\0\u0001\u0015\u000b\0\u0001\u0015\u0001\0\u0001\u0015\u0004\0\u0002\u0015" +
+          "\u0004\0\u0001\u0015\u0001\0\u0001\u0015\u0003\0\u0002\u0015\u0001\0\u0001\u0015" +
+          "\u0005\0\u0003\u0015\u0001\0\u0001\u0015\u000d\0\u0001\u0015\u0008\0\u0001\u0015" +
+          "\u0014\0\u0001\u0015\u0003\0\u0001\u0015\u0001\0\u0001\u0015\u0001\0\u0001\u0015" +
+          "\u0001\0\u0003\u0015\u0002\0\u0004\u0015\u0001\0\u0003\u0015\u0002\0\u0003\u0015" +
+          "\u0001\0\u0004\u0015\u0001\0\u0002\u0015\u0002\0\u0003\u0015\u0001\0\u0009\u0015" +
+          "\u0001\0\u0002\u0015\u0001\0\u000e\u0015\u0001\0\u0002\u0015\u0001\0\u0001\u0015" +
+          "\u0001\0\u0003\u0015\u0002\0\u0001\u0015\u0001\0\u0001\u0015\u0001\0\u0002\u0015" +
+          "\u0001\0\u0001\u0015\u000f\0\u0001\u0015\u0003\0\u0001\u0015\u0003\0\u0001\u0015" +
+          "\u0001\0\u0003\u0015\u0002\0\u0001\u0015\u0001\0\u0002\u0015\u0001\0\u0003\u0015" +
+          "\u0003\0\u0002\u0015\u0001\0\u0001\u0015\u0001\0\u0002\u0015\u0001\0\u0002\u0015" +
+          "\u0003\0\u0002\u0015\u0001\0\u0001\u0015\u0001\0\u0001\u0015\u0001\0\u0002\u0015" +
+          "\u0001\0\u0002\u0015\u0001\0\u0002\u0015\u0001\0\u0005\u0015\u0001\0\u0005\u0015" +
+          "\u0001\0\u0002\u0015\u0001\0\u0002\u0015\u0001\0\u0001\u0015\u0001\0\u0003\u0015" +
+          "\u0004\0\u0001\u0015\u0004\0\u0001\u0015\u0019\0\u0003\u0015\u0005\0\u0001\u0015" +
+          "\u0001\0\u0001\u0015\u0001\0\u0001\u0015\u0004\0\u0001\u0015\u000c\0\u0001\u0015" +
+          "\u0005\0\u0001\u0015\u0009\0\u0002\u0015\u000a\0\u0001\u0016\u0001\0\u0002\u0015" +
+          "\u000a\0\u0001\u0015\u0014\0\u0001\u0015\u0001\0\u0001\u0016\u0007\0\u0002\u0015" +
+          "\u0002\0\u0005\u0015\u0002\0\u0002\u0015\u0004\0\u0006\u0015\u0001\0\u0002\u0015" +
+          "\u0004\0\u0005\u0015\u0001\0\u0005\u0015\u0001\0\u0002\u0015\u0001\0\u0003\u0015" +
+          "\u0001\0\u0004\u0015\u0001\0\u0005\u0015\u0001\u0016\u0001\0\u0001\u0015\u0001\0" +
+          "\u0001\u0015\u0001\0\u0003\u0015\u0002\0\u0001\u0015\u0001\0\u0001\u0015\u0001\0" +
+          "\u0001\u0015\u0002\0\u0001\u0015\u000f\0\u0001\u0015\u0003\0\u0001\u0015\u0005\0" +
+          "\u0002\u0015\u0003\0\u0001\u0015\u0004\0\u0003\u0015\u0004\0\u0001\u0015\u0001\0" +
+          "\u0001\u0015\u0002\0\u0001\u0015\u0001\0\u0002\u0015\u0004\0\u0001\u0015\u0001\0" +
+          "\u0001\u0015\u0003\0\u0002\u0015\u0001\0\u0001\u0015\u0005\0\u0003\u0015\u0001\0" +
+          "\u0001\u0015\u0008\0\u0001\u0015\u0001\0\u0002\u0016\u0001\0\u0001\u0015\u0008\0" +
+          "\u0001\u0015\u0014\0\u0001\u0015\u0003\0\u0001\u0015\u0006\0\u0002\u0015\u0005\0" +
+          "\u0001\u0015\u0001\0\u0001\u0015\u0001\0\u0001\u0015\u0001\0\u0009\u0015\u0002\0" +
+          "\u0001\u0015\u0004\0\u0001\u0015\u0004\0\u0006\u0015\u0002\0\u0001\u0015\u0001\0" +
+          "\u0001\u0015\u0001\0\u0003\u0015\u0003\0\u0002\u0015\u0004\0\u0003\u0015\u0001\0" +
+          "\u0001\u0015\u0008\0\u0001\u0015\u0001\0\u0002\u0015\u0011\0\u0001\u0015\u0009\0" +
+          "\u0002\u0015\u000f\0\u0001\u0015\u0006\0\u0002\u0015\u0004\0\u0001\u0015\u0005\0" +
+          "\u0001\u0015\u0002\0\u0001\u0015\u0005\0\u0003\u0015\u0001\0\u0001\u0015\u000d\0" +
+          "\u0001\u0015\u0008\0\u0001\u0015\u0014\0\u0001\u0015\u0003\0\u0001\u0015\u0005\0" +
+          "\u0001\u0015\u001a\0\u000d\u0015\u0005\0\u0003\u0015\u0001\0\u0001\u0015\u0005\0" +
+          "\u0001\u0015\u0007\0\u0001\u0015\u0002\0\u0001\u0015\u0005\0\u0001\u0015\u0002\0" +
+          "\u0001\u0015\u0001\0\u0001\u0015\u0046\0\u0001\u001b\u0011\0\u0001\u0017\u001d\0" +
+          "\u0001\u001a\u0003\0\u0001\u001a\u0003\0\u0001\u001a\u0001\0\u0003\u001a\u0002\0" +
+          "\u0001\u001a\u0002\0\u0001\u001a\u0001\0\u0003\u001a\u0003\0\u0002\u001a\u0001\0" +
+          "\u0001\u001a\u0001\0\u0002\u001a\u0001\0\u0002\u001a\u0003\0\u0002\u001a\u0001\0" +
+          "\u0001\u001a\u0003\0\u0002\u001a\u0001\0\u0002\u001a\u0001\0\u0002\u001a\u0001\0" +
+          "\u0005\u001a\u0001\0\u0005\u001a\u0002\0\u0001\u001a\u0001\0\u0002\u001a\u0001\0" +
+          "\u0001\u001a\u0001\0\u0003\u001a\u0004\0\u0001\u001a\u0004\0\u0001\u001a\u000f\0" +
+          "\u0001\u001a\u0001\0\u0001\u001a\u0001\0\u0001\u001a\u0001\0\u0001\u001a\u0001\0" +
+          "\u0001\u001a\u0001\0\u0003\u001a\u0001\0\u0005\u001a\u0001\0\u0003\u001a\u0001\0" +
+          "\u0009\u001a\u0001\0\u0002\u001a\u0001\0\u000e\u001a\u0001\0\u0002\u001a\u0001\0" +
+          "\u0011\u001a\u0001\0\u0001\u001a\u0001\0\u0003\u001a\u0002\0\u0001\u001a\u0001\0" +
+          "\u0001\u001a\u0001\0\u0002\u001a\u0001\0\u0001\u001a\u000f\0\u0001\u001a\u0001\0" +
+          "\u0001\u001a\u0001\0\u0001\u001a\u0003\0\u0001\u001a\u0001\0\u0003\u001a\u0001\0" +
+          "\u0002\u001a\u0001\0\u0002\u001a\u0001\0\u0003\u001a\u0001\0\u0009\u001a\u0001\0" +
+          "\u0002\u001a\u0001\0\u000e\u001a\u0001\0\u0002\u001a\u0001\0\u0011\u001a\u0001\0" +
+          "\u0001\u001a\u0001\0\u0003\u001a\u0002\0\u0001\u001a\u0001\0\u0001\u001a\u0001\0" +
+          "\u0002\u001a\u0001\0\u0001\u001a\u000f\0\u0001\u001a\u0009\0\u0001\u001a\u0010\0" +
+          "\u0001\u001a\u001b\0\u0001\u001a\u0011\0\u0001\u001a\u0008\0\u0001\u001a\u0014\0" +
+          "\u0001\u001a\u0001\0\u0001\u001a\u0001\0\u0001\u001a\u0001\0\u0001\u001a\u0001\0" +
+          "\u0001\u001a\u0001\0\u0003\u001a\u0001\0\u0005\u001a\u0001\0\u0003\u001a\u0001\0" +
+          "\u0006\u001a\u0001\0\u0002\u001a\u0001\0\u0002\u001a\u0001\0\u0008\u001a\u0001\0" +
+          "\u0005\u001a\u0001\0\u0002\u001a\u0001\0\u0011\u001a\u0001\0\u0001\u001a\u0001\0" +
+          "\u0003\u001a\u0002\0\u0001\u001a\u0001\0\u0001\u001a\u0001\0\u0002\u001a\u0001\0" +
+          "\u0001\u001a\u0066\0\u0001\u001b\u000e\0\u0001\u001d\u0001\0\u0001\u001e\u0001\0" +
+          "\u0001\u001f\u0001\0\u0001\u0020\u0001\0\u0001\u0021\u0001\0\u0001\u0022\u0003\0" +
+          "\u0001\u0023\u0005\0\u0001\u0024\u0003\0\u0001\u0025\u0009\0\u0001\u0026\u0002\0" +
+          "\u0001\u0027\u000e\0\u0001\u0028\u0002\0\u0001\u0029\u0021\0\u0002\u0015\u0001\u002a" +
+          "\u0001\0\u0001\u002b\u0001\0\u0001\u002b\u0001\u002c\u0001\0\u0001\u0015\u0002\0" +
+          "\u0001\u0015\u0001\0\u0001\u001d\u0001\0\u0001\u001e\u0001\0\u0001\u001f\u0001\0" +
+          "\u0001\u0020\u0001\0\u0001\u0021\u0001\0\u0001\u002d\u0003\0\u0001\u002e\u0005\0" +
+          "\u0001\u002f\u0003\0\u0001\u0030\u0009\0\u0001\u0026\u0002\0\u0001\u0031\u000e\0" +
+          "\u0001\u0032\u0002\0\u0001\u0033\u0021\0\u0001\u0015\u0002\u0016\u0002\0\u0002\u0034" +
+          "\u0001\u0035\u0001\0\u0001\u0016\u0002\0\u0001\u0015\u000b\0\u0001\u0036\u000d\0" +
+          "\u0001\u0037\u000c\0\u0001\u0038\u000e\0\u0001\u0039\u0002\0\u0001\u003a\u0011\0" +
+          "\u0001\u003b\u0010\0\u0001\u0017\u0001\0\u0001\u0017\u0003\0\u0001\u002c\u0001\0" +
+          "\u0001\u0017\u0004\0\u0001\u001d\u0001\0\u0001\u001e\u0001\0\u0001\u001f\u0001\0" +
+          "\u0001\u0020\u0001\0\u0001\u0021\u0001\0\u0001\u003c\u0003\0\u0001\u002e\u0005\0" +
+          "\u0001\u002f\u0003\0\u0001\u003d\u0009\0\u0001\u0026\u0002\0\u0001\u003e\u000e\0" +
+          "\u0001\u003f\u0002\0\u0001\u0040\u0011\0\u0001\u0041\u000f\0\u0001\u0015\u0001\u0042" +
+          "\u0001\u0016\u0001\u0043\u0003\0\u0001\u0042\u0001\0\u0001\u0042\u0002\0\u0001\u0015" +
+          "\u0062\0\u0002\u0019\u000e\0\u0001\u0044\u000d\0\u0001\u0045\u000c\0\u0001\u0046" +
+          "\u000e\0\u0001\u0047\u0002\0\u0001\u0048\u0022\0\u0001\u001a\u0007\0\u0001\u001a" +
+          "\u000e\0\u0001\u0049\u000d\0\u0001\u004a\u000c\0\u0001\u004b\u000e\0\u0001\u004c" +
+          "\u0002\0\u0001\u004d\u0022\0\u0001\u001b\u0007\0\u0001\u001b\u0004\0\u0001\u001d" +
+          "\u0001\0\u0001\u001e\u0001\0\u0001\u001f\u0001\0\u0001\u0020\u0001\0\u0001\u0021" +
+          "\u0001\0\u0001\u004e\u0003\0\u0001\u0023\u0005\0\u0001\u0024\u0003\0\u0001\u004f" +
+          "\u0009\0\u0001\u0026\u0002\0\u0001\u0050\u000e\0\u0001\u0051\u0002\0\u0001\u0052" +
+          "\u0021\0\u0001\u0015\u0001\u001c\u0001\u002a\u0001\0\u0001\u002b\u0001\0\u0001\u002b" +
+          "\u0001\u002c\u0001\0\u0001\u001c\u0002\0\u0001\u001c\u0002\0\u0001\u0015\u0009\0" +
+          "\u0003\u0015\u0005\0\u0001\u0015\u0001\0\u0001\u0015\u0001\0\u0001\u0015\u0004\0" +
+          "\u0001\u0015\u0004\0\u0001\u0015\u0001\0\u0002\u0015\u0004\0\u0001\u0015\u0005\0" +
+          "\u0001\u0015\u0003\0\u0001\u0015\u0004\0\u0005\u0015\u0008\0\u0001\u002a\u0001\0" +
+          "\u0002\u0015\u0001\0\u0001\u0015\u0008\0\u0001\u0015\u0014\0\u0001\u0015\u0001\0" +
+          "\u0001\u002a\u0007\0\u0002\u0015\u0002\0\u0005\u0015\u0002\0\u0002\u0015\u0004\0" +
+          "\u0006\u0015\u0001\0\u0002\u0015\u0004\0\u0005\u0015\u0001\0\u0005\u0015\u0001\0" +
+          "\u0002\u0015\u0001\0\u0003\u0015\u0001\0\u0004\u0015\u0001\0\u0005\u0015\u0001\u002a" +
+          "\u0001\0\u0001\u0015\u0001\0\u0001\u0015\u0001\0\u0003\u0015\u0002\0\u0001\u0015" +
+          "\u0001\0\u0001\u0015\u0001\0\u0001\u0015\u0002\0\u0001\u0015\u000f\0\u0001\u0015" +
+          "\u0003\0\u0001\u0015\u0005\0\u0002\u0015\u0003\0\u0001\u0015\u0004\0\u0003\u0015" +
+          "\u0004\0\u0001\u0015\u0001\0\u0001\u0015\u0002\0\u0001\u0015\u0001\0\u0002\u0015" +
+          "\u0004\0\u0001\u0015\u0001\0\u0001\u0015\u0003\0\u0002\u0015\u0001\0\u0001\u0015" +
+          "\u0005\0\u0003\u0015\u0001\0\u0001\u0015\u0008\0\u0001\u0015\u0001\0\u0002\u002a" +
+          "\u0001\0\u0001\u0015\u0008\0\u0001\u0015\u0014\0\u0001\u0015\u0003\0\u0001\u0015" +
+          "\u0006\0\u0002\u0015\u0005\0\u0001\u0015\u0001\0\u0001\u0015\u0001\0\u0001\u0015" +
+          "\u0001\0\u0009\u0015\u0002\0\u0001\u0015\u0004\0\u0001\u0015\u0004\0\u0006\u0015" +
+          "\u0002\0\u0001\u0015\u0001\0\u0001\u0015\u0001\0\u0003\u0015\u0001\0\u0001\u0015" +
+          "\u0001\0\u0002\u0015\u0004\0\u0003\u0015\u0001\0\u0001\u0015\u0008\0\u0001\u0015" +
+          "\u0001\0\u0002\u0015\u0011\0\u0001\u0015\u0003\0\u0001\u0015\u0005\0\u0001\u0015" +
+          "\u001a\0\u000d\u0015\u0005\0\u0003\u0015\u0001\0\u0001\u0015\u0005\0\u0003\u0015" +
+          "\u0005\0\u0001\u0015\u0002\0\u0002\u0015\u0004\0\u0001\u0015\u0002\0\u0001\u0015" +
+          "\u0001\0\u0001\u0015\u0043\0\u0002\u0015\u0006\0\u0001\u0015\u002e\0\u0001\u0015" +
+          "\u0003\0\u0001\u0015\u0002\0\u0001\u0015\u0003\0\u0001\u0015\u0005\0\u0001\u0015" +
+          "\u0007\0\u0001\u0015\u0004\0\u0002\u0015\u0003\0\u0002\u0015\u0001\0\u0001\u0015" +
+          "\u0004\0\u0001\u0015\u0001\0\u0001\u0015\u0002\0\u0002\u0015\u0001\0\u0003\u0015" +
+          "\u0001\0\u0001\u0015\u0002\0\u0004\u0015\u0002\0\u0001\u0015\u0021\0\u0001\u001d" +
+          "\u0001\0\u0001\u001e\u0001\0\u0001\u001f\u0001\0\u0001\u0020\u0001\0\u0001\u0021" +
+          "\u0001\0\u0001\u0053\u0003\0\u0001\u0023\u0005\0\u0001\u0024\u0003\0\u0001\u0054" +
+          "\u0009\0\u0001\u0026\u0002\0\u0001\u0055\u000e\0\u0001\u0056\u0002\0\u0001\u0057" +
+          "\u0021\0\u0001\u0015\u0002\u002a\u0002\0\u0002\u0058\u0001\u002c\u0001\0\u0001\u002a" +
+          "\u0002\0\u0001\u0015\u0001\0\u0001\u001d\u0001\0\u0001\u001e\u0001\0\u0001\u001f" +
+          "\u0001\0\u0001\u0020\u0001\0\u0001\u0021\u0001\0\u0001\u0059\u0003\0\u0001\u005a" +
+          "\u0005\0\u0001\u005b\u0003\0\u0001\u005c\u0009\0\u0001\u0026\u0002\0\u0001\u005d" +
+          "\u000e\0\u0001\u005e\u0002\0\u0001\u005f\u0021\0\u0001\u0015\u0001\u002b\u0007\0" +
+          "\u0001\u002b\u0002\0\u0001\u0015\u0001\0\u0001\u001d\u0001\0\u0001\u001e\u0001\0" +
+          "\u0001\u001f\u0001\0\u0001\u0020\u0001\0\u0001\u0021\u0001\0\u0001\u0060\u0003\0" +
+          "\u0001\u0023\u0005\0\u0001\u0024\u0003\0\u0001\u0061\u0009\0\u0001\u0026\u0002\0" +
+          "\u0001\u0062\u000e\0\u0001\u0063\u0002\0\u0001\u0064\u0011\0\u0001\u0041\u000f\0" +
+          "\u0001\u0015\u0001\u002c\u0001\u002a\u0001\u0043\u0003\0\u0001\u002c\u0001\0\u0001\u002c" +
+          "\u0002\0\u0001\u0015\u0002\0\u0001\u0016\u0009\0\u0003\u0015\u0005\0\u0001\u0015" +
+          "\u0001\0\u0001\u0015\u0001\0\u0001\u0015\u0004\0\u0001\u0015\u0004\0\u0001\u0016" +
+          "\u0001\0\u0002\u0016\u0004\0\u0001\u0015\u0005\0\u0001\u0015\u0003\0\u0001\u0016" +
+          "\u0004\0\u0001\u0016\u0002\u0015\u0002\u0016\u0008\0\u0001\u0016\u0001\0\u0002\u0015" +
+          "\u0001\0\u0001\u0016\u0008\0\u0001\u0015\u0014\0\u0001\u0015\u0003\0\u0001\u0015" +
+          "\u0006\0\u0002\u0015\u0005\0\u0001\u0015\u0001\0\u0001\u0015\u0001\0\u0001\u0015" +
+          "\u0001\0\u0009\u0015\u0002\0\u0001\u0015\u0004\0\u0001\u0015\u0004\0\u0006\u0015" +
+          "\u0002\0\u0001\u0015\u0001\0\u0001\u0015\u0001\0\u0003\u0015\u0001\0\u0001\u0016" +
+          "\u0001\0\u0002\u0015\u0004\0\u0003\u0015\u0001\0\u0001\u0015\u0008\0\u0001\u0015" +
+          "\u0001\0\u0002\u0015\u0011\0\u0001\u0015\u0003\0\u0001\u0015\u0005\0\u0001\u0015" +
+          "\u001a\0\u000d\u0015\u0005\0\u0003\u0015\u0001\0\u0001\u0015\u0005\0\u0001\u0015" +
+          "\u0002\u0016\u0005\0\u0001\u0015\u0002\0\u0001\u0015\u0001\u0016\u0004\0\u0001\u0015" +
+          "\u0002\0\u0001\u0015\u0001\0\u0001\u0015\u0043\0\u0002\u0016\u0006\0\u0001\u0016" +
+          "\u002e\0\u0001\u0016\u0003\0\u0001\u0016\u0002\0\u0001\u0016\u0003\0\u0001\u0016" +
+          "\u0005\0\u0001\u0016\u0007\0\u0001\u0016\u0004\0\u0002\u0016\u0003\0\u0002\u0016" +
+          "\u0001\0\u0001\u0016\u0004\0\u0001\u0016\u0001\0\u0001\u0016\u0002\0\u0002\u0016" +
+          "\u0001\0\u0003\u0016\u0001\0\u0001\u0016\u0002\0\u0004\u0016\u0002\0\u0001\u0016" +
+          "\u002b\0\u0001\u0065\u0003\0\u0001\u0066\u0005\0\u0001\u0067\u0003\0\u0001\u0068" +
+          "\u000c\0\u0001\u0069\u000e\0\u0001\u006a\u0002\0\u0001\u006b\u0022\0\u0001\u0034" +
+          "\u0001\u0016\u0006\0\u0001\u0034\u0004\0\u0001\u001d\u0001\0\u0001\u001e\u0001\0" +
+          "\u0001\u001f\u0001\0\u0001\u0020\u0001\0\u0001\u0021\u0001\0\u0001\u006c\u0003\0" +
+          "\u0001\u002e\u0005\0\u0001\u002f\u0003\0\u0001\u006d\u0009\0\u0001\u0026\u0002\0" +
+          "\u0001\u006e\u000e\0\u0001\u006f\u0002\0\u0001\u0070\u0011\0\u0001\u0041\u000f\0" +
+          "\u0001\u0015\u0001\u0035\u0001\u0016\u0001\u0043\u0003\0\u0001\u0035\u0001\0\u0001\u0035" +
+          "\u0002\0\u0001\u0015\u0002\0\u0001\u0017\u001f\0\u0001\u0017\u0001\0\u0002\u0017" +
+          "\u000e\0\u0001\u0017\u0004\0\u0001\u0017\u0002\0\u0002\u0017\u000d\0\u0001\u0017" +
+          "\u005a\0\u0001\u0017\u006b\0\u0002\u0017\u0009\0\u0001\u0017\u004d\0\u0002\u0017" +
+          "\u0006\0\u0001\u0017\u002e\0\u0001\u0017\u0003\0\u0001\u0017\u0002\0\u0001\u0017" +
+          "\u0003\0\u0001\u0017\u0005\0\u0001\u0017\u0007\0\u0001\u0017\u0004\0\u0002\u0017" +
+          "\u0003\0\u0002\u0017\u0001\0\u0001\u0017\u0004\0\u0001\u0017\u0001\0\u0001\u0017" +
+          "\u0002\0\u0002\u0017\u0001\0\u0003\u0017\u0001\0\u0001\u0017\u0002\0\u0004\u0017" +
+          "\u0002\0\u0001\u0017\u006b\0\u0001\u0017\u001d\0\u0001\u0042\u0009\0\u0003\u0015" +
+          "\u0005\0\u0001\u0015\u0001\0\u0001\u0015\u0001\0\u0001\u0015\u0004\0\u0001\u0015" +
+          "\u0004\0\u0001\u0042\u0001\0\u0002\u0042\u0004\0\u0001\u0015\u0005\0\u0001\u0015" +
+          "\u0003\0\u0001\u0042\u0004\0\u0001\u0042\u0002\u0015\u0002\u0042\u0008\0\u0001\u0016" +
+          "\u0001\0\u0002\u0015\u0001\0\u0001\u0042\u0008\0\u0001\u0015\u0014\0\u0001\u0015" +
+          "\u0003\0\u0001\u0015\u0006\0\u0002\u0015\u0005\0\u0001\u0015\u0001\0\u0001\u0015" +
+          "\u0001\0\u0001\u0015\u0001\0\u0009\u0015\u0002\0\u0001\u0015\u0004\0\u0001\u0015" +
+          "\u0004\0\u0006\u0015\u0002\0\u0001\u0015\u0001\0\u0001\u0015\u0001\0\u0003\u0015" +
+          "\u0001\0\u0001\u0042\u0001\0\u0002\u0015\u0004\0\u0003\u0015\u0001\0\u0001\u0015" +
+          "\u0008\0\u0001\u0015\u0001\0\u0002\u0015\u0011\0\u0001\u0015\u0003\0\u0001\u0015" +
+          "\u0005\0\u0001\u0015\u001a\0\u000d\u0015\u0005\0\u0003\u0015\u0001\0\u0001\u0015" +
+          "\u0005\0\u0001\u0015\u0002\u0042\u0005\0\u0001\u0015\u0002\0\u0001\u0015\u0001\u0042" +
+          "\u0004\0\u0001\u0015\u0002\0\u0001\u0015\u0001\0\u0001\u0015\u0043\0\u0002\u0042" +
+          "\u0006\0\u0001\u0042\u002e\0\u0001\u0042\u0003\0\u0001\u0042\u0002\0\u0001\u0042" +
+          "\u0003\0\u0001\u0042\u0005\0\u0001\u0042\u0007\0\u0001\u0042\u0004\0\u0002\u0042" +
+          "\u0003\0\u0002\u0042\u0001\0\u0001\u0042\u0004\0\u0001\u0042\u0001\0\u0001\u0042" +
+          "\u0002\0\u0002\u0042\u0001\0\u0003\u0042\u0001\0\u0001\u0042\u0002\0\u0004\u0042" +
+          "\u0002\0\u0001\u0042\u006b\0\u0001\u0043\u0026\0\u0001\u0071\u000d\0\u0001\u0072" +
+          "\u000c\0\u0001\u0073\u000e\0\u0001\u0074\u0002\0\u0001\u0075\u0011\0\u0001\u0041" +
+          "\u0010\0\u0001\u0043\u0001\0\u0001\u0043\u0003\0\u0001\u002c\u0001\0\u0001\u0043" +
+          "\u0005\0\u0001\u001a\u001f\0\u0001\u001a\u0001\0\u0002\u001a\u000e\0\u0001\u001a" +
+          "\u0004\0\u0001\u001a\u0002\0\u0002\u001a\u000d\0\u0001\u001a\u005a\0\u0001\u001a" +
+          "\u006b\0\u0002\u001a\u0009\0\u0001\u001a\u004d\0\u0002\u001a\u0006\0\u0001\u001a" +
+          "\u002e\0\u0001\u001a\u0003\0\u0001\u001a\u0002\0\u0001\u001a\u0003\0\u0001\u001a" +
+          "\u0005\0\u0001\u001a\u0007\0\u0001\u001a\u0004\0\u0002\u001a\u0003\0\u0002\u001a" +
+          "\u0001\0\u0001\u001a\u0004\0\u0001\u001a\u0001\0\u0001\u001a\u0002\0\u0002\u001a" +
+          "\u0001\0\u0003\u001a\u0001\0\u0001\u001a\u0002\0\u0004\u001a\u0002\0\u0001\u001a" +
+          "\u0022\0\u0001\u001b\u001f\0\u0001\u001b\u0001\0\u0002\u001b\u000e\0\u0001\u001b" +
+          "\u0004\0\u0001\u001b\u0002\0\u0002\u001b\u000d\0\u0001\u001b\u005a\0\u0001\u001b" +
+          "\u006b\0\u0002\u001b\u0009\0\u0001\u001b\u004d\0\u0002\u001b\u0006\0\u0001\u001b" +
+          "\u002e\0\u0001\u001b\u0003\0\u0001\u001b\u0002\0\u0001\u001b\u0003\0\u0001\u001b" +
+          "\u0005\0\u0001\u001b\u0007\0\u0001\u001b\u0004\0\u0002\u001b\u0003\0\u0002\u001b" +
+          "\u0001\0\u0001\u001b\u0004\0\u0001\u001b\u0001\0\u0001\u001b\u0002\0\u0002\u001b" +
+          "\u0001\0\u0003\u001b\u0001\0\u0001\u001b\u0002\0\u0004\u001b\u0002\0\u0001\u001b" +
+          "\u0022\0\u0001\u001c\u0009\0\u0003\u0015\u0005\0\u0001\u0015\u0001\0\u0001\u0015" +
+          "\u0001\0\u0001\u0015\u0004\0\u0001\u0015\u0004\0\u0001\u001c\u0001\0\u0002\u001c" +
+          "\u0004\0\u0001\u0015\u0005\0\u0001\u0015\u0003\0\u0001\u001c\u0004\0\u0001\u001c" +
+          "\u0002\u0015\u0002\u001c\u0008\0\u0001\u002a\u0001\0\u0002\u0015\u0001\0\u0001\u001c" +
+          "\u0008\0\u0001\u0015\u0014\0\u0001\u0015\u0003\0\u0001\u0015\u0006\0\u0002\u0015" +
+          "\u0005\0\u0001\u0015\u0001\0\u0001\u0015\u0001\0\u0001\u0015\u0001\0\u0009\u0015" +
+          "\u0002\0\u0001\u0015\u0004\0\u0001\u0015\u0004\0\u0006\u0015\u0002\0\u0001\u0015" +
+          "\u0001\0\u0001\u0015\u0001\0\u0003\u0015\u0001\0\u0001\u001c\u0001\0\u0002\u0015" +
+          "\u0004\0\u0003\u0015\u0001\0\u0001\u0015\u0008\0\u0001\u0015\u0001\0\u0002\u0015" +
+          "\u0011\0\u0001\u0015\u0003\0\u0001\u0015\u0005\0\u0001\u0015\u001a\0\u000d\u0015" +
+          "\u0005\0\u0003\u0015\u0001\0\u0001\u0015\u0005\0\u0001\u0015\u0002\u001c\u0005\0" +
+          "\u0001\u0015\u0002\0\u0001\u0015\u0001\u001c\u0004\0\u0001\u0015\u0002\0\u0001\u0015" +
+          "\u0001\0\u0001\u0015\u0043\0\u0002\u001c\u0006\0\u0001\u001c\u002e\0\u0001\u001c" +
+          "\u0003\0\u0001\u001c\u0002\0\u0001\u001c\u0003\0\u0001\u001c\u0005\0\u0001\u001c" +
+          "\u0007\0\u0001\u001c\u0004\0\u0002\u001c\u0003\0\u0002\u001c\u0001\0\u0001\u001c" +
+          "\u0004\0\u0001\u001c\u0001\0\u0001\u001c\u0002\0\u0002\u001c\u0001\0\u0003\u001c" +
+          "\u0001\0\u0001\u001c\u0002\0\u0004\u001c\u0002\0\u0001\u001c\u0022\0\u0001\u002a" +
+          "\u0009\0\u0003\u0015\u0005\0\u0001\u0015\u0001\0\u0001\u0015\u0001\0\u0001\u0015" +
+          "\u0004\0\u0001\u0015\u0004\0\u0001\u002a\u0001\0\u0002\u002a\u0004\0\u0001\u0015" +
+          "\u0005\0\u0001\u0015\u0003\0\u0001\u002a\u0004\0\u0001\u002a\u0002\u0015\u0002\u002a" +
+          "\u0008\0\u0001\u002a\u0001\0\u0002\u0015\u0001\0\u0001\u002a\u0008\0\u0001\u0015" +
+          "\u0014\0\u0001\u0015\u0003\0\u0001\u0015\u0006\0\u0002\u0015\u0005\0\u0001\u0015" +
+          "\u0001\0\u0001\u0015\u0001\0\u0001\u0015\u0001\0\u0009\u0015\u0002\0\u0001\u0015" +
+          "\u0004\0\u0001\u0015\u0004\0\u0006\u0015\u0002\0\u0001\u0015\u0001\0\u0001\u0015" +
+          "\u0001\0\u0003\u0015\u0001\0\u0001\u002a\u0001\0\u0002\u0015\u0004\0\u0003\u0015" +
+          "\u0001\0\u0001\u0015\u0008\0\u0001\u0015\u0001\0\u0002\u0015\u0011\0\u0001\u0015" +
+          "\u0003\0\u0001\u0015\u0005\0\u0001\u0015\u001a\0\u000d\u0015\u0005\0\u0003\u0015" +
+          "\u0001\0\u0001\u0015\u0005\0\u0001\u0015\u0002\u002a\u0005\0\u0001\u0015\u0002\0" +
+          "\u0001\u0015\u0001\u002a\u0004\0\u0001\u0015\u0002\0\u0001\u0015\u0001\0\u0001\u0015" +
+          "\u0043\0\u0002\u002a\u0006\0\u0001\u002a\u002e\0\u0001\u002a\u0003\0\u0001\u002a" +
+          "\u0002\0\u0001\u002a\u0003\0\u0001\u002a\u0005\0\u0001\u002a\u0007\0\u0001\u002a" +
+          "\u0004\0\u0002\u002a\u0003\0\u0002\u002a\u0001\0\u0001\u002a\u0004\0\u0001\u002a" +
+          "\u0001\0\u0001\u002a\u0002\0\u0002\u002a\u0001\0\u0003\u002a\u0001\0\u0001\u002a" +
+          "\u0002\0\u0004\u002a\u0002\0\u0001\u002a\u002b\0\u0001\u0076\u0003\0\u0001\u0077" +
+          "\u0005\0\u0001\u0078\u0003\0\u0001\u0079\u000c\0\u0001\u007a\u000e\0\u0001\u007b" +
+          "\u0002\0\u0001\u007c\u0022\0\u0001\u0058\u0001\u002a\u0006\0\u0001\u0058\u0005\0" +
+          "\u0001\u002b\u0009\0\u0003\u0015\u0005\0\u0001\u0015\u0001\0\u0001\u0015\u0001\0" +
+          "\u0001\u0015\u0004\0\u0001\u0015\u0004\0\u0001\u002b\u0001\0\u0002\u002b\u0004\0" +
+          "\u0001\u0015\u0005\0\u0001\u0015\u0003\0\u0001\u002b\u0004\0\u0001\u002b\u0002\u0015" +
+          "\u0002\u002b\u000a\0\u0002\u0015\u0001\0\u0001\u002b\u0008\0\u0001\u0015\u0014\0" +
+          "\u0001\u0015\u0009\0\u0002\u0015\u0002\0\u0005\u0015\u0002\0\u0002\u0015\u0004\0" +
+          "\u0006\u0015\u0001\0\u0002\u0015\u0004\0\u0005\u0015\u0001\0\u0005\u0015\u0001\0" +
+          "\u0002\u0015\u0001\0\u0003\u0015\u0001\0\u0004\u0015\u0001\0\u0005\u0015\u0002\0" +
+          "\u0001\u0015\u0001\0\u0001\u0015\u0001\0\u0003\u0015\u0002\0\u0001\u0015\u0001\0" +
+          "\u0001\u0015\u0001\0\u0001\u0015\u0002\0\u0001\u0015\u000f\0\u0001\u0015\u0003\0" +
+          "\u0001\u0015\u0005\0\u0002\u0015\u0003\0\u0001\u0015\u0004\0\u0003\u0015\u0004\0" +
+          "\u0001\u0015\u0001\0\u0001\u0015\u0002\0\u0001\u0015\u0001\0\u0002\u0015\u0004\0" +
+          "\u0001\u0015\u0001\0\u0001\u0015\u0003\0\u0002\u0015\u0001\0\u0001\u0015\u0005\0" +
+          "\u0003\u0015\u0001\0\u0001\u0015\u0008\0\u0001\u0015\u0004\0\u0001\u0015\u0008\0" +
+          "\u0001\u0015\u0014\0\u0001\u0015\u0003\0\u0001\u0015\u0006\0\u0002\u0015\u0005\0" +
+          "\u0001\u0015\u0001\0\u0001\u0015\u0001\0\u0001\u0015\u0001\0\u0009\u0015\u0002\0" +
+          "\u0001\u0015\u0004\0\u0001\u0015\u0004\0\u0006\u0015\u0002\0\u0001\u0015\u0001\0" +
+          "\u0001\u0015\u0001\0\u0003\u0015\u0001\0\u0001\u002b\u0001\0\u0002\u0015\u0004\0" +
+          "\u0003\u0015\u0001\0\u0001\u0015\u0008\0\u0001\u0015\u0001\0\u0002\u0015\u0011\0" +
+          "\u0001\u0015\u0003\0\u0001\u0015\u0005\0\u0001\u0015\u001a\0\u000d\u0015\u0005\0" +
+          "\u0003\u0015\u0001\0\u0001\u0015\u0005\0\u0001\u0015\u0002\u002b\u0005\0\u0001\u0015" +
+          "\u0002\0\u0001\u0015\u0001\u002b\u0004\0\u0001\u0015\u0002\0\u0001\u0015\u0001\0" +
+          "\u0001\u0015\u0043\0\u0002\u002b\u0006\0\u0001\u002b\u002e\0\u0001\u002b\u0003\0" +
+          "\u0001\u002b\u0002\0\u0001\u002b\u0003\0\u0001\u002b\u0005\0\u0001\u002b\u0007\0" +
+          "\u0001\u002b\u0004\0\u0002\u002b\u0003\0\u0002\u002b\u0001\0\u0001\u002b\u0004\0" +
+          "\u0001\u002b\u0001\0\u0001\u002b\u0002\0\u0002\u002b\u0001\0\u0003\u002b\u0001\0" +
+          "\u0001\u002b\u0002\0\u0004\u002b\u0002\0\u0001\u002b\u0022\0\u0001\u002c\u0009\0" +
+          "\u0003\u0015\u0005\0\u0001\u0015\u0001\0\u0001\u0015\u0001\0\u0001\u0015\u0004\0" +
+          "\u0001\u0015\u0004\0\u0001\u002c\u0001\0\u0002\u002c\u0004\0\u0001\u0015\u0005\0" +
+          "\u0001\u0015\u0003\0\u0001\u002c\u0004\0\u0001\u002c\u0002\u0015\u0002\u002c\u0008\0" +
+          "\u0001\u002a\u0001\0\u0002\u0015\u0001\0\u0001\u002c\u0008\0\u0001\u0015\u0014\0" +
+          "\u0001\u0015\u0003\0\u0001\u0015\u0006\0\u0002\u0015\u0005\0\u0001\u0015\u0001\0" +
+          "\u0001\u0015\u0001\0\u0001\u0015\u0001\0\u0009\u0015\u0002\0\u0001\u0015\u0004\0" +
+          "\u0001\u0015\u0004\0\u0006\u0015\u0002\0\u0001\u0015\u0001\0\u0001\u0015\u0001\0" +
+          "\u0003\u0015\u0001\0\u0001\u002c\u0001\0\u0002\u0015\u0004\0\u0003\u0015\u0001\0" +
+          "\u0001\u0015\u0008\0\u0001\u0015\u0001\0\u0002\u0015\u0011\0\u0001\u0015\u0003\0" +
+          "\u0001\u0015\u0005\0\u0001\u0015\u001a\0\u000d\u0015\u0005\0\u0003\u0015\u0001\0" +
+          "\u0001\u0015\u0005\0\u0001\u0015\u0002\u002c\u0005\0\u0001\u0015\u0002\0\u0001\u0015" +
+          "\u0001\u002c\u0004\0\u0001\u0015\u0002\0\u0001\u0015\u0001\0\u0001\u0015\u0043\0" +
+          "\u0002\u002c\u0006\0\u0001\u002c\u002e\0\u0001\u002c\u0003\0\u0001\u002c\u0002\0" +
+          "\u0001\u002c\u0003\0\u0001\u002c\u0005\0\u0001\u002c\u0007\0\u0001\u002c\u0004\0" +
+          "\u0002\u002c\u0003\0\u0002\u002c\u0001\0\u0001\u002c\u0004\0\u0001\u002c\u0001\0" +
+          "\u0001\u002c\u0002\0\u0002\u002c\u0001\0\u0003\u002c\u0001\0\u0001\u002c\u0002\0" +
+          "\u0004\u002c\u0002\0\u0001\u002c\u0022\0\u0001\u0034\u001f\0\u0001\u0034\u0001\0" +
+          "\u0002\u0034\u000e\0\u0001\u0034\u0004\0\u0001\u0034\u0002\0\u0002\u0034\u0008\0" +
+          "\u0001\u0016\u0004\0\u0001\u0034\u001f\0\u0001\u0016\u0042\0\u0001\u0016\u0067\0" +
+          "\u0002\u0016\u005c\0\u0001\u0034\u006b\0\u0002\u0034\u0009\0\u0001\u0034\u004d\0" +
+          "\u0002\u0034\u0006\0\u0001\u0034\u002e\0\u0001\u0034\u0003\0\u0001\u0034\u0002\0" +
+          "\u0001\u0034\u0003\0\u0001\u0034\u0005\0\u0001\u0034\u0007\0\u0001\u0034\u0004\0" +
+          "\u0002\u0034\u0003\0\u0002\u0034\u0001\0\u0001\u0034\u0004\0\u0001\u0034\u0001\0" +
+          "\u0001\u0034\u0002\0\u0002\u0034\u0001\0\u0003\u0034\u0001\0\u0001\u0034\u0002\0" +
+          "\u0004\u0034\u0002\0\u0001\u0034\u0022\0\u0001\u0035\u0009\0\u0003\u0015\u0005\0" +
+          "\u0001\u0015\u0001\0\u0001\u0015\u0001\0\u0001\u0015\u0004\0\u0001\u0015\u0004\0" +
+          "\u0001\u0035\u0001\0\u0002\u0035\u0004\0\u0001\u0015\u0005\0\u0001\u0015\u0003\0" +
+          "\u0001\u0035\u0004\0\u0001\u0035\u0002\u0015\u0002\u0035\u0008\0\u0001\u0016\u0001\0" +
+          "\u0002\u0015\u0001\0\u0001\u0035\u0008\0\u0001\u0015\u0014\0\u0001\u0015\u0003\0" +
+          "\u0001\u0015\u0006\0\u0002\u0015\u0005\0\u0001\u0015\u0001\0\u0001\u0015\u0001\0" +
+          "\u0001\u0015\u0001\0\u0009\u0015\u0002\0\u0001\u0015\u0004\0\u0001\u0015\u0004\0" +
+          "\u0006\u0015\u0002\0\u0001\u0015\u0001\0\u0001\u0015\u0001\0\u0003\u0015\u0001\0" +
+          "\u0001\u0035\u0001\0\u0002\u0015\u0004\0\u0003\u0015\u0001\0\u0001\u0015\u0008\0" +
+          "\u0001\u0015\u0001\0\u0002\u0015\u0011\0\u0001\u0015\u0003\0\u0001\u0015\u0005\0" +
+          "\u0001\u0015\u001a\0\u000d\u0015\u0005\0\u0003\u0015\u0001\0\u0001\u0015\u0005\0" +
+          "\u0001\u0015\u0002\u0035\u0005\0\u0001\u0015\u0002\0\u0001\u0015\u0001\u0035\u0004\0" +
+          "\u0001\u0015\u0002\0\u0001\u0015\u0001\0\u0001\u0015\u0043\0\u0002\u0035\u0006\0" +
+          "\u0001\u0035\u002e\0\u0001\u0035\u0003\0\u0001\u0035\u0002\0\u0001\u0035\u0003\0" +
+          "\u0001\u0035\u0005\0\u0001\u0035\u0007\0\u0001\u0035\u0004\0\u0002\u0035\u0003\0" +
+          "\u0002\u0035\u0001\0\u0001\u0035\u0004\0\u0001\u0035\u0001\0\u0001\u0035\u0002\0" +
+          "\u0002\u0035\u0001\0\u0003\u0035\u0001\0\u0001\u0035\u0002\0\u0004\u0035\u0002\0" +
+          "\u0001\u0035\u0022\0\u0001\u0043\u001f\0\u0001\u0043\u0001\0\u0002\u0043\u000e\0" +
+          "\u0001\u0043\u0004\0\u0001\u0043\u0002\0\u0002\u0043\u000d\0\u0001\u0043\u005a\0" +
+          "\u0001\u0043\u006b\0\u0002\u0043\u0009\0\u0001\u0043\u004d\0\u0002\u0043\u0006\0" +
+          "\u0001\u0043\u002e\0\u0001\u0043\u0003\0\u0001\u0043\u0002\0\u0001\u0043\u0003\0" +
+          "\u0001\u0043\u0005\0\u0001\u0043\u0007\0\u0001\u0043\u0004\0\u0002\u0043\u0003\0" +
+          "\u0002\u0043\u0001\0\u0001\u0043\u0004\0\u0001\u0043\u0001\0\u0001\u0043\u0002\0" +
+          "\u0002\u0043\u0001\0\u0003\u0043\u0001\0\u0001\u0043\u0002\0\u0004\u0043\u0002\0" +
+          "\u0001\u0043\u0022\0\u0001\u0058\u001f\0\u0001\u0058\u0001\0\u0002\u0058\u000e\0" +
+          "\u0001\u0058\u0004\0\u0001\u0058\u0002\0\u0002\u0058\u0008\0\u0001\u002a\u0004\0" +
+          "\u0001\u0058\u001f\0\u0001\u002a\u0042\0\u0001\u002a\u0067\0\u0002\u002a\u005c\0" +
+          "\u0001\u0058\u006b\0\u0002\u0058\u0009\0\u0001\u0058\u004d\0\u0002\u0058\u0006\0" +
+          "\u0001\u0058\u002e\0\u0001\u0058\u0003\0\u0001\u0058\u0002\0\u0001\u0058\u0003\0" +
+          "\u0001\u0058\u0005\0\u0001\u0058\u0007\0\u0001\u0058\u0004\0\u0002\u0058\u0003\0" +
+          "\u0002\u0058\u0001\0\u0001\u0058\u0004\0\u0001\u0058\u0001\0\u0001\u0058\u0002\0" +
+          "\u0002\u0058\u0001\0\u0003\u0058\u0001\0\u0001\u0058\u0002\0\u0004\u0058\u0002\0" +
+          "\u0001\u0058\u0020\0";
+
+        private static int[] zzUnpackTrans()
+        {
+            int[] result = new int[11845];
+            int offset = 0;
+            offset = zzUnpackTrans(ZZ_TRANS_PACKED_0, offset, result);
+            return result;
+        }
+
+        private static int zzUnpackTrans(String packed, int offset, int[] result)
+        {
+            int i = 0;       /* index in packed string  */
+            int j = offset;  /* index in unpacked array */
+            int l = packed.Length;
+            while (i < l)
+            {
+                int count = packed[i++];
+                int value = packed[i++];
+                value--;
+                do result[j++] = value; while (--count > 0);
+            }
+            return j;
+        }
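+
+        // A note on the encoding, derived from the loop above: the packed string
+        // is a run-length encoding where each pair of chars is (count, value),
+        // and transition values are stored offset by +1 so that -1 (no transition)
+        // fits in a char. For example, the pair "\u0003\u0005" unpacks to three
+        // consecutive entries of 4.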
+
+
+        /* error codes */
+        private const int ZZ_UNKNOWN_ERROR = 0;
+        private const int ZZ_NO_MATCH = 1;
+        private const int ZZ_PUSHBACK_2BIG = 2;
+
+        /* error messages for the codes above */
+        private static readonly String[] ZZ_ERROR_MSG = {
+    "Unkown internal scanner error",
+    "Error: could not match input",
+    "Error: pushback value was too large"
+  };
+
+        /**
+         * ZZ_ATTRIBUTE[aState] contains the attributes of state <code>aState</code>
+         */
+        private static readonly int[] ZZ_ATTRIBUTE = zzUnpackAttribute();
+
+        private const String ZZ_ATTRIBUTE_PACKED_0 =
+          "\u0001\0\u0001\u0009\u001a\u0001\u000d\0\u0001\u0001\u0001\0\u0001\u0001\u0008\0" +
+          "\u0001\u0001\u000d\0\u0001\u0001\u0039\0";
+
+        private static int[] zzUnpackAttribute()
+        {
+            int[] result = new int[124];
+            int offset = 0;
+            offset = zzUnpackAttribute(ZZ_ATTRIBUTE_PACKED_0, offset, result);
+            return result;
+        }
+
+        private static int zzUnpackAttribute(String packed, int offset, int[] result)
+        {
+            int i = 0;       /* index in packed string  */
+            int j = offset;  /* index in unpacked array */
+            int l = packed.Length;
+            while (i < l)
+            {
+                int count = packed[i++];
+                int value = packed[i++];
+                do result[j++] = value; while (--count > 0);
+            }
+            return j;
+        }
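+
+        // GetNextToken() below tests these bits: bit 0 (value 1) marks an
+        // accepting state, and bit 3 (value 8) means scanning can stop as soon
+        // as that state is entered.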
+
+        /** the input device */
+        private TextReader zzReader;
+
+        /** the current state of the DFA */
+        private int zzState;
+
+        /** the current lexical state */
+        private int zzLexicalState = YYINITIAL;
+
+        /** this buffer contains the current text to be matched and is
+            the source of the yytext() string */
+        private char[] zzBuffer = new char[ZZ_BUFFERSIZE];
+
+        /** the text position at the last accepting state */
+        private int zzMarkedPos;
+
+        /** the current text position in the buffer */
+        private int zzCurrentPos;
+
+        /** startRead marks the beginning of the yytext() string in the buffer */
+        private int zzStartRead;
+
+        /** endRead marks the last character in the buffer that has been read
+            from input */
+        private int zzEndRead;
+
+        /** number of newlines encountered up to the start of the matched text */
+        private int yyline;
+
+        /** the number of characters up to the start of the matched text */
+        private int yychar;
+
+        /**
+         * the number of characters from the last newline up to the start of the 
+         * matched text
+         */
+        private int yycolumn;
+
+        /** 
+         * zzAtBOL == true <=> the scanner is currently at the beginning of a line
+         */
+        private bool zzAtBOL = true;
+
+        /** zzAtEOF == true <=> the scanner is at the EOF */
+        private bool zzAtEOF;
+
+        /** denotes if the user-EOF-code has already been executed */
+        private bool zzEOFDone;
+
+        /* user code: */
+        /** Alphanumeric sequences */
+        public const int WORD_TYPE = StandardTokenizer.ALPHANUM;
+
+        /** Numbers */
+        public const int NUMERIC_TYPE = StandardTokenizer.NUM;
+
+        /**
+         * Chars in class \p{Line_Break = Complex_Context} are from South East Asian
+         * scripts (Thai, Lao, Myanmar, Khmer, etc.).  Sequences of these are kept 
+         * together as a single token rather than broken up, because the logic
+         * required to break them at word boundaries is too complex for UAX#29.
+         * <p>
+         * See Unicode Line Breaking Algorithm: http://www.unicode.org/reports/tr14/#SA
+         */
+        public const int SOUTH_EAST_ASIAN_TYPE = StandardTokenizer.SOUTHEAST_ASIAN;
+
+        public const int IDEOGRAPHIC_TYPE = StandardTokenizer.IDEOGRAPHIC;
+
+        public const int HIRAGANA_TYPE = StandardTokenizer.HIRAGANA;
+
+        public const int KATAKANA_TYPE = StandardTokenizer.KATAKANA;
+
+        public const int HANGUL_TYPE = StandardTokenizer.HANGUL;
+
+        public int YYChar
+        {
+            get
+            {
+                return yychar;
+            }
+        }
+
+        /**
+         * Fills CharTermAttribute with the current token text.
+         */
+        public void GetText(ICharTermAttribute t)
+        {
+            t.CopyBuffer(zzBuffer, zzStartRead, zzMarkedPos - zzStartRead);
+        }
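+
+        // Usage sketch (scanner and termAtt are assumed names, not from this
+        // file): a tokenizer's IncrementToken() would typically do
+        //   int type = scanner.GetNextToken();
+        //   if (type != StandardTokenizerInterface.YYEOF) scanner.GetText(termAtt);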
+
+
+        /**
+         * Creates a new scanner.
+         *
+         * @param   input  the TextReader to read input from.
+         */
+        public StandardTokenizerImpl34(TextReader input)
+        {
+            this.zzReader = input;
+        }
+
+
+
+        /** 
+         * Unpacks the compressed character translation table.
+         *
+         * @param packed   the packed character translation table
+         * @return         the unpacked character translation table
+         */
+        private static char[] zzUnpackCMap(String packed)
+        {
+            char[] map = new char[0x10000];
+            int i = 0;  /* index in packed string  */
+            int j = 0;  /* index in unpacked array */
+            while (i < 2650)
+            {
+                int count = packed[i++];
+                char value = packed[i++];
+                do map[j++] = value; while (--count > 0);
+            }
+            return map;
+        }
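+
+        // The bound 2650 is the length of the packed map string: 1325
+        // (count, value) pairs whose counts should sum to 0x10000, one map
+        // entry per BMP code point.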
+
+
+        /**
+         * Refills the input buffer.
+         *
+         * @return      <code>false</code> iff there was new input.
+         * 
+         * @exception   System.IO.IOException  if any I/O error occurs
+         */
+        private bool zzRefill()
+        {
+
+            /* first: make room (if you can) */
+            if (zzStartRead > 0)
+            {
+                Array.Copy(zzBuffer, zzStartRead,
+                                 zzBuffer, 0,
+                                 zzEndRead - zzStartRead);
+
+                /* translate stored positions */
+                zzEndRead -= zzStartRead;
+                zzCurrentPos -= zzStartRead;
+                zzMarkedPos -= zzStartRead;
+                zzStartRead = 0;
+            }
+
+            /* is the buffer big enough? */
+            if (zzCurrentPos >= zzBuffer.Length)
+            {
+                /* if not: blow it up */
+                char[] newBuffer = new char[zzCurrentPos * 2];
+                Array.Copy(zzBuffer, 0, newBuffer, 0, zzBuffer.Length);
+                zzBuffer = newBuffer;
+            }
+
+            /* finally: fill the buffer with new input */
+            int numRead = zzReader.Read(zzBuffer, zzEndRead,
+                                                    zzBuffer.Length - zzEndRead);
+
+            if (numRead > 0)
+            {
+                zzEndRead += numRead;
+                return false;
+            }
+            // unlikely but not impossible: read 0 characters, but not at end of stream    
+            if (numRead == 0)
+            {
+                int c = zzReader.Read();
+                if (c <= 0)
+                {
+                    return true;
+                }
+                else
+                {
+                    zzBuffer[zzEndRead++] = (char)c;
+                    return false;
+                }
+            }
+
+            // numRead < 0: treat as end of stream (TextReader.Read is documented
+            // never to return a negative value, so this branch is defensive)
+            return true;
+        }
+
+
+        /**
+         * Closes the input stream.
+         */
+        public void yyclose()
+        {
+            zzAtEOF = true;            /* indicate end of file */
+            zzEndRead = zzStartRead;  /* invalidate buffer    */
+
+            if (zzReader != null)
+                zzReader.Close();
+        }
+
+
+        /**
+         * Resets the scanner to read from a new input stream.
+         * Does not close the old reader.
+         *
+         * All internal variables are reset, the old input stream 
+         * <b>cannot</b> be reused (internal buffer is discarded and lost).
+         * Lexical state is set to <tt>YYINITIAL</tt>.
+         *
+         * Internal scan buffer is resized down to its initial length, if it has grown.
+         *
+         * @param reader   the new input stream 
+         */
+        public void YYReset(TextReader reader)
+        {
+            zzReader = reader;
+            zzAtBOL = true;
+            zzAtEOF = false;
+            zzEOFDone = false;
+            zzEndRead = zzStartRead = 0;
+            zzCurrentPos = zzMarkedPos = 0;
+            yyline = yychar = yycolumn = 0;
+            zzLexicalState = YYINITIAL;
+            if (zzBuffer.Length > ZZ_BUFFERSIZE)
+                zzBuffer = new char[ZZ_BUFFERSIZE];
+        }
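+
+        // Reuse sketch: a pooled tokenizer calls YYReset(newReader) between
+        // inputs; note that the scan buffer is shrunk back to ZZ_BUFFERSIZE
+        // here if a large input previously grew it.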
+
+
+        /**
+         * Returns the current lexical state.
+         */
+        public int yystate()
+        {
+            return zzLexicalState;
+        }
+
+
+        /**
+         * Enters a new lexical state
+         *
+         * @param newState the new lexical state
+         */
+        public void yybegin(int newState)
+        {
+            zzLexicalState = newState;
+        }
+
+
+        /**
+         * Returns the text matched by the current regular expression.
+         */
+        public String yytext()
+        {
+            return new String(zzBuffer, zzStartRead, zzMarkedPos - zzStartRead);
+        }
+
+
+        /**
+         * Returns the character at position <tt>pos</tt> from the 
+         * matched text. 
+         * 
+         * It is equivalent to yytext()[pos], but faster.
+         *
+         * @param pos the position of the character to fetch. 
+         *            A value from 0 to YYLength-1.
+         *
+         * @return the character at position pos
+         */
+        public char yycharat(int pos)
+        {
+            return zzBuffer[zzStartRead + pos];
+        }
+
+
+        /**
+         * Returns the length of the matched text region.
+         */
+        public int YYLength
+        {
+            get
+            {
+                return zzMarkedPos - zzStartRead;
+            }
+        }
+
+
+        /**
+         * Reports an error that occurred while scanning.
+         *
+         * In a well-formed scanner (no or only correct usage of 
+         * yypushback(int) and a match-all fallback rule) this method 
+         * will only be called with things that "Can't Possibly Happen".
+         * If this method is called, something is seriously wrong
+         * (e.g. a JFlex bug producing a faulty scanner etc.).
+         *
+         * Usual syntax/scanner level error handling should be done
+         * in error fallback rules.
+         *
+         * @param   errorCode  the code of the error message to display
+         */
+        private void zzScanError(int errorCode)
+        {
+            String message;
+            try
+            {
+                message = ZZ_ERROR_MSG[errorCode];
+            }
+            catch (IndexOutOfRangeException)
+            {
+                message = ZZ_ERROR_MSG[ZZ_UNKNOWN_ERROR];
+            }
+
+            throw new Exception(message);
+        }
+
+
+        /**
+         * Pushes the specified number of characters back into the input stream.
+         *
+         * They will be read again by the next call of the scanning method.
+         *
+         * @param number  the number of characters to be read again.
+         *                This number must not be greater than YYLength!
+         */
+        public void yypushback(int number)
+        {
+            if (number > YYLength)
+                zzScanError(ZZ_PUSHBACK_2BIG);
+
+            zzMarkedPos -= number;
+        }
+
+
+        /**
+         * Resumes scanning until the next regular expression is matched,
+         * the end of input is encountered, or an I/O error occurs.
+         *
+         * @return      the next token
+         * @exception   System.IO.IOException  if any I/O error occurs
+         */
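+        /* A sketch of the driver loop below: the scanner remembers the last
+           accepting state seen in zzAction, follows ZZ_TRANS until it yields
+           -1 (no transition), backs zzMarkedPos up to the last accept point,
+           and then dispatches on ZZ_ACTION[zzAction]. */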
+        public int GetNextToken()
+        {
+            int zzInput;
+            int zzAction;
+
+            // cached fields:
+            int zzCurrentPosL;
+            int zzMarkedPosL;
+            int zzEndReadL = zzEndRead;
+            char[] zzBufferL = zzBuffer;
+            char[] zzCMapL = ZZ_CMAP;
+
+            int[] zzTransL = ZZ_TRANS;
+            int[] zzRowMapL = ZZ_ROWMAP;
+            int[] zzAttrL = ZZ_ATTRIBUTE;
+
+            while (true)
+            {
+                zzMarkedPosL = zzMarkedPos;
+
+                yychar += zzMarkedPosL - zzStartRead;
+
+                zzAction = -1;
+
+                zzCurrentPosL = zzCurrentPos = zzStartRead = zzMarkedPosL;
+
+                zzState = ZZ_LEXSTATE[zzLexicalState];
+
+                // set up zzAction for empty match case:
+                int zzAttributes = zzAttrL[zzState];
+                if ((zzAttributes & 1) == 1)
+                {
+                    zzAction = zzState;
+                }
+
+
+                //zzForAction: 
+                {
+                    while (true)
+                    {
+
+                        if (zzCurrentPosL < zzEndReadL)
+                            zzInput = zzBufferL[zzCurrentPosL++];
+                        else if (zzAtEOF)
+                        {
+                            zzInput = YYEOF;
+                            break;
+                        }
+                        else
+                        {
+                            // store back cached positions
+                            zzCurrentPos = zzCurrentPosL;
+                            zzMarkedPos = zzMarkedPosL;
+                            bool eof = zzRefill();
+                            // get translated positions and possibly new buffer
+                            zzCurrentPosL = zzCurrentPos;
+                            zzMarkedPosL = zzMarkedPos;
+                            zzBufferL = zzBuffer;
+                            zzEndReadL = zzEndRead;
+                            if (eof)
+                            {
+                                zzInput = YYEOF;
+                                break;
+                            }
+                            else
+                            {
+                                zzInput = zzBufferL[zzCurrentPosL++];
+                            }
+                        }
+                        int zzNext = zzTransL[zzRowMapL[zzState] + zzCMapL[zzInput]];
+                        if (zzNext == -1) break;
+                        zzState = zzNext;
+
+                        zzAttributes = zzAttrL[zzState];
+                        if ((zzAttributes & 1) == 1)
+                        {
+                            zzAction = zzState;
+                            zzMarkedPosL = zzCurrentPosL;
+                            if ((zzAttributes & 8) == 8) break;
+                        }
+
+                    }
+                }
+
+                // store back cached position
+                zzMarkedPos = zzMarkedPosL;
+
+                switch (zzAction < 0 ? zzAction : ZZ_ACTION[zzAction])
+                {
+                    case 1:
+                        { /* Break so we don't hit fall-through warning: */
+                            break; /* Not numeric, word, ideographic, hiragana, or SE Asian -- ignore it. */
+                        }
+                    case 9: break;
+                    case 2:
+                        {
+                            return WORD_TYPE;
+                        }
+                    case 10: break;
+                    case 3:
+                        {
+                            return NUMERIC_TYPE;
+                        }
+                    case 11: break;
+                    case 4:
+                        {
+                            return KATAKANA_TYPE;
+                        }
+                    case 12: break;
+                    case 5:
+                        {
+                            return SOUTH_EAST_ASIAN_TYPE;
+                        }
+                    case 13: break;
+                    case 6:
+                        {
+                            return IDEOGRAPHIC_TYPE;
+                        }
+                    case 14: break;
+                    case 7:
+                        {
+                            return HIRAGANA_TYPE;
+                        }
+                    case 15: break;
+                    case 8:
+                        {
+                            return HANGUL_TYPE;
+                        }
+                    case 16: break;
+                    default:
+                        if (zzInput == YYEOF && zzStartRead == zzCurrentPos)
+                        {
+                            zzAtEOF = true;
+                            {
+                                return StandardTokenizerInterface.YYEOF;
+                            }
+                        }
+                        else
+                        {
+                            zzScanError(ZZ_NO_MATCH);
+                        }
+                        break;
+                }
+            }
+        }
+
+
+    }
+}


[25/50] [abbrv] git commit: Error Cleanup

Posted by mh...@apache.org.
Error Cleanup


Project: http://git-wip-us.apache.org/repos/asf/lucenenet/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucenenet/commit/25ec42a2
Tree: http://git-wip-us.apache.org/repos/asf/lucenenet/tree/25ec42a2
Diff: http://git-wip-us.apache.org/repos/asf/lucenenet/diff/25ec42a2

Branch: refs/heads/branch_4x
Commit: 25ec42a2811e6eb428e804522ffd14e22756c9a1
Parents: 2a56f3b
Author: Paul Irwin <pa...@gmail.com>
Authored: Tue Aug 6 15:05:09 2013 -0400
Committer: Paul Irwin <pa...@gmail.com>
Committed: Tue Aug 6 15:05:09 2013 -0400

----------------------------------------------------------------------
 src/core/Codecs/Compressing/CompressionMode.cs  |   4 +-
 src/core/Codecs/Lucene3x/Lucene3xFields.cs      |  12 +-
 .../Codecs/Lucene3x/Lucene3xNormsProducer.cs    |   2 +-
 .../Codecs/Lucene40/Lucene40PostingsReader.cs   |   2 +-
 .../Codecs/Lucene41/Lucene41PostingsReader.cs   |   2 +-
 src/core/Document/DocumentStoredFieldVisitor.cs |   2 +-
 src/core/Index/AtomicReaderContext.cs           |  10 ++
 src/core/Index/BaseCompositeReader.cs           |   2 +-
 src/core/Index/CheckIndex.cs                    |  10 +-
 src/core/Index/CompositeReader.cs               |   2 +-
 src/core/Index/ConcurrentMergeScheduler.cs      |  12 +-
 src/core/Index/DocFieldProcessor.cs             |   4 +-
 src/core/Index/DocInverterPerField.cs           |   2 +-
 src/core/Index/DocValuesProcessor.cs            |   2 +-
 src/core/Index/DocumentsWriterDeleteQueue.cs    |  10 ++
 src/core/Index/FilterAtomicReader.cs            |   2 +-
 src/core/Index/FilterDirectoryReader.cs         |   4 +-
 src/core/Index/FlushPolicy.cs                   |   2 +-
 src/core/Index/FreqProxTermsWriter.cs           |   2 +-
 src/core/Index/FreqProxTermsWriterPerField.cs   |   2 +-
 src/core/Index/IndexFileDeleter.cs              |   4 +-
 src/core/Index/IndexReader.cs                   |   2 +-
 src/core/Index/IndexUpgrader.cs                 |   2 +-
 src/core/Index/IndexWriter.cs                   |   6 +-
 src/core/Index/MultiReader.cs                   |   2 +-
 src/core/Index/ParallelAtomicReader.cs          |   2 +-
 src/core/Index/ParallelCompositeReader.cs       |   6 +-
 src/core/Index/SegmentReader.cs                 |   2 +-
 src/core/Index/SlowCompositeReaderWrapper.cs    |   2 +-
 src/core/Index/StandardDirectoryReader.cs       |   2 +-
 src/core/Lucene.Net.csproj                      |   1 +
 src/core/Search/ConstantScoreQuery.cs           |   7 +-
 src/core/Search/DocTermOrdsRewriteMethod.cs     |   2 +-
 src/core/Search/FieldCacheRewriteMethod.cs      |   2 +-
 src/core/Search/FieldCacheTermsFilter.cs        |   2 +-
 src/core/Search/FieldComparator.cs              |  18 +--
 src/core/Search/FieldValueFilter.cs             |   2 +-
 src/core/Search/FilteredQuery.cs                |   2 +-
 src/core/Search/FuzzyTermsEnum.cs               |   7 +-
 .../Search/IMaxNonCompetitiveBoostAttribute.cs  |   3 +
 src/core/Search/IndexSearcher.cs                |   2 +-
 src/core/Search/LiveFieldValues.cs              |   2 +-
 .../Search/MaxNonCompetitiveBoostAttribute.cs   |  51 ++++++
 src/core/Search/ReferenceManager.cs             |  14 +-
 src/core/Store/CompoundFileDirectory.cs         |   4 +-
 src/core/Store/DataOutput.cs                    |   6 +-
 src/core/Util/Attribute.cs                      |   9 +-
 .../Util/Automaton/Lev2ParametricDescription.cs |  86 +++++-----
 .../Automaton/Lev2TParametricDescription.cs     | 156 +++++++++----------
 src/core/Util/CharsRef.cs                       |  67 +++++++-
 src/core/Util/FixedBitSet.cs                    |   8 +-
 src/core/Util/Fst/FST.cs                        |   4 +-
 src/core/Util/Fst/FSTEnum.cs                    |   2 +-
 src/core/Util/Fst/ForwardBytesReader.cs         |   2 +-
 src/core/Util/OpenBitSetIterator.cs             |   4 +-
 src/core/Util/Packed/BulkOperationPacked.cs     |   4 +-
 56 files changed, 367 insertions(+), 220 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucenenet/blob/25ec42a2/src/core/Codecs/Compressing/CompressionMode.cs
----------------------------------------------------------------------
diff --git a/src/core/Codecs/Compressing/CompressionMode.cs b/src/core/Codecs/Compressing/CompressionMode.cs
index 0982fd0..92f5316 100644
--- a/src/core/Codecs/Compressing/CompressionMode.cs
+++ b/src/core/Codecs/Compressing/CompressionMode.cs
@@ -163,7 +163,7 @@ namespace Lucene.Net.Codecs.Compressing
 
             public override void Compress(sbyte[] bytes, int off, int len, DataOutput output)
             {
-                LZ4.Compress(bytes, off, len, output, ht);
+                LZ4.Compress((byte[])(Array)bytes, off, len, output, ht);
             }
 
         }
@@ -180,7 +180,7 @@ namespace Lucene.Net.Codecs.Compressing
 
             public override void Compress(sbyte[] bytes, int off, int len, DataOutput output)
             {
-                LZ4.CompressHC(bytes, off, len, output, ht);
+                LZ4.CompressHC((byte[])(Array)bytes, off, len, output, ht);
             }
 
         }

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/25ec42a2/src/core/Codecs/Lucene3x/Lucene3xFields.cs
----------------------------------------------------------------------
diff --git a/src/core/Codecs/Lucene3x/Lucene3xFields.cs b/src/core/Codecs/Lucene3x/Lucene3xFields.cs
index 1c6d6e5..6476d00 100644
--- a/src/core/Codecs/Lucene3x/Lucene3xFields.cs
+++ b/src/core/Codecs/Lucene3x/Lucene3xFields.cs
@@ -136,7 +136,7 @@ namespace Lucene.Net.Codecs.Lucene3x
         {
             get
             {
-                return TermsDict.Count;
+                return TermsDict.Size;
             }
         }
 
@@ -329,7 +329,7 @@ namespace Lucene.Net.Codecs.Lucene3x
 
                 // Test if the term we seek'd to in fact found a
                 // surrogate pair at the same position as the E:
-                Term t2 = te.term();
+                Term t2 = te.Term;
 
                 // Cannot be null (or move to next field) because at
                 // "worst" it'd seek to the same term we are on now,
@@ -1129,7 +1129,7 @@ namespace Lucene.Net.Codecs.Lucene3x
             {
                 if (docs.Next())
                 {
-                    return docID = docs.Doc();
+                    return docID = docs.Doc;
                 }
                 else
                 {
@@ -1141,7 +1141,7 @@ namespace Lucene.Net.Codecs.Lucene3x
             {
                 if (docs.SkipTo(target))
                 {
-                    return docID = docs.Doc();
+                    return docID = docs.Doc;
                 }
                 else
                 {
@@ -1198,7 +1198,7 @@ namespace Lucene.Net.Codecs.Lucene3x
             {
                 if (pos.Next())
                 {
-                    return docID = pos.Doc();
+                    return docID = pos.Doc;
                 }
                 else
                 {
@@ -1210,7 +1210,7 @@ namespace Lucene.Net.Codecs.Lucene3x
             {
                 if (pos.SkipTo(target))
                 {
-                    return docID = pos.Doc();
+                    return docID = pos.Doc;
                 }
                 else
                 {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/25ec42a2/src/core/Codecs/Lucene3x/Lucene3xNormsProducer.cs
----------------------------------------------------------------------
diff --git a/src/core/Codecs/Lucene3x/Lucene3xNormsProducer.cs b/src/core/Codecs/Lucene3x/Lucene3xNormsProducer.cs
index add071a..f5b5243 100644
--- a/src/core/Codecs/Lucene3x/Lucene3xNormsProducer.cs
+++ b/src/core/Codecs/Lucene3x/Lucene3xNormsProducer.cs
@@ -13,7 +13,7 @@ namespace Lucene.Net.Codecs.Lucene3x
     internal class Lucene3xNormsProducer : DocValuesProducer
     {
         /** norms header placeholder */
-        internal static readonly byte[] NORMS_HEADER = new byte[] { (byte)'N', (byte)'R', (byte)'M', (byte)(sbyte)-1 };
+        internal static readonly byte[] NORMS_HEADER = new byte[] { (byte)'N', (byte)'R', (byte)'M', unchecked((byte)-1) };
 
         /** Extension of norms file */
         internal const string NORMS_EXTENSION = "nrm";

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/25ec42a2/src/core/Codecs/Lucene40/Lucene40PostingsReader.cs
----------------------------------------------------------------------
diff --git a/src/core/Codecs/Lucene40/Lucene40PostingsReader.cs b/src/core/Codecs/Lucene40/Lucene40PostingsReader.cs
index 1917b75..997aa18 100644
--- a/src/core/Codecs/Lucene40/Lucene40PostingsReader.cs
+++ b/src/core/Codecs/Lucene40/Lucene40PostingsReader.cs
@@ -219,7 +219,7 @@ namespace Lucene.Net.Codecs.Lucene40
             if (CanReuse(reuse, liveDocs))
             {
                 // if (DEBUG) System.out.println("SPR.docs ts=" + termState);
-                return ((SegmentDocsEnumBase)reuse).reset(fieldInfo, (StandardTermState)termState);
+                return ((SegmentDocsEnumBase)reuse).Reset(fieldInfo, (StandardTermState)termState);
             }
             return NewDocsEnum(liveDocs, fieldInfo, (StandardTermState)termState);
         }

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/25ec42a2/src/core/Codecs/Lucene41/Lucene41PostingsReader.cs
----------------------------------------------------------------------
diff --git a/src/core/Codecs/Lucene41/Lucene41PostingsReader.cs b/src/core/Codecs/Lucene41/Lucene41PostingsReader.cs
index 5b13030..4ccc1db 100644
--- a/src/core/Codecs/Lucene41/Lucene41PostingsReader.cs
+++ b/src/core/Codecs/Lucene41/Lucene41PostingsReader.cs
@@ -555,7 +555,7 @@ namespace Lucene.Net.Codecs.Lucene41
                     if (skipper == null)
                     {
                         // Lazy init: first time this enum has ever been used for skipping
-                        skipper = new Lucene41SkipReader(docIn.Clone(),
+                        skipper = new Lucene41SkipReader((IndexInput)docIn.Clone(),
                                                       Lucene41PostingsWriter.maxSkipLevels,
                                                       Lucene41PostingsFormat.BLOCK_SIZE,
                                                       indexHasPos,

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/25ec42a2/src/core/Document/DocumentStoredFieldVisitor.cs
----------------------------------------------------------------------
diff --git a/src/core/Document/DocumentStoredFieldVisitor.cs b/src/core/Document/DocumentStoredFieldVisitor.cs
index 8cb2185..86fdfe6 100644
--- a/src/core/Document/DocumentStoredFieldVisitor.cs
+++ b/src/core/Document/DocumentStoredFieldVisitor.cs
@@ -42,7 +42,7 @@ namespace Lucene.Net.Documents
             ft.StoreTermVectors = fieldInfo.HasVectors;
             ft.Indexed = fieldInfo.IsIndexed;
             ft.OmitNorms = fieldInfo.OmitsNorms;
-            ft.IndexOptions = fieldInfo.IndexOptions;
+            ft.IndexOptions = fieldInfo.IndexOptionsValue.GetValueOrDefault();
             doc.Add(new Field(fieldInfo.name, value, ft));
         }
 

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/25ec42a2/src/core/Index/AtomicReaderContext.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/AtomicReaderContext.cs b/src/core/Index/AtomicReaderContext.cs
index 4a804b3..9f69fb6 100644
--- a/src/core/Index/AtomicReaderContext.cs
+++ b/src/core/Index/AtomicReaderContext.cs
@@ -56,5 +56,15 @@ namespace Lucene.Net.Index
                 return reader;
             }
         }
+
+        // .NET Port: C# can't narrow the return type on an override the way Java can,
+        // so we add a helper property to avoid a bunch of casting.
+        public AtomicReader AtomicReader
+        {
+            get
+            {
+                return reader;
+            }
+        }
     }
 }

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/25ec42a2/src/core/Index/BaseCompositeReader.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/BaseCompositeReader.cs b/src/core/Index/BaseCompositeReader.cs
index 81f8322..f5cbdb9 100644
--- a/src/core/Index/BaseCompositeReader.cs
+++ b/src/core/Index/BaseCompositeReader.cs
@@ -170,6 +170,6 @@ namespace Lucene.Net.Index
             return subReadersList.Cast<IndexReader>().ToList();
         }
 
-        protected override abstract void DoClose();
+        protected internal override abstract void DoClose();
     }
 }

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/25ec42a2/src/core/Index/CheckIndex.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/CheckIndex.cs b/src/core/Index/CheckIndex.cs
index 4ea52da..c74195c 100644
--- a/src/core/Index/CheckIndex.cs
+++ b/src/core/Index/CheckIndex.cs
@@ -384,7 +384,7 @@ namespace Lucene.Net.Index
             }
 
             int numSegments = sis.Count;
-            var segmentsFileName = sis.GetCurrentSegmentFileName();
+            var segmentsFileName = sis.SegmentsFileName;
             IndexInput input = null;
             try
             {
@@ -617,8 +617,8 @@ namespace Lucene.Net.Index
                         infoStream.Write("    test: fields..............");
                     }
                     FieldInfos fieldInfos = reader.FieldInfos;
-                    Msg(infoStream, "OK [" + fieldInfos.Size() + " fields]");
-                    segInfoStat.numFields = fieldInfos.Size();
+                    Msg(infoStream, "OK [" + fieldInfos.Size + " fields]");
+                    segInfoStat.numFields = fieldInfos.Size;
 
                     // Test Field Norms
                     segInfoStat.fieldNormStatus = TestFieldNorms(reader, infoStream);
@@ -680,7 +680,7 @@ namespace Lucene.Net.Index
                 }
 
                 // Keeper
-                result.newSegments.Add((SegmentInfo)info.Clone());
+                result.newSegments.Add((SegmentInfoPerCommit)info.Clone());
             }
 
             if (0 == result.numBadSegments)
@@ -706,7 +706,7 @@ namespace Lucene.Net.Index
         }
 
         /// <summary> Test field norms.</summary>
-        private Status.FieldNormStatus TestFieldNorms(IEnumerable<string> fieldNames, SegmentReader reader)
+        private Status.FieldNormStatus TestFieldNorms(AtomicReader reader, StreamWriter infoStream)
         {
             var status = new Status.FieldNormStatus();
 

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/25ec42a2/src/core/Index/CompositeReader.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/CompositeReader.cs b/src/core/Index/CompositeReader.cs
index 205c48e..4a0d61b 100644
--- a/src/core/Index/CompositeReader.cs
+++ b/src/core/Index/CompositeReader.cs
@@ -74,7 +74,7 @@ namespace Lucene.Net.Index
 
         public abstract override void Document(int docID, StoredFieldVisitor visitor);
 
-        protected abstract override void DoClose();
+        protected internal abstract override void DoClose();
 
         public abstract override int DocFreq(Term term);
 

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/25ec42a2/src/core/Index/ConcurrentMergeScheduler.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/ConcurrentMergeScheduler.cs b/src/core/Index/ConcurrentMergeScheduler.cs
index 709e784..aae11e2 100644
--- a/src/core/Index/ConcurrentMergeScheduler.cs
+++ b/src/core/Index/ConcurrentMergeScheduler.cs
@@ -139,8 +139,8 @@ namespace Lucene.Net.Index
                 MergePolicy.OneMerge m1 = t1.CurrentMerge;
                 MergePolicy.OneMerge m2 = t2.CurrentMerge;
 
-                int c1 = m1 == null ? int.MaxValue : m1.TotalDocCount;
-                int c2 = m2 == null ? int.MaxValue : m2.TotalDocCount;
+                int c1 = m1 == null ? int.MaxValue : m1.totalDocCount;
+                int c2 = m2 == null ? int.MaxValue : m2.totalDocCount;
 
                 return c2 - c1;
             }
@@ -224,13 +224,13 @@ namespace Lucene.Net.Index
 
         private bool Verbose()
         {
-            return writer != null && writer.InfoStream.IsEnabled("CMS");
+            return writer != null && writer.infoStream.IsEnabled("CMS");
         }
 
         private void Message(String message)
         {
             if (Verbose())
-                writer.InfoStream.Message("CMS", message);
+                writer.infoStream.Message("CMS", message);
         }
 
         private void InitMergeThreadPriority()
@@ -532,7 +532,7 @@ namespace Lucene.Net.Index
 
                         // Subsequent times through the loop we do any new
                         // merge that writer says is necessary:
-                        merge = tWriter.GetNextMerge();
+                        merge = tWriter.NextMerge;
 
                         // Notify here in case any threads were stalled;
                         // they will notice that the pending merge has
@@ -546,7 +546,7 @@ namespace Lucene.Net.Index
                         {
                             parent.UpdateMergeThreads();
                             if (parent.Verbose())
-                                parent.Message("  merge thread: do another merge " + merge.SegString(merge.segments));
+                                parent.Message("  merge thread: do another merge " + tWriter.SegString(merge.segments));
                         }
                         else
                             break;

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/25ec42a2/src/core/Index/DocFieldProcessor.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/DocFieldProcessor.cs b/src/core/Index/DocFieldProcessor.cs
index 14c41c1..e93ff84 100644
--- a/src/core/Index/DocFieldProcessor.cs
+++ b/src/core/Index/DocFieldProcessor.cs
@@ -230,7 +230,7 @@ namespace Lucene.Net.Index
                     // needs to be more "pluggable" such that if I want
                     // to have a new "thing" my Fields can do, I can
                     // easily add it
-                    FieldInfo fi = fieldInfos.AddOrUpdate(fieldName, field.FieldType);
+                    FieldInfo fi = fieldInfos.AddOrUpdate(fieldName, field.FieldTypeValue);
 
                     fp = new DocFieldProcessorPerField(this, fi);
                     fp.next = fieldHash[hashPos];
@@ -244,7 +244,7 @@ namespace Lucene.Net.Index
                 }
                 else
                 {
-                    fp.fieldInfo.Update(field.FieldType);
+                    fp.fieldInfo.Update(field.FieldTypeValue);
                 }
 
                 if (thisFieldGen != fp.lastGen)
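
Porting note: the rename from FieldType to FieldTypeValue (also in DocInverterPerField and FreqProxTermsWriterPerField below) likely sidesteps a C# name-resolution clash: a property whose name matches a class name but whose declared type is a different interface shadows the class inside its declaring type. A hedged sketch of that clash, with hypothetical names:

    interface IIndexableFieldTypeSketch { }

    class FieldTypeSketch : IIndexableFieldTypeSketch
    {
        public static int Defaults;
    }

    class FieldSketch
    {
        // Were this property named "FieldTypeSketch", the simple name below
        // would bind to the property (typed as the interface) and the static
        // access would no longer compile. The "...Value" suffix avoids that.
        public IIndexableFieldTypeSketch FieldTypeValue { get; set; }

        int Touch()
        {
            return FieldTypeSketch.Defaults; // binds to the class, as intended
        }
    }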

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/25ec42a2/src/core/Index/DocInverterPerField.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/DocInverterPerField.cs b/src/core/Index/DocInverterPerField.cs
index 1d77aff..a2208d6 100644
--- a/src/core/Index/DocInverterPerField.cs
+++ b/src/core/Index/DocInverterPerField.cs
@@ -71,7 +71,7 @@ namespace Lucene.Net.Index
             {
 
                 IIndexableField field = fields[i];
-                IIndexableFieldType fieldType = field.FieldType;
+                IIndexableFieldType fieldType = field.FieldTypeValue;
 
                 // TODO FI: this should be "genericized" to querying
                 // consumer if it wants to see this particular field

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/25ec42a2/src/core/Index/DocValuesProcessor.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/DocValuesProcessor.cs b/src/core/Index/DocValuesProcessor.cs
index 5b94dce..4b90f37 100644
--- a/src/core/Index/DocValuesProcessor.cs
+++ b/src/core/Index/DocValuesProcessor.cs
@@ -66,7 +66,7 @@ namespace Lucene.Net.Index
         {
             if (writers.Count > 0)
             {
-                DocValuesFormat fmt = state.segmentInfo.Codec.DocValuesFormat();
+                DocValuesFormat fmt = state.segmentInfo.Codec.DocValuesFormat;
                 DocValuesConsumer dvConsumer = fmt.FieldsConsumer(state);
                 bool success = false;
                 try
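
Porting note: dropping the parentheses reflects the Java getter becoming a C# read-only property; the same conversion recurs below for FlushPolicy.AllActiveThreadStates and Codec.PostingsFormat. A minimal sketch of the shape (the member type is illustrative):

    abstract class CodecSketch
    {
        // Java:  public abstract DocValuesFormat docValuesFormat();
        // C#:    a parameterless, side-effect-free getter maps to a property.
        public abstract string DocValuesFormat { get; }
    }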

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/25ec42a2/src/core/Index/DocumentsWriterDeleteQueue.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/DocumentsWriterDeleteQueue.cs b/src/core/Index/DocumentsWriterDeleteQueue.cs
index 358fcca..59ab8ad 100644
--- a/src/core/Index/DocumentsWriterDeleteQueue.cs
+++ b/src/core/Index/DocumentsWriterDeleteQueue.cs
@@ -432,5 +432,15 @@ namespace Lucene.Net.Index
                 }
             }
         }
+
+        public long BytesUsed
+        {
+            get { return Interlocked.Read(ref globalBufferedDeletes.bytesUsed); }
+        }
+
+        public override string ToString()
+        {
+            return "DWDQ: [ generation: " + generation + " ]";
+        }
     }
 }
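
Porting note: BytesUsed snapshots a long that indexing threads update concurrently. A plain read of a 64-bit field is not atomic on 32-bit runtimes and can observe a torn value; Interlocked.Read guarantees a consistent one. A standalone sketch:

    using System.Threading;

    class BytesUsedCounter
    {
        private long bytesUsed; // mutated from multiple threads

        public void Add(long delta)
        {
            Interlocked.Add(ref bytesUsed, delta);
        }

        public long BytesUsed
        {
            // Atomic even on 32-bit CLRs, where "return bytesUsed;" is not.
            get { return Interlocked.Read(ref bytesUsed); }
        }
    }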

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/25ec42a2/src/core/Index/FilterAtomicReader.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/FilterAtomicReader.cs b/src/core/Index/FilterAtomicReader.cs
index 16d49c3..b2d1bc1 100644
--- a/src/core/Index/FilterAtomicReader.cs
+++ b/src/core/Index/FilterAtomicReader.cs
@@ -307,7 +307,7 @@ namespace Lucene.Net.Index
             instance.Document(docID, visitor);
         }
 
-        protected override void DoClose()
+        protected internal override void DoClose()
         {
             instance.Dispose();
         }

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/25ec42a2/src/core/Index/FilterDirectoryReader.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/FilterDirectoryReader.cs b/src/core/Index/FilterDirectoryReader.cs
index 938e2f9..a76fbf1 100644
--- a/src/core/Index/FilterDirectoryReader.cs
+++ b/src/core/Index/FilterDirectoryReader.cs
@@ -43,7 +43,7 @@ namespace Lucene.Net.Index
         }
 
         public FilterDirectoryReader(DirectoryReader instance, SubReaderWrapper wrapper)
-            : base(instance.Directory, wrapper.Wrap(instance.GetSequentialSubReaders()))
+            : base(instance.Directory, wrapper.Wrap(instance.GetSequentialSubReaders().OfType<AtomicReader>().ToList()))
         {
             this.instance = instance;
         }
@@ -85,7 +85,7 @@ namespace Lucene.Net.Index
             get { return instance.IndexCommit; }
         }
 
-        protected override void DoClose()
+        protected internal override void DoClose()
         {
             instance.DoClose();
         }

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/25ec42a2/src/core/Index/FlushPolicy.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/FlushPolicy.cs b/src/core/Index/FlushPolicy.cs
index 3ac40cc..9546022 100644
--- a/src/core/Index/FlushPolicy.cs
+++ b/src/core/Index/FlushPolicy.cs
@@ -40,7 +40,7 @@ namespace Lucene.Net.Index
             // the dwpt which needs to be flushed eventually
             ThreadState maxRamUsingThreadState = perThreadState;
             //assert !perThreadState.flushPending : "DWPT should have flushed";
-            IEnumerator<ThreadState> activePerThreadsIterator = control.AllActiveThreadStates();
+            IEnumerator<ThreadState> activePerThreadsIterator = control.AllActiveThreadStates;
             while (activePerThreadsIterator.MoveNext())
             {
                 ThreadState next = activePerThreadsIterator.Current;

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/25ec42a2/src/core/Index/FreqProxTermsWriter.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/FreqProxTermsWriter.cs b/src/core/Index/FreqProxTermsWriter.cs
index b5d1d11..586fd35 100644
--- a/src/core/Index/FreqProxTermsWriter.cs
+++ b/src/core/Index/FreqProxTermsWriter.cs
@@ -54,7 +54,7 @@ namespace Lucene.Net.Index
             // Sort by field name
             CollectionUtil.QuickSort(allFields);
 
-            FieldsConsumer consumer = state.segmentInfo.Codec.PostingsFormat().FieldsConsumer(state);
+            FieldsConsumer consumer = state.segmentInfo.Codec.PostingsFormat.FieldsConsumer(state);
 
             bool success = false;
 

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/25ec42a2/src/core/Index/FreqProxTermsWriterPerField.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/FreqProxTermsWriterPerField.cs b/src/core/Index/FreqProxTermsWriterPerField.cs
index fed6bea..8726b6f 100644
--- a/src/core/Index/FreqProxTermsWriterPerField.cs
+++ b/src/core/Index/FreqProxTermsWriterPerField.cs
@@ -113,7 +113,7 @@ namespace Lucene.Net.Index
         {
             for (int i = 0; i < count; i++)
             {
-                if (fields[i].FieldType.Indexed)
+                if (fields[i].FieldTypeValue.Indexed)
                 {
                     return true;
                 }

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/25ec42a2/src/core/Index/IndexFileDeleter.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/IndexFileDeleter.cs b/src/core/Index/IndexFileDeleter.cs
index b0a3a6c..6f37614 100644
--- a/src/core/Index/IndexFileDeleter.cs
+++ b/src/core/Index/IndexFileDeleter.cs
@@ -213,7 +213,7 @@ namespace Lucene.Net.Index
                             }
                             if (sis != null)
                             {
-                                CommitPoint commitPoint = new CommitPoint(commitsToDelete, directory, sis);
+                                CommitPoint commitPoint = new CommitPoint(this, commitsToDelete, directory, sis);
                                 if (sis.Generation == segmentInfos.Generation)
                                 {
                                     currentCommitPoint = commitPoint;
@@ -253,7 +253,7 @@ namespace Lucene.Net.Index
                 {
                     infoStream.Message("IFD", "forced open of current segments file " + segmentInfos.SegmentsFileName);
                 }
-                currentCommitPoint = new CommitPoint(commitsToDelete, directory, sis);
+                currentCommitPoint = new CommitPoint(this, commitsToDelete, directory, sis);
                 commits.Add(currentCommitPoint);
                 IncRef(sis, true);
             }

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/25ec42a2/src/core/Index/IndexReader.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/IndexReader.cs b/src/core/Index/IndexReader.cs
index a2c00dd..1276c89 100644
--- a/src/core/Index/IndexReader.cs
+++ b/src/core/Index/IndexReader.cs
@@ -394,7 +394,7 @@ namespace Lucene.Net.Index
         }
 
         /// <summary>Implements close. </summary>
-        protected abstract void DoClose();
+        protected internal abstract void DoClose();
 
         public abstract IndexReaderContext Context { get; }
 

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/25ec42a2/src/core/Index/IndexUpgrader.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/IndexUpgrader.cs b/src/core/Index/IndexUpgrader.cs
index 52358ff..daec1fe 100644
--- a/src/core/Index/IndexUpgrader.cs
+++ b/src/core/Index/IndexUpgrader.cs
@@ -122,7 +122,7 @@ namespace Lucene.Net.Index
             }
 
             IndexWriterConfig c = (IndexWriterConfig)iwc.Clone();
-            c.MergePolicy = new UpgradeIndexMergePolicy(c.MergePolicy);
+            c.SetMergePolicy(new UpgradeIndexMergePolicy(c.MergePolicy));
             c.SetIndexDeletionPolicy(new KeepOnlyLastCommitDeletionPolicy());
 
             IndexWriter w = new IndexWriter(dir, c);
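
Porting note: the switch from property assignment to SetMergePolicy matches the SetIndexDeletionPolicy call on the next line; Java's IndexWriterConfig setters return the config so calls can chain. A hedged sketch with hypothetical names:

    class WriterConfigSketch
    {
        public object MergePolicy { get; private set; }

        // Returning 'this' mirrors the fluent Java setters the port follows.
        public WriterConfigSketch SetMergePolicy(object policy)
        {
            MergePolicy = policy;
            return this;
        }
    }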

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/25ec42a2/src/core/Index/IndexWriter.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/IndexWriter.cs b/src/core/Index/IndexWriter.cs
index 2f4c37b..99c1d33 100644
--- a/src/core/Index/IndexWriter.cs
+++ b/src/core/Index/IndexWriter.cs
@@ -1176,7 +1176,7 @@ namespace Lucene.Net.Index
                     // Composite reader: lookup sub-reader and re-base docID:
                     IList<AtomicReaderContext> leaves = readerIn.Leaves;
                     int subIndex = ReaderUtil.SubIndex(docID, leaves);
-                    reader = leaves[subIndex].Reader;
+                    reader = leaves[subIndex].AtomicReader;
                     docID -= leaves[subIndex].docBase;
                     //assert docID >= 0;
                     //assert docID < reader.maxDoc();
@@ -1742,7 +1742,7 @@ namespace Lucene.Net.Index
                 mergeScheduler.Dispose();
 
                 bufferedDeletesStream.Clear();
-                docWriter.Close(); // mark it as closed first to prevent subsequent indexing actions/flushes 
+                docWriter.Dispose(); // mark it as closed first to prevent subsequent indexing actions/flushes 
                 docWriter.Abort(); // don't sync on IW here
                 lock (this)
                 {
@@ -2196,7 +2196,7 @@ namespace Lucene.Net.Index
                     numDocs += indexReader.NumDocs;
                     foreach (AtomicReaderContext ctx in indexReader.Leaves)
                     {
-                        mergeReaders.Add(ctx.Reader);
+                        mergeReaders.Add(ctx.AtomicReader);
                     }
                 }
                 IOContext context = new IOContext(new MergeInfo(numDocs, -1, true, -1));
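
Porting note: docWriter.Close() becomes Dispose(), the IDisposable spelling of Java's Closeable.close(). Note the call order in the hunk above: Abort() still runs on the disposed writer, so Dispose here evidently only flags the writer closed rather than tearing it down. A sketch of that shape, with a hypothetical type:

    class DocWriterSketch : System.IDisposable
    {
        private volatile bool closed;

        public void Dispose()
        {
            closed = true; // mark closed; no resources released here
        }

        public void Abort()
        {
            // still callable after Dispose: only the flag was set
        }
    }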

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/25ec42a2/src/core/Index/MultiReader.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/MultiReader.cs b/src/core/Index/MultiReader.cs
index f1c31cc..c6c3778 100644
--- a/src/core/Index/MultiReader.cs
+++ b/src/core/Index/MultiReader.cs
@@ -64,7 +64,7 @@ namespace Lucene.Net.Index
             }
         }
 
-        protected override void DoClose()
+        protected internal override void DoClose()
         {
             System.IO.IOException ioe = null;
             foreach (IndexReader r in GetSequentialSubReaders())

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/25ec42a2/src/core/Index/ParallelAtomicReader.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/ParallelAtomicReader.cs b/src/core/Index/ParallelAtomicReader.cs
index dc49acd..ea4fd73 100644
--- a/src/core/Index/ParallelAtomicReader.cs
+++ b/src/core/Index/ParallelAtomicReader.cs
@@ -219,7 +219,7 @@ namespace Lucene.Net.Index
             return fields;
         }
 
-        protected override void DoClose()
+        protected internal override void DoClose()
         {
             System.IO.IOException ioe = null;
             foreach (AtomicReader reader in completeReaderSet)

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/25ec42a2/src/core/Index/ParallelCompositeReader.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/ParallelCompositeReader.cs b/src/core/Index/ParallelCompositeReader.cs
index c3452b0..c9aaf7c 100644
--- a/src/core/Index/ParallelCompositeReader.cs
+++ b/src/core/Index/ParallelCompositeReader.cs
@@ -113,7 +113,7 @@ namespace Lucene.Net.Index
             {
             }
 
-            protected override void DoClose()
+            protected internal override void DoClose()
             {
             }
         }
@@ -125,7 +125,7 @@ namespace Lucene.Net.Index
             {
             }
 
-            protected override void DoClose()
+            protected internal override void DoClose()
             {
             }
         }
@@ -160,7 +160,7 @@ namespace Lucene.Net.Index
             }
         }
 
-        protected override void DoClose()
+        protected internal override void DoClose()
         {
             System.IO.IOException ioe = null;
             foreach (IndexReader reader in completeReaderSet)

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/25ec42a2/src/core/Index/SegmentReader.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/SegmentReader.cs b/src/core/Index/SegmentReader.cs
index ba84bf4..cf4f7f9 100644
--- a/src/core/Index/SegmentReader.cs
+++ b/src/core/Index/SegmentReader.cs
@@ -103,7 +103,7 @@ namespace Lucene.Net.Index
             }
         }
 
-        protected override void DoClose()
+        protected internal override void DoClose()
         {
             core.DecRef();
         }

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/25ec42a2/src/core/Index/SlowCompositeReaderWrapper.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/SlowCompositeReaderWrapper.cs b/src/core/Index/SlowCompositeReaderWrapper.cs
index 6542b13..958a627 100644
--- a/src/core/Index/SlowCompositeReaderWrapper.cs
+++ b/src/core/Index/SlowCompositeReaderWrapper.cs
@@ -221,7 +221,7 @@ namespace Lucene.Net.Index
             }
         }
 
-        protected override void DoClose()
+        protected internal override void DoClose()
         {
             // TODO: as this is a wrapper, should we really close the delegate?
             in_renamed.Dispose();

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/25ec42a2/src/core/Index/StandardDirectoryReader.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/StandardDirectoryReader.cs b/src/core/Index/StandardDirectoryReader.cs
index dd9c006..4ec5e91 100644
--- a/src/core/Index/StandardDirectoryReader.cs
+++ b/src/core/Index/StandardDirectoryReader.cs
@@ -415,7 +415,7 @@ namespace Lucene.Net.Index
             }
         }
 
-        protected override void DoClose()
+        protected internal override void DoClose()
         {
             Exception firstExc = null;
             foreach (AtomicReader r in GetSequentialSubReaders())

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/25ec42a2/src/core/Lucene.Net.csproj
----------------------------------------------------------------------
diff --git a/src/core/Lucene.Net.csproj b/src/core/Lucene.Net.csproj
index de68b1d..4b17cab 100644
--- a/src/core/Lucene.Net.csproj
+++ b/src/core/Lucene.Net.csproj
@@ -585,6 +585,7 @@
     <Compile Include="Search\MatchAllDocsQuery.cs">
       <SubType>Code</SubType>
     </Compile>
+    <Compile Include="Search\MaxNonCompetitiveBoostAttribute.cs" />
     <Compile Include="Search\MinShouldMatchSumScorer.cs" />
     <Compile Include="Search\MultiPhraseQuery.cs">
       <SubType>Code</SubType>

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/25ec42a2/src/core/Search/ConstantScoreQuery.cs
----------------------------------------------------------------------
diff --git a/src/core/Search/ConstantScoreQuery.cs b/src/core/Search/ConstantScoreQuery.cs
index 17ccd6d..26cfef6 100644
--- a/src/core/Search/ConstantScoreQuery.cs
+++ b/src/core/Search/ConstantScoreQuery.cs
@@ -112,7 +112,8 @@ namespace Lucene.Net.Search
                 get
                 {
                     // we calculate sumOfSquaredWeights of the inner weight, but ignore it (just to initialize everything)
-                    if (innerWeight != null) innerWeight.ValueForNormalization;
+                    // .NET Port: was this a bug in the Java code?
+                    //if (innerWeight != null) innerWeight.ValueForNormalization;
                     queryWeight = enclosingInstance.Boost;
                     return queryWeight * queryWeight;
                 }
@@ -149,7 +150,7 @@ namespace Lucene.Net.Search
                 {
                     return null;
                 }
-                return new ConstantScorer(disi, this, queryWeight);
+                return new ConstantScorer(enclosingInstance, disi, this, queryWeight);
             }
 
             public override bool ScoresDocsOutOfOrder
@@ -162,7 +163,7 @@ namespace Lucene.Net.Search
 
             public override Explanation Explain(AtomicReaderContext context, int doc)
             {
-                Scorer cs = Scorer(context, true, false, context.Reader.LiveDocs);
+                Scorer cs = Scorer(context, true, false, ((AtomicReader)context.Reader).LiveDocs);
                 bool exists = (cs != null && cs.Advance(doc) == doc);
 
                 ComplexExplanation result = new ComplexExplanation();
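
Porting note: the removed Java statement read a value purely for its initializing side effect. A bare property access is not a legal C# statement, so the port comments the line out and flags it. If the side effect turns out to matter, assigning the getter to a throwaway local preserves it; a sketch with illustrative names:

    class InnerWeightSketch
    {
        public float ValueForNormalization
        {
            // imagine state initialization happening in this getter
            get { return 1.0f; }
        }
    }

    class WeightSketch
    {
        public float Normalize(InnerWeightSketch innerWeight)
        {
            // "innerWeight.ValueForNormalization;" alone will not compile;
            // an assignment keeps the getter's side effect:
            float ignored = innerWeight.ValueForNormalization;
            return ignored;
        }
    }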

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/25ec42a2/src/core/Search/DocTermOrdsRewriteMethod.cs
----------------------------------------------------------------------
diff --git a/src/core/Search/DocTermOrdsRewriteMethod.cs b/src/core/Search/DocTermOrdsRewriteMethod.cs
index fcf85ab..f30a367 100644
--- a/src/core/Search/DocTermOrdsRewriteMethod.cs
+++ b/src/core/Search/DocTermOrdsRewriteMethod.cs
@@ -57,7 +57,7 @@ namespace Lucene.Net.Search
 
             public override DocIdSet GetDocIdSet(AtomicReaderContext context, IBits acceptDocs)
             {
-                SortedSetDocValues docTermOrds = FieldCache.DEFAULT.GetDocTermOrds(context.Reader, query.Field);
+                SortedSetDocValues docTermOrds = FieldCache.DEFAULT.GetDocTermOrds((AtomicReader)context.Reader, query.Field);
                 // Cannot use FixedBitSet because we require long index (ord):
                 OpenBitSet termSet = new OpenBitSet(docTermOrds.ValueCount);
 

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/25ec42a2/src/core/Search/FieldCacheRewriteMethod.cs
----------------------------------------------------------------------
diff --git a/src/core/Search/FieldCacheRewriteMethod.cs b/src/core/Search/FieldCacheRewriteMethod.cs
index d86bbfc..03a57a5 100644
--- a/src/core/Search/FieldCacheRewriteMethod.cs
+++ b/src/core/Search/FieldCacheRewriteMethod.cs
@@ -54,7 +54,7 @@ namespace Lucene.Net.Search
 
             public override DocIdSet GetDocIdSet(AtomicReaderContext context, IBits acceptDocs)
             {
-                SortedDocValues fcsi = FieldCache.DEFAULT.GetTermsIndex(context.Reader, query.Field);
+                SortedDocValues fcsi = FieldCache.DEFAULT.GetTermsIndex(context.AtomicReader, query.Field);
                 // Cannot use FixedBitSet because we require long index (ord):
                 OpenBitSet termSet = new OpenBitSet(fcsi.ValueCount);
 

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/25ec42a2/src/core/Search/FieldCacheTermsFilter.cs
----------------------------------------------------------------------
diff --git a/src/core/Search/FieldCacheTermsFilter.cs b/src/core/Search/FieldCacheTermsFilter.cs
index d1d6303..0776d2f 100644
--- a/src/core/Search/FieldCacheTermsFilter.cs
+++ b/src/core/Search/FieldCacheTermsFilter.cs
@@ -120,7 +120,7 @@ namespace Lucene.Net.Search
 
         public override DocIdSet GetDocIdSet(AtomicReaderContext context, IBits acceptDocs)
         {
-            SortedDocValues fcsi = FieldCache.GetTermsIndex(context.Reader, field);
+            SortedDocValues fcsi = FieldCache.GetTermsIndex(context.AtomicReader, field);
             FixedBitSet bits = new FixedBitSet(fcsi.ValueCount);
             for (int i = 0; i < terms.Length; i++)
             {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/25ec42a2/src/core/Search/FieldComparator.cs
----------------------------------------------------------------------
diff --git a/src/core/Search/FieldComparator.cs b/src/core/Search/FieldComparator.cs
index 59ab91e..7caead2 100644
--- a/src/core/Search/FieldComparator.cs
+++ b/src/core/Search/FieldComparator.cs
@@ -223,7 +223,7 @@ namespace Lucene.Net.Search
             {
                 if (missingValue != null)
                 {
-                    docsWithField = FieldCache.DEFAULT.GetDocsWithField(context.Reader, field);
+                    docsWithField = FieldCache.DEFAULT.GetDocsWithField(context.AtomicReader, field);
                     // optimization to remove unneeded checks on the bit interface:
                     if (docsWithField is Bits.MatchAllBits)
                     {
@@ -289,7 +289,7 @@ namespace Lucene.Net.Search
             {
                 // NOTE: must do this before calling super otherwise
                 // we compute the docsWithField Bits twice!
-                currentReaderValues = FieldCache.DEFAULT.GetBytes(context.Reader, field, parser, missingValue != null);
+                currentReaderValues = FieldCache.DEFAULT.GetBytes(context.AtomicReader, field, parser, missingValue != null);
                 return base.SetNextReader(context);
             }
 
@@ -369,7 +369,7 @@ namespace Lucene.Net.Search
             {
                 // NOTE: must do this before calling super otherwise
                 // we compute the docsWithField Bits twice!
-                currentReaderValues = FieldCache.DEFAULT.GetDoubles(context.Reader, field, parser, missingValue != null);
+                currentReaderValues = FieldCache.DEFAULT.GetDoubles(context.AtomicReader, field, parser, missingValue != null);
                 return base.SetNextReader(context);
             }
 
@@ -450,7 +450,7 @@ namespace Lucene.Net.Search
             {
                 // NOTE: must do this before calling super otherwise
                 // we compute the docsWithField Bits twice!
-                currentReaderValues = FieldCache.DEFAULT.GetFloats(context.Reader, field, parser, missingValue != null);
+                currentReaderValues = FieldCache.DEFAULT.GetFloats(context.AtomicReader, field, parser, missingValue != null);
                 return base.SetNextReader(context);
             }
 
@@ -530,7 +530,7 @@ namespace Lucene.Net.Search
             {
                 // NOTE: must do this before calling super otherwise
                 // we compute the docsWithField Bits twice!
-                currentReaderValues = FieldCache.DEFAULT.GetShorts(context.Reader, field, parser, missingValue != null);
+                currentReaderValues = FieldCache.DEFAULT.GetShorts(context.AtomicReader, field, parser, missingValue != null);
                 return base.SetNextReader(context);
             }
 
@@ -642,7 +642,7 @@ namespace Lucene.Net.Search
             {
                 // NOTE: must do this before calling super otherwise
                 // we compute the docsWithField Bits twice!
-                currentReaderValues = FieldCache.DEFAULT.GetInts(context.Reader, field, parser, missingValue != null);
+                currentReaderValues = FieldCache.DEFAULT.GetInts(context.AtomicReader, field, parser, missingValue != null);
                 return base.SetNextReader(context);
             }
 
@@ -761,7 +761,7 @@ namespace Lucene.Net.Search
             {
                 // NOTE: must do this before calling super otherwise
                 // we compute the docsWithField Bits twice!
-                currentReaderValues = FieldCache.DEFAULT.GetLongs(context.Reader, field, parser, missingValue != null);
+                currentReaderValues = FieldCache.DEFAULT.GetLongs(context.AtomicReader, field, parser, missingValue != null);
                 return base.SetNextReader(context);
             }
 
@@ -1134,7 +1134,7 @@ namespace Lucene.Net.Search
             public override FieldComparator<BytesRef> SetNextReader(AtomicReaderContext context)
             {
                 int docBase = context.docBase;
-                termsIndex = FieldCache.DEFAULT.GetTermsIndex(context.Reader, field);
+                termsIndex = FieldCache.DEFAULT.GetTermsIndex(context.AtomicReader, field);
                 FieldComparator<BytesRef> perSegComp = new AnyOrdComparator(this, termsIndex, docBase);
                 currentReaderGen++;
                 if (bottomSlot != -1)
@@ -1254,7 +1254,7 @@ namespace Lucene.Net.Search
 
             public override FieldComparator<BytesRef> SetNextReader(AtomicReaderContext context)
             {
-                docTerms = FieldCache.DEFAULT.GetTerms(context.Reader, field);
+                docTerms = FieldCache.DEFAULT.GetTerms(context.AtomicReader, field);
                 return this;
             }
 

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/25ec42a2/src/core/Search/FieldValueFilter.cs
----------------------------------------------------------------------
diff --git a/src/core/Search/FieldValueFilter.cs b/src/core/Search/FieldValueFilter.cs
index a38f93c..fcccda0 100644
--- a/src/core/Search/FieldValueFilter.cs
+++ b/src/core/Search/FieldValueFilter.cs
@@ -35,7 +35,7 @@ namespace Lucene.Net.Search
 
         public override DocIdSet GetDocIdSet(AtomicReaderContext context, IBits acceptDocs)
         {
-            IBits docsWithField = FieldCache.DEFAULT.GetDocsWithField(context.Reader, field);
+            IBits docsWithField = FieldCache.DEFAULT.GetDocsWithField(context.AtomicReader, field);
 
             if (negate)
             {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/25ec42a2/src/core/Search/FilteredQuery.cs
----------------------------------------------------------------------
diff --git a/src/core/Search/FilteredQuery.cs b/src/core/Search/FilteredQuery.cs
index 076e7bc..515b2b9 100644
--- a/src/core/Search/FilteredQuery.cs
+++ b/src/core/Search/FilteredQuery.cs
@@ -118,7 +118,7 @@ namespace Lucene.Net.Search
             {
                 Explanation inner = weight.Explain(ir, i);
                 Filter f = enclosingInstance.filter;
-                DocIdSet docIdSet = f.GetDocIdSet(ir, ir.Reader.LiveDocs);
+                DocIdSet docIdSet = f.GetDocIdSet(ir, ir.AtomicReader.LiveDocs);
                 DocIdSetIterator docIdSetIterator = docIdSet == null ? DocIdSet.EMPTY_DOCIDSET.Iterator() : docIdSet.Iterator();
                 if (docIdSetIterator == null)
                 {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/25ec42a2/src/core/Search/FuzzyTermsEnum.cs
----------------------------------------------------------------------
diff --git a/src/core/Search/FuzzyTermsEnum.cs b/src/core/Search/FuzzyTermsEnum.cs
index 4aa6ff0..dd680af 100644
--- a/src/core/Search/FuzzyTermsEnum.cs
+++ b/src/core/Search/FuzzyTermsEnum.cs
@@ -21,6 +21,7 @@ using Lucene.Net.Util;
 using Lucene.Net.Util.Automaton;
 using System;
 using System.Collections.Generic;
+using System.Linq;
 using IndexReader = Lucene.Net.Index.IndexReader;
 using Term = Lucene.Net.Index.Term;
 
@@ -119,7 +120,7 @@ namespace Lucene.Net.Search
 
         protected TermsEnum GetAutomatonEnum(int editDistance, BytesRef lastTerm)
         {
-            List<CompiledAutomaton> runAutomata = InitAutomata(editDistance);
+            IList<CompiledAutomaton> runAutomata = InitAutomata(editDistance);
             if (editDistance < runAutomata.Count)
             {
                 //if (BlockTreeTermsWriter.DEBUG) System.out.println("FuzzyTE.getAEnum: ed=" + editDistance + " lastTerm=" + (lastTerm==null ? "null" : lastTerm.utf8ToString()));
@@ -133,9 +134,9 @@ namespace Lucene.Net.Search
             }
         }
 
-        private List<CompiledAutomaton> InitAutomata(int maxDistance)
+        private IList<CompiledAutomaton> InitAutomata(int maxDistance)
         {
-            List<CompiledAutomaton> runAutomata = dfaAtt.Automata;
+            IList<CompiledAutomaton> runAutomata = dfaAtt.Automata;
             //System.out.println("cached automata size: " + runAutomata.size());
             if (runAutomata.Count <= maxDistance &&
                 maxDistance <= LevenshteinAutomata.MAXIMUM_SUPPORTED_DISTANCE)
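
Porting note: widening the signatures from List<CompiledAutomaton> to IList<CompiledAutomaton> keeps callers coupled to the interface alone, so the backing collection behind dfaAtt.Automata can change without rippling through this class. A small sketch:

    using System.Collections.Generic;

    class AutomataHolderSketch
    {
        private readonly List<string> cache = new List<string>();

        // Exposing IList<T> lets the backing store become an array wrapper
        // or a read-only view later without touching any caller.
        public IList<string> Automata
        {
            get { return cache; }
        }
    }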

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/25ec42a2/src/core/Search/IMaxNonCompetitiveBoostAttribute.cs
----------------------------------------------------------------------
diff --git a/src/core/Search/IMaxNonCompetitiveBoostAttribute.cs b/src/core/Search/IMaxNonCompetitiveBoostAttribute.cs
index 0d0b8f7..f79c618 100644
--- a/src/core/Search/IMaxNonCompetitiveBoostAttribute.cs
+++ b/src/core/Search/IMaxNonCompetitiveBoostAttribute.cs
@@ -4,5 +4,8 @@ namespace Lucene.Net.Search
 {
     public interface IMaxNonCompetitiveBoostAttribute : IAttribute
     {
+        float MaxNonCompetitiveBoost { get; set; }
+
+        BytesRef CompetitiveTerm { get; set; }
     }
 }

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/25ec42a2/src/core/Search/IndexSearcher.cs
----------------------------------------------------------------------
diff --git a/src/core/Search/IndexSearcher.cs b/src/core/Search/IndexSearcher.cs
index d4bc615..03f5ccc 100644
--- a/src/core/Search/IndexSearcher.cs
+++ b/src/core/Search/IndexSearcher.cs
@@ -383,7 +383,7 @@ namespace Lucene.Net.Search
                     // continue with the following leaf
                     continue;
                 }
-                Scorer scorer = weight.Scorer(ctx, !collector.AcceptsDocsOutOfOrder, true, ctx.Reader.LiveDocs);
+                Scorer scorer = weight.Scorer(ctx, !collector.AcceptsDocsOutOfOrder, true, ctx.AtomicReader.LiveDocs);
                 if (scorer != null)
                 {
                     try

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/25ec42a2/src/core/Search/LiveFieldValues.cs
----------------------------------------------------------------------
diff --git a/src/core/Search/LiveFieldValues.cs b/src/core/Search/LiveFieldValues.cs
index e745711..2ef729e 100644
--- a/src/core/Search/LiveFieldValues.cs
+++ b/src/core/Search/LiveFieldValues.cs
@@ -4,7 +4,7 @@ using Lucene.Net.Support;
 
 namespace Lucene.Net.Search
 {
-    public abstract class LiveFieldValues<T> : ReferenceManager<T>.RefreshListener, IDisposable
+    public abstract class LiveFieldValues<T> : ReferenceManager.RefreshListener, IDisposable
         where T : class
     {
         private volatile IDictionary<string, T> current = new ConcurrentHashMap<string, T>();

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/25ec42a2/src/core/Search/MaxNonCompetitiveBoostAttribute.cs
----------------------------------------------------------------------
diff --git a/src/core/Search/MaxNonCompetitiveBoostAttribute.cs b/src/core/Search/MaxNonCompetitiveBoostAttribute.cs
new file mode 100644
index 0000000..4c3636f
--- /dev/null
+++ b/src/core/Search/MaxNonCompetitiveBoostAttribute.cs
@@ -0,0 +1,51 @@
+using Lucene.Net.Util;
+using System;
+using System.Collections.Generic;
+using System.Linq;
+using System.Text;
+
+namespace Lucene.Net.Search
+{
+    public sealed class MaxNonCompetitiveBoostAttribute : Lucene.Net.Util.Attribute, IMaxNonCompetitiveBoostAttribute
+    {
+        private float maxNonCompetitiveBoost = float.NegativeInfinity;
+        private BytesRef competitiveTerm = null;
+        
+        public float MaxNonCompetitiveBoost
+        {
+            get
+            {
+                return maxNonCompetitiveBoost;
+            }
+            set
+            {
+                this.maxNonCompetitiveBoost = value;
+            }
+        }
+
+        public BytesRef CompetitiveTerm
+        {
+            get
+            {
+                return competitiveTerm;
+            }
+            set
+            {
+                this.competitiveTerm = value;
+            }
+        }
+
+        public override void Clear()
+        {
+            maxNonCompetitiveBoost = float.NegativeInfinity;
+            competitiveTerm = null;
+        }
+
+        public override void CopyTo(Util.Attribute target)
+        {
+            MaxNonCompetitiveBoostAttribute t = (MaxNonCompetitiveBoostAttribute)target;
+            t.MaxNonCompetitiveBoost = maxNonCompetitiveBoost;
+            t.CompetitiveTerm = competitiveTerm;
+        }
+    }
+}
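
Porting note: attributes of this kind are normally obtained through an AttributeSource rather than constructed directly, so a TermsEnum producer and its consumer share one instance; Clear() restores the sentinel defaults and CopyTo() supports capture/restore of state. A hedged usage sketch, assuming the generic AddAttribute<T>() of the existing AttributeSource port:

    var attrs = new Lucene.Net.Util.AttributeSource();
    var boostAtt = attrs.AddAttribute<IMaxNonCompetitiveBoostAttribute>();
    boostAtt.MaxNonCompetitiveBoost = 0.5f;  // consumer publishes its bound
    // ... the enumerator reads the bound while walking terms ...
    boostAtt.Clear();                        // back to -infinity / null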

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/25ec42a2/src/core/Search/ReferenceManager.cs
----------------------------------------------------------------------
diff --git a/src/core/Search/ReferenceManager.cs b/src/core/Search/ReferenceManager.cs
index 8dcacc2..1741eeb 100644
--- a/src/core/Search/ReferenceManager.cs
+++ b/src/core/Search/ReferenceManager.cs
@@ -15,7 +15,7 @@ namespace Lucene.Net.Search
 		//private Lock refreshLock = new ReentrantLock();
 		private readonly ReentrantLock refreshLock = new ReentrantLock();
 		
-        private readonly ISet<RefreshListener> refreshListeners = new ConcurrentHashSet<RefreshListener>(new IdentityComparer<RefreshListener>());
+        private readonly ISet<ReferenceManager.RefreshListener> refreshListeners = new ConcurrentHashSet<ReferenceManager.RefreshListener>(new IdentityComparer<ReferenceManager.RefreshListener>());
 
 		private void EnsureOpen()
 		{
@@ -263,7 +263,7 @@ namespace Lucene.Net.Search
 
 		private void NotifyRefreshListenersBefore()
 		{
-			foreach (RefreshListener refreshListener in refreshListeners)
+			foreach (ReferenceManager.RefreshListener refreshListener in refreshListeners)
 			{
 				refreshListener.BeforeRefresh();
 			}
@@ -271,7 +271,7 @@ namespace Lucene.Net.Search
 
 		private void NotifyRefreshListenersRefreshed(bool didRefresh)
 		{
-			foreach (RefreshListener refreshListener in refreshListeners)
+            foreach (ReferenceManager.RefreshListener refreshListener in refreshListeners)
 			{
 				refreshListener.AfterRefresh(didRefresh);
 			}
@@ -280,7 +280,7 @@ namespace Lucene.Net.Search
 		/**
 		 * Adds a listener, to be notified when a reference is refreshed/swapped.
 		 */
-		public void AddListener(RefreshListener listener)
+        public void AddListener(ReferenceManager.RefreshListener listener)
 		{
 			if (listener == null)
 			{
@@ -292,7 +292,7 @@ namespace Lucene.Net.Search
 		/**
 		 * Remove a listener added with {@link #addListener(RefreshListener)}.
 		 */
-		public void RemoveListener(RefreshListener listener)
+        public void RemoveListener(ReferenceManager.RefreshListener listener)
 		{
 			if (listener == null)
 			{
@@ -301,6 +301,10 @@ namespace Lucene.Net.Search
 			refreshListeners.Remove(listener);
 		}
 
+    }
+    // .NET Port: non-generic type to hold RefreshListener
+    public static class ReferenceManager
+    {
 		/** Use to receive notification when a refresh has
 		 *  finished.  See {@link #addListener}. */
 		public interface RefreshListener
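
Porting note: in C#, a type nested inside a generic class is itself generic, so a RefreshListener nested in ReferenceManager<G> could only be implemented as ReferenceManager<SomeG>.RefreshListener, which is exactly the coupling LiveFieldValues needs to avoid. Hoisting the interface into a non-generic companion class (legal alongside the generic one, since C# distinguishes types by generic arity) removes it. A sketch:

    class ManagerSketch<T>
    {
        public interface Listener { void AfterRefresh(bool didRefresh); }
    }

    // Implementors of the nested form must pick a T. The non-generic
    // companion type below frees them from that:
    static class ManagerSketch
    {
        public interface Listener { void AfterRefresh(bool didRefresh); }
    }

    class SubscriberSketch : ManagerSketch.Listener
    {
        public void AfterRefresh(bool didRefresh) { }
    }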

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/25ec42a2/src/core/Store/CompoundFileDirectory.cs
----------------------------------------------------------------------
diff --git a/src/core/Store/CompoundFileDirectory.cs b/src/core/Store/CompoundFileDirectory.cs
index cb92f8e..6bfa431 100644
--- a/src/core/Store/CompoundFileDirectory.cs
+++ b/src/core/Store/CompoundFileDirectory.cs
@@ -30,7 +30,7 @@ namespace Lucene.Net.Store
         {
             this.directory = directory;
             this.fileName = fileName;
-            this.readBufferSize = BufferedIndexInput.BufferSize(context);
+            this.readBufferSize = BufferedIndexInput.GetBufferSize(context);
             this.isOpen = false;
             this.openForWrite = openForWrite;
             if (!openForWrite)
@@ -65,7 +65,7 @@ namespace Lucene.Net.Store
         private static readonly byte CODEC_MAGIC_BYTE1 = (byte)Number.URShift(CodecUtil.CODEC_MAGIC, 24);
         private static readonly byte CODEC_MAGIC_BYTE2 = (byte)Number.URShift(CodecUtil.CODEC_MAGIC, 16);
         private static readonly byte CODEC_MAGIC_BYTE3 = (byte)Number.URShift(CodecUtil.CODEC_MAGIC, 8);
-        private static readonly byte CODEC_MAGIC_BYTE4 = (byte)CodecUtil.CODEC_MAGIC;
+        private static readonly byte CODEC_MAGIC_BYTE4 = unchecked((byte)CodecUtil.CODEC_MAGIC);
 
         private static IDictionary<string, FileEntry> ReadEntries(IndexInputSlicer handle, Directory dir, string name)
         {
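
Porting note: the other three magic bytes come out of Number.URShift already in byte range, but the fourth takes the whole constant, and C# evaluates constant narrowing casts at compile time, rejecting out-of-range ones unless wrapped in unchecked(). A standalone sketch (the constant's value is illustrative of CODEC_MAGIC's shape):

    class Program
    {
        private const int CODEC_MAGIC = 0x3fd76c17;

        static void Main()
        {
            // byte b = (byte)CODEC_MAGIC;         // error CS0221: constant
            //                                     // value out of byte range
            byte b = unchecked((byte)CODEC_MAGIC); // keeps the low 8 bits
            System.Console.WriteLine(b);           // 23 (0x17)
        }
    }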

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/25ec42a2/src/core/Store/DataOutput.cs
----------------------------------------------------------------------
diff --git a/src/core/Store/DataOutput.cs b/src/core/Store/DataOutput.cs
index cb78f34..2368b6f 100644
--- a/src/core/Store/DataOutput.cs
+++ b/src/core/Store/DataOutput.cs
@@ -19,11 +19,7 @@ namespace Lucene.Net.Store
 
         public void WriteBytes(sbyte[] b, int offset, int length)
         {
-            // helper method to account for java's byte being signed
-            byte[] ubytes = new byte[b.Length];
-            Support.Buffer.BlockCopy(b, 0, ubytes, 0, b.Length);
-
-            WriteBytes(ubytes, offset, length);
+            WriteBytes((byte[])(Array)b, offset, length);
         }
 
         public virtual void WriteBytes(byte[] b, int length)
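
Porting note: the replaced helper allocated and copied the entire source array even when only a slice was written. The CLR treats sbyte[] and byte[] as the same runtime array type, so casting through Array (which the C# compiler, unlike the CLR, requires) reinterprets the buffer with no copy. A standalone sketch:

    using System;

    class Program
    {
        static void Main()
        {
            sbyte[] signed = { -1, 0, 1 };

            // Rejected as a direct cast by the compiler, but valid at
            // runtime: same element size, same array object, no copy.
            byte[] unsigned = (byte[])(Array)signed;

            Console.WriteLine(unsigned[0]); // 255: the same bits, reread
        }
    }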

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/25ec42a2/src/core/Util/Attribute.cs
----------------------------------------------------------------------
diff --git a/src/core/Util/Attribute.cs b/src/core/Util/Attribute.cs
index 1f11a3a..c022339 100644
--- a/src/core/Util/Attribute.cs
+++ b/src/core/Util/Attribute.cs
@@ -83,14 +83,19 @@ namespace Lucene.Net.Util
         public virtual void ReflectWith(IAttributeReflector reflector)
         {
             Type clazz = this.GetType();
-            LinkedList<Type> interfaces = AttributeSource.GetAttributeInterfaces(clazz);
+            LinkedList<WeakReference> interfaces = AttributeSource.GetAttributeInterfaces(clazz);
 
             if (interfaces.Count != 1)
             {
                 throw new NotSupportedException(clazz.Name + " implements more than one Attribute interface, the default ReflectWith() implementation cannot handle this.");
             }
 
-            Type interf = interfaces.First.Value;
+            object target = interfaces.First.Value.Get();
+
+            if (target == null)
+                return;
+
+            Type interf = target as Type;
 
             FieldInfo[] fields = clazz.GetFields(BindingFlags.Instance | BindingFlags.Public);
 

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/25ec42a2/src/core/Util/Automaton/Lev2ParametricDescription.cs
----------------------------------------------------------------------
diff --git a/src/core/Util/Automaton/Lev2ParametricDescription.cs b/src/core/Util/Automaton/Lev2ParametricDescription.cs
index c68a80a..85a9741 100644
--- a/src/core/Util/Automaton/Lev2ParametricDescription.cs
+++ b/src/core/Util/Automaton/Lev2ParametricDescription.cs
@@ -110,9 +110,9 @@ namespace Lucene.Net.Util.Automaton
 
         // 8 vectors; 21 states per vector; array length = 168
         private readonly static long[] toStates3 = new long[] /*5 bits per value */ {
-            0x380e014a051404L,0xe28245009451140L,(long)0x8a26880098a6268cL,0x180a288ca0246213L,
+            0x380e014a051404L,0xe28245009451140L,unchecked((long)0x8a26880098a6268cL),0x180a288ca0246213L,
             0x494053284a1080e1L,0x510265a89c311940L,0x4218c41188a6509cL,0x6340c4211c4710dL,
-            (long)0xa168398471882a12L,0x104c841c683a0425L,0x3294472904351483L,(long)0xe6290620a84a20d0L,
+            unchecked((long)0xa168398471882a12L),0x104c841c683a0425L,0x3294472904351483L,unchecked((long)0xe6290620a84a20d0L),
             0x1441a0ea2896a4a0L,0x32L
           };
         private readonly static long[] offsetIncrs3 = new long[] /*2 bits per value */ {
@@ -123,59 +123,59 @@ namespace Lucene.Net.Util.Automaton
         // 16 vectors; 30 states per vector; array length = 480
         private readonly static long[] toStates4 = new long[] /*5 bits per value */ {
             0x380e014a051404L,0xaa015452940L,0x55014501000000L,0x1843ddc771085c07L,
-            0x7141200040108405L,0x52b44004c5313460L,0x401080200063115cL,(long)0x85314c4d181c5048L,
-            0x1440190a3e5c7828L,0x28a232809100a21L,(long)0xa028ca2a84203846L,(long)0xca0240010800108aL,
-            (long)0xc7b4205c1580a508L,0x1021090251846b6L,0x4cb513862328090L,0x210863128ca2b8a2L,
-            0x4e188ca024402940L,0xa6b6c7c520532d4L,(long)0x8c41101451150219L,(long)0xa0c4211c4710d421L,
-            0x2108421094e15063L,(long)0x8f13c43708631044L,0x18274d908c611631L,0x1cc238c411098263L,
-            0x450e3a1d0212d0b4L,0x31050242048108c6L,0xfa318b42d07308eL,(long)0xa8865182356907c6L,
-            0x1ca410d4520c4140L,0x2954e13883a0ca51L,0x3714831044229442L,(long)0x93946116b58f2c84L,
-            (long)0xc41109a5631a574dL,0x1d4512d4941cc520L,0x52848294c643883aL,(long)0xb525073148310502L,
-            (long)0xa5356939460f7358L,0x409ca651L
+            0x7141200040108405L,0x52b44004c5313460L,0x401080200063115cL,unchecked((long)0x85314c4d181c5048L),
+            0x1440190a3e5c7828L,0x28a232809100a21L,unchecked((long)0xa028ca2a84203846L),unchecked((long)0xca0240010800108aL),
+            unchecked((long)0xc7b4205c1580a508L),0x1021090251846b6L,0x4cb513862328090L,0x210863128ca2b8a2L,
+            0x4e188ca024402940L,0xa6b6c7c520532d4L,unchecked((long)0x8c41101451150219L),unchecked((long)0xa0c4211c4710d421L),
+            0x2108421094e15063L,unchecked((long)0x8f13c43708631044L),0x18274d908c611631L,0x1cc238c411098263L,
+            0x450e3a1d0212d0b4L,0x31050242048108c6L,0xfa318b42d07308eL,unchecked((long)0xa8865182356907c6L),
+            0x1ca410d4520c4140L,0x2954e13883a0ca51L,0x3714831044229442L,unchecked((long)0x93946116b58f2c84L),
+            unchecked((long)0xc41109a5631a574dL),0x1d4512d4941cc520L,0x52848294c643883aL,unchecked((long)0xb525073148310502L),
+            unchecked((long)0xa5356939460f7358L),0x409ca651L
           };
         private readonly static long[] offsetIncrs4 = new long[] /*3 bits per value */ {
             0x20c0600000010000L,0x2000040000000001L,0x209204a40209L,0x301b6c0618018618L,
-            0x207206186000186cL,0x1200061b8e06dc0L,0x480492080612010L,(long)0xa20204a040048000L,
-            0x1061a0000129124L,0x1848349b680612L,(long)0xd26da0204a041868L,0x2492492492496128L,
-            (long)0x9249249249249249L,0x4924924924924924L,0x2492492492492492L,(long)0x9249249249249249L,
-            0x4924924924924924L,0x2492492492492492L,(long)0x9249249249249249L,0x4924924924924924L,
-            0x2492492492492492L,(long)0x9249249249249249L,0x24924924L
+            0x207206186000186cL,0x1200061b8e06dc0L,0x480492080612010L,unchecked((long)0xa20204a040048000L),
+            0x1061a0000129124L,0x1848349b680612L,unchecked((long)0xd26da0204a041868L),0x2492492492496128L,
+            unchecked((long)0x9249249249249249L),0x4924924924924924L,0x2492492492492492L,unchecked((long)0x9249249249249249L),
+            0x4924924924924924L,0x2492492492492492L,unchecked((long)0x9249249249249249L),0x4924924924924924L,
+            0x2492492492492492L,unchecked((long)0x9249249249249249L),0x24924924L
           };
 
         // 32 vectors; 30 states per vector; array length = 960
         private readonly static long[] toStates5 = new long[] /*5 bits per value */ {
-            0x380e014a051404L,0xaa015452940L,(long)0x8052814501000000L,(long)0xb80a515450000e03L,
-            0x5140410842108426L,0x71dc421701c01540L,0x100421014610f7L,(long)0x85c0700550145010L,
-            (long)0x94a271843ddc7710L,0x1346071412108a22L,0x3115c52b44004c53L,(long)0xc504840108020006L,
+            0x380e014a051404L,0xaa015452940L,unchecked((long)0x8052814501000000L),unchecked((long)0xb80a515450000e03L),
+            0x5140410842108426L,0x71dc421701c01540L,0x100421014610f7L,unchecked((long)0x85c0700550145010L),
+            unchecked((long)0x94a271843ddc7710L),0x1346071412108a22L,0x3115c52b44004c53L,unchecked((long)0xc504840108020006L),
             0x54d1001314c4d181L,0x9081204239c4a71L,0x14c5313460714124L,0x51006428f971e0a2L,
             0x4d181c5048402884L,0xa3e5c782885314cL,0x2809409482a8a239L,0x2a84203846028a23L,
-            0x10800108aa028caL,0xe1180a288ca0240L,(long)0x98c6b80e3294a108L,0x2942328091098c10L,
-            0x11adb1ed08170560L,(long)0xa024004084240946L,0x7b4205c1580a508cL,(long)0xa8c2968c71846b6cL,
-            0x4cb5138623280910L,0x10863128ca2b8a20L,(long)0xe188ca0244029402L,0x4e3294e288132d44L,
-            (long)0x809409ad1218c39cL,(long)0xf14814cb51386232L,0x514454086429adb1L,0x32d44e188ca02440L,
-            (long)0x8c390a6b6c7c5205L,(long)0xd4218c41409cd2aaL,0x5063a0c4211c4710L,0x10442108421094e1L,
-            0x31084711c4350863L,(long)0xbdef7bddf05918f2L,(long)0xc4f10dc218c41ef7L,0x9d3642318458c63L,
-            0x70863104426098c6L,0x8c6116318f13c43L,0x41ef75dd6b5de4d9L,(long)0xd0212d0b41cc238cL,
-            0x2048108c6450e3a1L,0x42d07308e3105024L,(long)0xdb591938f274084bL,(long)0xc238c41f77deefbbL,
-            0x1f183e8c62d0b41cL,0x502a2194608d5a4L,(long)0xa318b42d07308e31L,(long)0xed675db56907c60fL,
-            (long)0xa410d4520c41f773L,0x54e13883a0ca511cL,0x1483104422944229L,0x20f2329447290435L,
-            0x1ef6f7ef6f7df05cL,(long)0xad63cb210dc520c4L,0x58c695d364e51845L,(long)0xc843714831044269L,
-            (long)0xe4d93946116b58f2L,0x520c41ef717d6b17L,(long)0x83a1d4512d4941ccL,0x50252848294c6438L,
-            0x144b525073148310L,(long)0xefaf7b591c20f275L,(long)0x941cc520c41f777bL,(long)0xd5a4e5183dcd62d4L,
-            0x4831050272994694L,0x460f7358b5250731L,(long)0xf779bd6717b56939L
+            0x10800108aa028caL,0xe1180a288ca0240L,unchecked((long)0x98c6b80e3294a108L),0x2942328091098c10L,
+            0x11adb1ed08170560L,unchecked((long)0xa024004084240946L),0x7b4205c1580a508cL,unchecked((long)0xa8c2968c71846b6cL),
+            0x4cb5138623280910L,0x10863128ca2b8a20L,unchecked((long)0xe188ca0244029402L),0x4e3294e288132d44L,
+            unchecked((long)0x809409ad1218c39cL),unchecked((long)0xf14814cb51386232L),0x514454086429adb1L,0x32d44e188ca02440L,
+            unchecked((long)0x8c390a6b6c7c5205L),unchecked((long)0xd4218c41409cd2aaL),0x5063a0c4211c4710L,0x10442108421094e1L,
+            0x31084711c4350863L,unchecked((long)0xbdef7bddf05918f2L),unchecked((long)0xc4f10dc218c41ef7L),0x9d3642318458c63L,
+            0x70863104426098c6L,0x8c6116318f13c43L,0x41ef75dd6b5de4d9L,unchecked((long)0xd0212d0b41cc238cL),
+            0x2048108c6450e3a1L,0x42d07308e3105024L,unchecked((long)0xdb591938f274084bL),unchecked((long)0xc238c41f77deefbbL),
+            0x1f183e8c62d0b41cL,0x502a2194608d5a4L,unchecked((long)0xa318b42d07308e31L),unchecked((long)0xed675db56907c60fL),
+            unchecked((long)0xa410d4520c41f773L),0x54e13883a0ca511cL,0x1483104422944229L,0x20f2329447290435L,
+            0x1ef6f7ef6f7df05cL,unchecked((long)0xad63cb210dc520c4L),0x58c695d364e51845L,unchecked((long)0xc843714831044269L),
+            unchecked((long)0xe4d93946116b58f2L),0x520c41ef717d6b17L,unchecked((long)0x83a1d4512d4941ccL),0x50252848294c6438L,
+            0x144b525073148310L,unchecked((long)0xefaf7b591c20f275L),unchecked((long)0x941cc520c41f777bL),unchecked((long)0xd5a4e5183dcd62d4L),
+            0x4831050272994694L,0x460f7358b5250731L,unchecked((long)0xf779bd6717b56939L)
           };
         private readonly static long[] offsetIncrs5 = new long[] /*3 bits per value */ {
             0x20c0600000010000L,0x40000000001L,0xb6db6d4830180L,0x4812900824800010L,
-            0x2092000040000082L,0x618000b659254a40L,(long)0x86c301b6c0618018L,(long)0xdb01860061860001L,
-            (long)0x81861800075baed6L,0x186e381b70081cL,(long)0xe56dc02072061860L,0x61201001200075b8L,
-            0x480000480492080L,0x52b5248201848040L,(long)0x880812810012000bL,0x4004800004a4492L,
-            0xb529124a20204aL,0x49b68061201061a0L,(long)0x8480418680018483L,0x1a000752ad26da01L,
-            0x4a349b6808128106L,(long)0xa0204a0418680018L,0x492492497528d26dL,0x2492492492492492L,
-            (long)0x9249249249249249L,0x4924924924924924L,0x2492492492492492L,(long)0x9249249249249249L,
-            0x4924924924924924L,0x2492492492492492L,(long)0x9249249249249249L,0x4924924924924924L,
-            0x2492492492492492L,(long)0x9249249249249249L,0x4924924924924924L,0x2492492492492492L,
-            (long)0x9249249249249249L,0x4924924924924924L,0x2492492492492492L,(long)0x9249249249249249L,
-            0x4924924924924924L,0x2492492492492492L,(long)0x9249249249249249L,0x4924924924924924L,
+            0x2092000040000082L,0x618000b659254a40L,unchecked((long)0x86c301b6c0618018L),unchecked((long)0xdb01860061860001L),
+            unchecked((long)0x81861800075baed6L),0x186e381b70081cL,unchecked((long)0xe56dc02072061860L),0x61201001200075b8L,
+            0x480000480492080L,0x52b5248201848040L,unchecked((long)0x880812810012000bL),0x4004800004a4492L,
+            0xb529124a20204aL,0x49b68061201061a0L,unchecked((long)0x8480418680018483L),0x1a000752ad26da01L,
+            0x4a349b6808128106L,unchecked((long)0xa0204a0418680018L),0x492492497528d26dL,0x2492492492492492L,
+            unchecked((long)0x9249249249249249L),0x4924924924924924L,0x2492492492492492L,unchecked((long)0x9249249249249249L),
+            0x4924924924924924L,0x2492492492492492L,unchecked((long)0x9249249249249249L),0x4924924924924924L,
+            0x2492492492492492L,unchecked((long)0x9249249249249249L),0x4924924924924924L,0x2492492492492492L,
+            unchecked((long)0x9249249249249249L),0x4924924924924924L,0x2492492492492492L,unchecked((long)0x9249249249249249L),
+            0x4924924924924924L,0x2492492492492492L,unchecked((long)0x9249249249249249L),0x4924924924924924L,
             0x2492492492492492L
           };
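
Porting note: in Java a hex literal with bit 63 set, such as 0x8a26880098a6268cL, is simply a negative long; in C# the same digits are typed ulong, and a checked constant cast to long fails at compile time, so each such table entry is wrapped in unchecked((long)...) to keep the bit pattern (Lev2TParametricDescription below receives the identical treatment). A standalone sketch:

    class Program
    {
        static void Main()
        {
            // long v = (long)0x8a26880098a6268cUL;   // error CS0221
            long v = unchecked((long)0x8a26880098a6268cUL);
            System.Console.WriteLine(v < 0); // True: same 64 bits as in Java
        }
    }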
 

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/25ec42a2/src/core/Util/Automaton/Lev2TParametricDescription.cs
----------------------------------------------------------------------
diff --git a/src/core/Util/Automaton/Lev2TParametricDescription.cs b/src/core/Util/Automaton/Lev2TParametricDescription.cs
index f037620..b287d7f 100644
--- a/src/core/Util/Automaton/Lev2TParametricDescription.cs
+++ b/src/core/Util/Automaton/Lev2TParametricDescription.cs
@@ -110,11 +110,11 @@ namespace Lucene.Net.Util.Automaton
 
         // 8 vectors; 28 states per vector; array length = 224
         private readonly static long[] toStates3 = new long[] /*5 bits per value */ {
-            (long)0xe701c02940059404L,(long)0xa010162000a50000L,(long)0xb02c8c40a1416288L,(long)0xa821032310858c0L,
-            0x314423980d28b201L,0x5281e528847788e0L,(long)0xa23980d308c2280eL,0x1e3294b1a962278cL,
-            (long)0x8c41309e2288e528L,0x11444409021aca21L,0x11a4624886b1086bL,0x2a6258941d6240c4L,
-            0x5024a50b489074adL,0x14821aca520c411aL,0x5888b5890b594a44L,(long)0x941d6520c411a465L,
-            (long)0x8b589075ad6a62d4L,0x1a5055a4L
+            unchecked((long)0xe701c02940059404L),unchecked((long)0xa010162000a50000L),unchecked((long)0xb02c8c40a1416288L),unchecked((long)0xa821032310858c0L),
+            0x314423980d28b201L,0x5281e528847788e0L,unchecked((long)0xa23980d308c2280eL),0x1e3294b1a962278cL,
+            unchecked((long)0x8c41309e2288e528L),0x11444409021aca21L,0x11a4624886b1086bL,0x2a6258941d6240c4L,
+            0x5024a50b489074adL,0x14821aca520c411aL,0x5888b5890b594a44L,unchecked((long)0x941d6520c411a465L),
+            unchecked((long)0x8b589075ad6a62d4L),0x1a5055a4L
           };
         private readonly static long[] offsetIncrs3 = new long[] /*2 bits per value */ {
             0x30c30200002000L,0x2a0030f3c3fc333cL,0x233a00328282a820L,0x5555555532b283a8L,
@@ -123,91 +123,91 @@ namespace Lucene.Net.Util.Automaton
 
         // 16 vectors; 45 states per vector; array length = 720
         private readonly static long[] toStates4 = new long[] /*6 bits per value */ {
-            0x3801450002c5004L,(long)0xc500014b00000e38L,0x51451401402L,0x0L,
-            0x518000b14010000L,(long)0x9f1c20828e20230L,0x219f0df0830a70c2L,(long)0x8200008208208200L,
-            (long)0x805050160800800L,0x3082098602602643L,0x4564014250508064L,(long)0x850051420000831L,
-            0x4140582085002082L,0x456180980990c201L,(long)0x8316d0c50a01051L,0x21451420050df0e0L,
-            (long)0xd14214014508214L,0x3c21c01850821c60L,0x1cb1403cb142087L,(long)0x800821451851822cL,
-            0x20020820800020L,(long)0xd006182087180345L,(long)0xcb0a81cb24976b09L,(long)0x8b1a60e624709d1L,
-            0x249082082249089L,(long)0xc31421c600d2c024L,0x3c31451515454423L,0x31853c22c21cb140L,
-            0x4514500b2c208214L,(long)0x8718034508b0051L,(long)0xb2cb45515108f0c5L,(long)0xe824715d1cb0a810L,
-            0x1422cb14908b0e60L,0x30812c22c02cb145L,(long)0x842022020cb1420cL,0x5c20ce0820ce0850L,
-            0x208208208b0d70c2L,0x4208508214214208L,(long)0x920834050830c20L,(long)0xc6134dc613653592L,
-            (long)0xd309341c6dc4db4dL,0x6424d90854d34d34L,(long)0x92072c22030814c2L,0x4220724b24a30930L,
-            0x2470d72025c920e2L,(long)0x92c92d70975c9082L,(long)0xcb0880c204924e08L,0x45739728c24c2481L,
-            (long)0xc6da4db5da6174daL,0x4b5d35d75d30971dL,0x1030815c93825ce2L,0x51442051020cb145L,
-            (long)0xc538210e2c220e2cL,(long)0x851421452cb0d70L,0x204b085085145142L,(long)0x921560834051440cL,
-            0x4d660e4da60e6595L,(long)0x94d914e41c6dc658L,(long)0x826426591454d365L,0x2892072c51030813L,
-            (long)0xe2c22072cb2ca30bL,0x452c70d720538910L,(long)0x8b2cb2d708e3891L,(long)0x81cb1440c204b24eL,
-            (long)0xda44e38e28c2ca24L,0x1dc6da6585d660e4L,(long)0xe2cb5d338e5d914eL,0x38938238L
+            0x3801450002c5004L,unchecked((long)0xc500014b00000e38L),0x51451401402L,0x0L,
+            0x518000b14010000L,unchecked((long)0x9f1c20828e20230L),0x219f0df0830a70c2L,unchecked((long)0x8200008208208200L),
+            unchecked((long)0x805050160800800L),0x3082098602602643L,0x4564014250508064L,unchecked((long)0x850051420000831L),
+            0x4140582085002082L,0x456180980990c201L,unchecked((long)0x8316d0c50a01051L),0x21451420050df0e0L,
+            unchecked((long)0xd14214014508214L),0x3c21c01850821c60L,0x1cb1403cb142087L,unchecked((long)0x800821451851822cL),
+            0x20020820800020L,unchecked((long)0xd006182087180345L),unchecked((long)0xcb0a81cb24976b09L),unchecked((long)0x8b1a60e624709d1L),
+            0x249082082249089L,unchecked((long)0xc31421c600d2c024L),0x3c31451515454423L,0x31853c22c21cb140L,
+            0x4514500b2c208214L,unchecked((long)0x8718034508b0051L),unchecked((long)0xb2cb45515108f0c5L),unchecked((long)0xe824715d1cb0a810L),
+            0x1422cb14908b0e60L,0x30812c22c02cb145L,unchecked((long)0x842022020cb1420cL),0x5c20ce0820ce0850L,
+            0x208208208b0d70c2L,0x4208508214214208L,unchecked((long)0x920834050830c20L),unchecked((long)0xc6134dc613653592L),
+            unchecked((long)0xd309341c6dc4db4dL),0x6424d90854d34d34L,unchecked((long)0x92072c22030814c2L),0x4220724b24a30930L,
+            0x2470d72025c920e2L,unchecked((long)0x92c92d70975c9082L),unchecked((long)0xcb0880c204924e08L),0x45739728c24c2481L,
+            unchecked((long)0xc6da4db5da6174daL),0x4b5d35d75d30971dL,0x1030815c93825ce2L,0x51442051020cb145L,
+            unchecked((long)0xc538210e2c220e2cL),unchecked((long)0x851421452cb0d70L),0x204b085085145142L,unchecked((long)0x921560834051440cL),
+            0x4d660e4da60e6595L,unchecked((long)0x94d914e41c6dc658L),unchecked((long)0x826426591454d365L),0x2892072c51030813L,
+            unchecked((long)0xe2c22072cb2ca30bL),0x452c70d720538910L,unchecked((long)0x8b2cb2d708e3891L),unchecked((long)0x81cb1440c204b24eL),
+            unchecked((long)0xda44e38e28c2ca24L),0x1dc6da6585d660e4L,unchecked((long)0xe2cb5d338e5d914eL),0x38938238L
           };
         private readonly static long[] offsetIncrs4 = new long[] /*3 bits per value */ {
-            0x3002000000080000L,0x20c060L,(long)0x8149000004000000L,0x4024924110824824L,
-            (long)0xdb6030c360002082L,0x6c36c06c301b0d80L,(long)0xb01861b0000db0dbL,0x1b7036209188e06dL,
-            (long)0x800920006d86db7L,0x4920c2402402490L,0x49000208249009L,0x4908128128124804L,
-            0x34800104124a44a2L,(long)0xc30930900d24020cL,0x40009a0924c24d24L,0x4984a069201061aL,
-            0x494d049271269262L,0x2492492492492492L,(long)0x9249249249249249L,0x4924924924924924L,
-            0x2492492492492492L,(long)0x9249249249249249L,0x4924924924924924L,0x2492492492492492L,
-            (long)0x9249249249249249L,0x4924924924924924L,0x2492492492492492L,(long)0x9249249249249249L,
-            0x4924924924924924L,0x2492492492492492L,(long)0x9249249249249249L,0x4924924924924924L,
+            0x3002000000080000L,0x20c060L,unchecked((long)0x8149000004000000L),0x4024924110824824L,
+            unchecked((long)0xdb6030c360002082L),0x6c36c06c301b0d80L,unchecked((long)0xb01861b0000db0dbL),0x1b7036209188e06dL,
+            unchecked((long)0x800920006d86db7L),0x4920c2402402490L,0x49000208249009L,0x4908128128124804L,
+            0x34800104124a44a2L,unchecked((long)0xc30930900d24020cL),0x40009a0924c24d24L,0x4984a069201061aL,
+            0x494d049271269262L,0x2492492492492492L,unchecked((long)0x9249249249249249L),0x4924924924924924L,
+            0x2492492492492492L,unchecked((long)0x9249249249249249L),0x4924924924924924L,0x2492492492492492L,
+            unchecked((long)0x9249249249249249L),0x4924924924924924L,0x2492492492492492L,unchecked((long)0x9249249249249249L),
+            0x4924924924924924L,0x2492492492492492L,unchecked((long)0x9249249249249249L),0x4924924924924924L,
             0x2492492492492492L,0x249249249249L
           };
 
         // 32 vectors; 45 states per vector; array length = 1440
         private readonly static long[] toStates5 = new long[] /*6 bits per value */ {
-            0x3801450002c5004L,(long)0xc500014b00000e38L,0x51451401402L,0x0L,
-            0x514000b14010000L,0x550000038e00e0L,0x264518500600b180L,(long)0x8208208208208208L,
-            0x2c50040820820L,0x70820a38808c0146L,(long)0xc37c20c29c30827cL,0x20820820800867L,
-            (long)0xb140102002002080L,(long)0x828e202300518000L,(long)0x830a70c209f1c20L,0x51451450853df0dfL,
+            0x3801450002c5004L,unchecked((long)0xc500014b00000e38L),0x51451401402L,0x0L,
+            0x514000b14010000L,0x550000038e00e0L,0x264518500600b180L,unchecked((long)0x8208208208208208L),
+            0x2c50040820820L,0x70820a38808c0146L,unchecked((long)0xc37c20c29c30827cL),0x20820820800867L,
+            unchecked((long)0xb140102002002080L),unchecked((long)0x828e202300518000L),unchecked((long)0x830a70c209f1c20L),0x51451450853df0dfL,
             0x1614214214508214L,0x6026026430805050L,0x2505080643082098L,0x4200008314564014L,
-            (long)0x850020820850051L,(long)0x80990c2014140582L,(long)0x8201920208261809L,(long)0x892051990060941L,
-            0x22492492c22cb242L,0x430805050162492cL,(long)0x8041451586026026L,0x37c38020c5b43142L,
-            0x4208508514508014L,0x141405850850051L,0x51456180980990c2L,(long)0xe008316d0c50a010L,
-            0x2c52cb2c508b21f0L,0x600d2c92c22cb249L,(long)0x873c21c01850821cL,0x2c01cb1403cb1420L,
-            0x2080082145185182L,0x4500200208208000L,(long)0x870061420871803L,0x740500f5050821cfL,
-            (long)0x934d964618609000L,0x4c24d34d30824d30L,0x1860821c600d642L,(long)0xc2a072c925dac274L,
-            0x2c69839891c27472L,(long)0x9242082089242242L,(long)0x8208718034b00900L,0x1cb24976b09d0061L,
-            0x60e624709d1cb0a8L,(long)0xd31455d71574ce3eL,0x1c600d3825c25d74L,0x51515454423c3142L,
-            (long)0xc22c21cb1403c314L,(long)0xb2c20821431853L,0x34508b005145145L,0x5515108f0c508718L,
-            (long)0x8740500f2051454L,(long)0xe2534d920618f090L,0x493826596592c238L,0x4423c31421c600d6L,
-            0x72c2a042cb2d1545L,0x422c3983a091c574L,(long)0xb2c514508b2c52L,(long)0xf0c508718034b08bL,
-            (long)0xa810b2cb45515108L,0x2260e824715d1cb0L,(long)0xe6592c538e2d74ceL,0x420c308138938238L,
-            (long)0x850842022020cb1L,0x70c25c20ce0820ceL,0x4208208208208b0dL,(long)0xc20420850821421L,
-            0x21080880832c5083L,(long)0xa50838820838c214L,(long)0xaaaaaaaaa9c39430L,0x1aaa7eaa9fa9faaaL,
-            (long)0x824820d01420c308L,0x7184d37184d94d64L,0x34c24d071b7136d3L,(long)0x990936421534d34dL,
-            (long)0x834050830c20530L,0x34dc613653592092L,(long)0xa479c6dc4db4dc61L,(long)0x920a9f924924924aL,
-            0x72c220308192a82aL,0x724b24a30930920L,(long)0xd72025c920e2422L,(long)0x92d70975c9082247L,
-            (long)0x880c204924e0892cL,0x2c928c24c2481cb0L,(long)0x80a5248889088749L,0x6a861b2aaac74394L,
-            (long)0x81b2ca6ab27b278L,(long)0xa3093092072c2203L,(long)0xd76985d36915ce5cL,0x5d74c25c771b6936L,
+            unchecked((long)0x850020820850051L),unchecked((long)0x80990c2014140582L),unchecked((long)0x8201920208261809L),unchecked((long)0x892051990060941L),
+            0x22492492c22cb242L,0x430805050162492cL,unchecked((long)0x8041451586026026L),0x37c38020c5b43142L,
+            0x4208508514508014L,0x141405850850051L,0x51456180980990c2L,unchecked((long)0xe008316d0c50a010L),
+            0x2c52cb2c508b21f0L,0x600d2c92c22cb249L,unchecked((long)0x873c21c01850821cL),0x2c01cb1403cb1420L,
+            0x2080082145185182L,0x4500200208208000L,unchecked((long)0x870061420871803L),0x740500f5050821cfL,
+            unchecked((long)0x934d964618609000L),0x4c24d34d30824d30L,0x1860821c600d642L,unchecked((long)0xc2a072c925dac274L),
+            0x2c69839891c27472L,unchecked((long)0x9242082089242242L),unchecked((long)0x8208718034b00900L),0x1cb24976b09d0061L,
+            0x60e624709d1cb0a8L,unchecked((long)0xd31455d71574ce3eL),0x1c600d3825c25d74L,0x51515454423c3142L,
+            unchecked((long)0xc22c21cb1403c314L),unchecked((long)0xb2c20821431853L),0x34508b005145145L,0x5515108f0c508718L,
+            unchecked((long)0x8740500f2051454L),unchecked((long)0xe2534d920618f090L),0x493826596592c238L,0x4423c31421c600d6L,
+            0x72c2a042cb2d1545L,0x422c3983a091c574L,unchecked((long)0xb2c514508b2c52L),unchecked((long)0xf0c508718034b08bL),
+            unchecked((long)0xa810b2cb45515108L),0x2260e824715d1cb0L,unchecked((long)0xe6592c538e2d74ceL),0x420c308138938238L,
+            unchecked((long)0x850842022020cb1L),0x70c25c20ce0820ceL,0x4208208208208b0dL,unchecked((long)0xc20420850821421L),
+            0x21080880832c5083L,unchecked((long)0xa50838820838c214L),unchecked((long)0xaaaaaaaaa9c39430L),0x1aaa7eaa9fa9faaaL,
+            unchecked((long)0x824820d01420c308L),0x7184d37184d94d64L,0x34c24d071b7136d3L,unchecked((long)0x990936421534d34dL),
+            unchecked((long)0x834050830c20530L),0x34dc613653592092L,unchecked((long)0xa479c6dc4db4dc61L),unchecked((long)0x920a9f924924924aL),
+            0x72c220308192a82aL,0x724b24a30930920L,unchecked((long)0xd72025c920e2422L),unchecked((long)0x92d70975c9082247L),
+            unchecked((long)0x880c204924e0892cL),0x2c928c24c2481cb0L,unchecked((long)0x80a5248889088749L),0x6a861b2aaac74394L,
+            unchecked((long)0x81b2ca6ab27b278L),unchecked((long)0xa3093092072c2203L),unchecked((long)0xd76985d36915ce5cL),0x5d74c25c771b6936L,
             0x724e0973892d74d7L,0x4c2481cb0880c205L,0x6174da45739728c2L,0x4aa175c6da4db5daL,
-            0x6a869b2786486186L,(long)0xcb14510308186caL,0x220e2c5144205102L,(long)0xcb0d70c538210e2cL,
-            0x1451420851421452L,0x51440c204b085085L,(long)0xcb1451081440832cL,(long)0x94316208488b0888L,
-            (long)0xfaaa7dfa9f7e79c3L,0x30819ea7ea7df7dL,0x6564855820d01451L,(long)0x9613598393698399L,
-            (long)0xd965364539071b71L,0x4e0990996451534L,0x21560834051440c2L,(long)0xd660e4da60e65959L,
-            (long)0x9207e979c6dc6584L,(long)0xa82a8207df924820L,(long)0x892072c5103081a6L,0x2c22072cb2ca30b2L,
-            0x52c70d720538910eL,(long)0x8b2cb2d708e38914L,0x1cb1440c204b24e0L,(long)0x874b2cb28c2ca248L,
-            0x4394816224488b08L,(long)0x9e786aa69b1f7e77L,0x51030819eca6a9e7L,(long)0x8e38a30b2892072cL,
-            0x6996175983936913L,0x74ce39764538771bL,(long)0xc204e24e08e38b2dL,0x28c2ca2481cb1440L,
-            (long)0x85d660e4da44e38eL,0x698607e975c6da65L,(long)0xa6ca6aa699e7864aL
+            0x6a869b2786486186L,unchecked((long)0xcb14510308186caL),0x220e2c5144205102L,unchecked((long)0xcb0d70c538210e2cL),
+            0x1451420851421452L,0x51440c204b085085L,unchecked((long)0xcb1451081440832cL),unchecked((long)0x94316208488b0888L),
+            unchecked((long)0xfaaa7dfa9f7e79c3L),0x30819ea7ea7df7dL,0x6564855820d01451L,unchecked((long)0x9613598393698399L),
+            unchecked((long)0xd965364539071b71L),0x4e0990996451534L,0x21560834051440c2L,unchecked((long)0xd660e4da60e65959L),
+            unchecked((long)0x9207e979c6dc6584L),unchecked((long)0xa82a8207df924820L),unchecked((long)0x892072c5103081a6L),0x2c22072cb2ca30b2L,
+            0x52c70d720538910eL,unchecked((long)0x8b2cb2d708e38914L),0x1cb1440c204b24e0L,unchecked((long)0x874b2cb28c2ca248L),
+            0x4394816224488b08L,unchecked((long)0x9e786aa69b1f7e77L),0x51030819eca6a9e7L,unchecked((long)0x8e38a30b2892072cL),
+            0x6996175983936913L,0x74ce39764538771bL,unchecked((long)0xc204e24e08e38b2dL),0x28c2ca2481cb1440L,
+            unchecked((long)0x85d660e4da44e38eL),0x698607e975c6da65L,unchecked((long)0xa6ca6aa699e7864aL)
           };
         private readonly static long[] offsetIncrs5 = new long[] /*3 bits per value */ {
-            0x3002000000080000L,0x20c060L,0x100000004000000L,(long)0xdb6db6db50603018L,
-            (long)0xa480000200002db6L,0x1249208841241240L,0x4000010000104120L,0x2492c42092092052L,
-            (long)0xc30d800096592d9L,(long)0xb01b0c06c36036d8L,0x186c00036c36db0dL,(long)0xad860361b01b6c06L,
-            0x360001b75b6dd6ddL,(long)0xc412311c0db6030cL,(long)0xdb0db6e36e06L,(long)0x9188e06db01861bL,
+            0x3002000000080000L,0x20c060L,0x100000004000000L,unchecked((long)0xdb6db6db50603018L),
+            unchecked((long)0xa480000200002db6L),0x1249208841241240L,0x4000010000104120L,0x2492c42092092052L,
+            unchecked((long)0xc30d800096592d9L),unchecked((long)0xb01b0c06c36036d8L),0x186c00036c36db0dL,unchecked((long)0xad860361b01b6c06L),
+            0x360001b75b6dd6ddL,unchecked((long)0xc412311c0db6030cL),unchecked((long)0xdb0db6e36e06L),unchecked((long)0x9188e06db01861bL),
             0x6dd6db71b72b62L,0x4024024900800920L,0x20824900904920c2L,0x1201248040049000L,
-            0x5524ad4aa4906120L,0x4092402002480015L,(long)0x9252251248409409L,0x4920100124000820L,
-            0x29128924204a04a0L,(long)0x900830d200055549L,(long)0x934930c24c24034L,0x418690002682493L,
-            (long)0x9a49861261201a48L,(long)0xc348001355249d4L,0x24c40930940d2402L,0x1a40009a0924e24dL,
-            0x6204984a06920106L,(long)0x92494d5492712692L,0x4924924924924924L,0x2492492492492492L,
-            (long)0x9249249249249249L,0x4924924924924924L,0x2492492492492492L,(long)0x9249249249249249L,
-            0x4924924924924924L,0x2492492492492492L,(long)0x9249249249249249L,0x4924924924924924L,
-            0x2492492492492492L,(long)0x9249249249249249L,0x4924924924924924L,0x2492492492492492L,
-            (long)0x9249249249249249L,0x4924924924924924L,0x2492492492492492L,(long)0x9249249249249249L,
-            0x4924924924924924L,0x2492492492492492L,(long)0x9249249249249249L,0x4924924924924924L,
-            0x2492492492492492L,(long)0x9249249249249249L,0x4924924924924924L,0x2492492492492492L,
-            (long)0x9249249249249249L,0x4924924924924924L,0x2492492492492492L,(long)0x9249249249249249L,
-            0x4924924924924924L,0x2492492492492492L,(long)0x9249249249249249L,0x24924924L
+            0x5524ad4aa4906120L,0x4092402002480015L,unchecked((long)0x9252251248409409L),0x4920100124000820L,
+            0x29128924204a04a0L,unchecked((long)0x900830d200055549L),unchecked((long)0x934930c24c24034L),0x418690002682493L,
+            unchecked((long)0x9a49861261201a48L),unchecked((long)0xc348001355249d4L),0x24c40930940d2402L,0x1a40009a0924e24dL,
+            0x6204984a06920106L,unchecked((long)0x92494d5492712692L),0x4924924924924924L,0x2492492492492492L,
+            unchecked((long)0x9249249249249249L),0x4924924924924924L,0x2492492492492492L,unchecked((long)0x9249249249249249L),
+            0x4924924924924924L,0x2492492492492492L,unchecked((long)0x9249249249249249L),0x4924924924924924L,
+            0x2492492492492492L,unchecked((long)0x9249249249249249L),0x4924924924924924L,0x2492492492492492L,
+            unchecked((long)0x9249249249249249L),0x4924924924924924L,0x2492492492492492L,unchecked((long)0x9249249249249249L),
+            0x4924924924924924L,0x2492492492492492L,unchecked((long)0x9249249249249249L),0x4924924924924924L,
+            0x2492492492492492L,unchecked((long)0x9249249249249249L),0x4924924924924924L,0x2492492492492492L,
+            unchecked((long)0x9249249249249249L),0x4924924924924924L,0x2492492492492492L,unchecked((long)0x9249249249249249L),
+            0x4924924924924924L,0x2492492492492492L,unchecked((long)0x9249249249249249L),0x24924924L
           };
 
         // state map
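
For context: the arrays above are the bit-packed parametric transition tables used by the automaton support (per the inline comments, 6 bits per state value and 3 bits per offset increment), and the edit itself is mechanical. A C# hexadecimal literal with the high bit set is a ulong constant, so the constant cast to long overflows under the compiler's default checked evaluation (error CS0221) unless it is wrapped in unchecked(...). A minimal standalone illustration:

    // Does not compile: the constant exceeds long.MaxValue.
    //long bad = (long)0x9249249249249249L;           // error CS0221
    // Compiles: unchecked() permits the two's-complement reinterpretation.
    long ok = unchecked((long)0x9249249249249249L);   // -7905747460161236407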


[12/50] [abbrv] git commit: Port: csproj checkin

Posted by mh...@apache.org.
Port: csproj checkin


Project: http://git-wip-us.apache.org/repos/asf/lucenenet/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucenenet/commit/b713f3b7
Tree: http://git-wip-us.apache.org/repos/asf/lucenenet/tree/b713f3b7
Diff: http://git-wip-us.apache.org/repos/asf/lucenenet/diff/b713f3b7

Branch: refs/heads/branch_4x
Commit: b713f3b7211791f16a69e7f69842381e2eea0cd6
Parents: 14f5ae0
Author: James Blair <jm...@gmail.com>
Authored: Tue Jul 16 17:32:49 2013 -0400
Committer: James Blair <jm...@gmail.com>
Committed: Tue Jul 16 17:32:49 2013 -0400

----------------------------------------------------------------------
 test/core/Lucene.Net.Test.csproj | 10 +++++++++-
 1 file changed, 9 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b713f3b7/test/core/Lucene.Net.Test.csproj
----------------------------------------------------------------------
diff --git a/test/core/Lucene.Net.Test.csproj b/test/core/Lucene.Net.Test.csproj
index 8c12684..2544dd6 100644
--- a/test/core/Lucene.Net.Test.csproj
+++ b/test/core/Lucene.Net.Test.csproj
@@ -527,6 +527,14 @@
       <SubType>Code</SubType>
     </Compile>
     <Compile Include="Support\TestSupportClass.cs" />
+    <Compile Include="Util\Automaton\TestBasicOperations.cs" />
+    <Compile Include="Util\Automaton\TestCompiledAutomaton.cs" />
+    <Compile Include="Util\Automaton\TestDeterminism.cs" />
+    <Compile Include="Util\Automaton\TestDeterminizeLexicon.cs" />
+    <Compile Include="Util\Automaton\TestLevenshteinAutomata.cs" />
+    <Compile Include="Util\Automaton\TestMinimize.cs" />
+    <Compile Include="Util\Automaton\TestSpecialOperations.cs" />
+    <Compile Include="Util\Automaton\TestUTF32ToUTF8.cs" />
     <Compile Include="Util\Cache\TestSimpleLRUCache.cs" />
     <Compile Include="Util\English.cs">
       <SubType>Code</SubType>
@@ -571,6 +579,7 @@
     <Compile Include="Util\TestVersion.cs" />
     <Compile Include="Util\TestVersionComparator.cs" />
     <Compile Include="Util\TestVirtualMethod.cs" />
+    <Compile Include="Util\TestWeakIdentityMap.cs" />
     <Compile Include="Util\_TestUtil.cs">
       <SubType>Code</SubType>
     </Compile>
@@ -639,7 +648,6 @@
     <Content Include="UpdatedTests.txt" />
   </ItemGroup>
   <ItemGroup>
-    <Folder Include="Util\Automaton\" />
     <Folder Include="Util\Fst\" />
     <Folder Include="Util\Packed\" />
   </ItemGroup>


[49/50] [abbrv] git commit: fixed fst build errors

Posted by mh...@apache.org.
fixed fst build errors


Project: http://git-wip-us.apache.org/repos/asf/lucenenet/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucenenet/commit/07f83ff1
Tree: http://git-wip-us.apache.org/repos/asf/lucenenet/tree/07f83ff1
Diff: http://git-wip-us.apache.org/repos/asf/lucenenet/diff/07f83ff1

Branch: refs/heads/branch_4x
Commit: 07f83ff1c516fddcf14fcacf4704a20251571e75
Parents: b7ca14a
Author: James Blair <jm...@gmail.com>
Authored: Tue Aug 13 16:15:50 2013 -0400
Committer: Paul Irwin <pa...@gmail.com>
Committed: Tue Aug 13 16:18:13 2013 -0400

----------------------------------------------------------------------
 src/core/Util/Fst/FST.cs | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucenenet/blob/07f83ff1/src/core/Util/Fst/FST.cs
----------------------------------------------------------------------
diff --git a/src/core/Util/Fst/FST.cs b/src/core/Util/Fst/FST.cs
index 70f4e8e..145af95 100644
--- a/src/core/Util/Fst/FST.cs
+++ b/src/core/Util/Fst/FST.cs
@@ -352,7 +352,7 @@ namespace Lucene.Net.Util.Fst
         public void Save(FileInfo fileInfo)
         {
             var success = false;
-            var bs = new BufferedStream(new FileStream(fileInfo.FullName));
+            var bs = new BufferedStream(fileInfo.OpenWrite());
             try
             {
                 Save(new OutputStreamDataOutput(bs));
@@ -374,9 +374,9 @@ namespace Lucene.Net.Util.Fst
         /// <param name="fileInfo"></param>
         /// <param name="outputs"></param>
         /// <returns></returns>
-        public static FST<TMethod> Read<TMethod>(FileStream fileInfo, Outputs<TMethod> outputs) where TMethod : class
+        public static FST<TMethod> Read<TMethod>(FileInfo fileInfo, Outputs<TMethod> outputs) where TMethod : class
         {
-            var bs = new BufferedStream(new FileStream(fileInfo));
+            var bs = new BufferedStream(fileInfo.OpenRead());
             var success = false;
             try
             {
@@ -856,7 +856,7 @@ namespace Lucene.Net.Util.Fst
         /// <returns></returns>
         public int ReadNextArcLabel(Arc<T> arc, FST.BytesReader input)
         {
-            if (arc.IsLast)
+            if (arc.IsLast())
                 throw new ArgumentException("cannot readNextArc when arc.isLast()=true");
 
             if (arc.Label == END_LABEL)
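
The Save/Read fixes above replace a FileStream constructor overload that does not exist (FileStream has no single-argument constructor taking a path or a FileInfo) with the standard FileInfo helpers, each of which returns an appropriately opened FileStream. A small sketch of the corrected pattern, using only System.IO (the file name is illustrative only):

    var fileInfo = new FileInfo("fst.bin");
    using (var output = new BufferedStream(fileInfo.OpenWrite()))
    {
        // write the serialized FST here
    }
    using (var input = new BufferedStream(fileInfo.OpenRead()))
    {
        // read it back here
    }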


[02/50] [abbrv] git commit: another set of files

Posted by mh...@apache.org.
another set of files


Project: http://git-wip-us.apache.org/repos/asf/lucenenet/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucenenet/commit/64c13f3c
Tree: http://git-wip-us.apache.org/repos/asf/lucenenet/tree/64c13f3c
Diff: http://git-wip-us.apache.org/repos/asf/lucenenet/diff/64c13f3c

Branch: refs/heads/branch_4x
Commit: 64c13f3c5cf452252c0e45ff88860ebca5da0174
Parents: 666d83e
Author: Mike Potts <mi...@feature23.com>
Authored: Wed Jul 10 23:15:58 2013 -0400
Committer: Mike Potts <mi...@feature23.com>
Committed: Wed Jul 10 23:15:58 2013 -0400

----------------------------------------------------------------------
 .../CompressingStoredFieldsIndexReader.cs       | 175 +++++
 .../CompressingStoredFieldsReader.cs            |   2 +-
 .../CompressingStoredFieldsWriter.cs            | 756 +++++++++----------
 src/core/Codecs/Compressing/Compressor.cs       |   2 +-
 .../Compressing/GrowableByteArrayDataOutput.cs  |  34 +-
 src/core/Lucene.Net.csproj                      |   2 +
 6 files changed, 575 insertions(+), 396 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucenenet/blob/64c13f3c/src/core/Codecs/Compressing/CompressingStoredFieldsIndexReader.cs
----------------------------------------------------------------------
diff --git a/src/core/Codecs/Compressing/CompressingStoredFieldsIndexReader.cs b/src/core/Codecs/Compressing/CompressingStoredFieldsIndexReader.cs
new file mode 100644
index 0000000..f981b32
--- /dev/null
+++ b/src/core/Codecs/Compressing/CompressingStoredFieldsIndexReader.cs
@@ -0,0 +1,175 @@
+using Lucene.Net.Index;
+using Lucene.Net.Store;
+using Lucene.Net.Support;
+using Lucene.Net.Util;
+using Lucene.Net.Util.Packed;
+using System;
+using System.Collections.Generic;
+using System.Linq;
+using System.Text;
+
+namespace Lucene.Net.Codecs.Compressing
+{
+    public sealed class CompressingStoredFieldsIndexReader: ICloneable //Closable??
+    {
+        int maxDoc;
+        int[] docBases;
+        long[] startPointers;
+        int[] avgChunkDocs;
+        long[] avgChunkSizes;
+        PackedInts.Reader[] docBasesDeltas; // delta from the avg
+        PackedInts.Reader[] startPointersDeltas; // delta from the avg
+
+        IndexInput fieldsIndexIn;
+
+        static long MoveLowOrderBitToSign(long n) 
+        {
+            return ((Number.URShift(n, 1) ^ -(n & 1)));
+        }
+
+        public CompressingStoredFieldsIndexReader(IndexInput fieldsIndexIn, SegmentInfo si) 
+        {
+            this.fieldsIndexIn = fieldsIndexIn;
+            maxDoc = si.DocCount;
+            int[] docBases = new int[16];
+            long[] startPointers = new long[16];
+            int[] avgChunkDocs = new int[16];
+            long[] avgChunkSizes = new long[16];
+            PackedInts.Reader[] docBasesDeltas = new PackedInts.Reader[16];
+            PackedInts.Reader[] startPointersDeltas = new PackedInts.Reader[16];
+
+            int packedIntsVersion = fieldsIndexIn.ReadVInt();
+
+            int blockCount = 0;
+
+            for (;;) {
+              int numChunks = fieldsIndexIn.ReadVInt();
+              if (numChunks == 0) {
+                break;
+              }
+
+              if (blockCount == docBases.Length) {
+                int newSize = ArrayUtil.Oversize(blockCount + 1, 8);
+                docBases = Arrays.CopyOf(docBases, newSize);
+                startPointers = Arrays.CopyOf(startPointers, newSize);
+                avgChunkDocs = Arrays.CopyOf(avgChunkDocs, newSize);
+                avgChunkSizes = Arrays.CopyOf(avgChunkSizes, newSize);
+                docBasesDeltas = Arrays.CopyOf(docBasesDeltas, newSize);
+                startPointersDeltas = Arrays.CopyOf(startPointersDeltas, newSize);
+              }
+
+              // doc bases
+              docBases[blockCount] = fieldsIndexIn.ReadVInt();
+              avgChunkDocs[blockCount] = fieldsIndexIn.ReadVInt();
+              int bitsPerDocBase = fieldsIndexIn.ReadVInt();
+              if (bitsPerDocBase > 32) {
+                throw new CorruptIndexException("Corrupted");
+              }
+              docBasesDeltas[blockCount] = (Lucene.Net.Util.Packed.PackedInts.Reader)PackedInts.GetReaderNoHeader(fieldsIndexIn, PackedInts.Format.PACKED, packedIntsVersion, numChunks, bitsPerDocBase);
+
+              // start pointers
+              startPointers[blockCount] = fieldsIndexIn.ReadVLong();
+              avgChunkSizes[blockCount] = fieldsIndexIn.ReadVLong();
+              int bitsPerStartPointer = fieldsIndexIn.ReadVInt();
+              if (bitsPerStartPointer > 64) {
+                throw new CorruptIndexException("Corrupted");
+              }
+              startPointersDeltas[blockCount] = (Lucene.Net.Util.Packed.PackedInts.Reader)PackedInts.GetReaderNoHeader(fieldsIndexIn, PackedInts.Format.PACKED, packedIntsVersion, numChunks, bitsPerStartPointer);
+
+              ++blockCount;
+            }
+
+            this.docBases = Arrays.CopyOf(docBases, blockCount);
+            this.startPointers = Arrays.CopyOf(startPointers, blockCount);
+            this.avgChunkDocs = Arrays.CopyOf(avgChunkDocs, blockCount);
+            this.avgChunkSizes = Arrays.CopyOf(avgChunkSizes, blockCount);
+            this.docBasesDeltas = Arrays.CopyOf(docBasesDeltas, blockCount);
+            this.startPointersDeltas = Arrays.CopyOf(startPointersDeltas, blockCount);
+        }
+
+        private CompressingStoredFieldsIndexReader(CompressingStoredFieldsIndexReader other)
+        {
+            this.fieldsIndexIn = null;
+            this.maxDoc = other.maxDoc;
+            this.docBases = other.docBases;
+            this.startPointers = other.startPointers;
+            this.avgChunkDocs = other.avgChunkDocs;
+            this.avgChunkSizes = other.avgChunkSizes;
+            this.docBasesDeltas = other.docBasesDeltas;
+            this.startPointersDeltas = other.startPointersDeltas;
+        }
+
+        private int Block(int docID) 
+        {
+            int lo = 0, hi = docBases.Length - 1;
+            while (lo <= hi) {
+              int mid = Number.URShift(lo + hi, 1);
+              int midValue = docBases[mid];
+              if (midValue == docID) {
+                return mid;
+              } else if (midValue < docID) {
+                lo = mid + 1;
+              } else {
+                hi = mid - 1;
+              }
+            }
+            return hi;
+        }
+
+        private int relativeDocBase(int block, int relativeChunk) 
+        {
+            int expected = avgChunkDocs[block] * relativeChunk;
+            long delta = MoveLowOrderBitToSign(docBasesDeltas[block].Get(relativeChunk));
+            return expected + (int) delta;
+        }
+
+          private long relativeStartPointer(int block, int relativeChunk) 
+          {
+            long expected = avgChunkSizes[block] * relativeChunk;
+            long delta = MoveLowOrderBitToSign(startPointersDeltas[block].Get(relativeChunk));
+            return expected + delta;
+          }
+
+          private int relativeChunk(int block, int relativeDoc) 
+          {
+            int lo = 0, hi = docBasesDeltas[block].Size() - 1;
+            while (lo <= hi) {
+              int mid = Number.URShift(lo + hi, 1);
+              int midValue = relativeDocBase(block, mid);
+              if (midValue == relativeDoc) {
+                return mid;
+              } else if (midValue < relativeDoc) {
+                lo = mid + 1;
+              } else {
+                hi = mid - 1;
+              }
+            }
+            return hi;
+          }
+
+          private long getStartPointer(int docID) 
+          {
+            if (docID < 0 || docID >= maxDoc) {
+              throw new ArgumentException("docID out of range [0-" + maxDoc + "]: " + docID);
+            }
+            int block = Block(docID);
+            int relativeChunk = this.relativeChunk(block, docID - docBases[block]);
+            return startPointers[block] + relativeStartPointer(block, relativeChunk);
+          }
+
+          public object Clone()
+          {
+            if (fieldsIndexIn == null) {
+              return this;
+            } else {
+              return new CompressingStoredFieldsIndexReader(this);
+            }
+          }
+
+          public void Close()
+          {
+            IOUtils.Close(fieldsIndexIn);
+          }
+
+    }
+}
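
Two details of this new reader are worth calling out. MoveLowOrderBitToSign undoes a zigzag encoding: the writer stores each signed delta with its sign moved into the low bit so that small magnitudes stay small under variable-length encoding, and the reader shifts it back. A hedged sketch of the pair (only the decoder appears in this commit; the matching encoder below is an assumption):

    // Decode, as in MoveLowOrderBitToSign: unsigned shift right, then restore the sign.
    static long ZigZagDecode(long n) { return (long)((ulong)n >> 1) ^ -(n & 1); }
    // Matching encoder (assumed, not part of the commit):
    static long ZigZagEncode(long n) { return (n << 1) ^ (n >> 63); }
    // Example: -3 encodes to 5, and ZigZagDecode(5) == -3.

Second, the binary searches in Block and relativeChunk compute the midpoint as Number.URShift(lo + hi, 1) rather than (lo + hi) / 2; the unsigned shift keeps the midpoint correct even if lo + hi overflows into the sign bit.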

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/64c13f3c/src/core/Codecs/Compressing/CompressingStoredFieldsReader.cs
----------------------------------------------------------------------
diff --git a/src/core/Codecs/Compressing/CompressingStoredFieldsReader.cs b/src/core/Codecs/Compressing/CompressingStoredFieldsReader.cs
index ad909ce..9c55e07 100644
--- a/src/core/Codecs/Compressing/CompressingStoredFieldsReader.cs
+++ b/src/core/Codecs/Compressing/CompressingStoredFieldsReader.cs
@@ -59,7 +59,7 @@ public sealed class CompressingStoredFieldsReader: StoredFieldsReader {
     string segment = si.name;
     bool success = false;
     fieldInfos = fn;
-    numDocs = si.getDocCount();
+    numDocs = si.DocCount;
     IndexInput indexStream = null;
     try {
       fieldsStream = d.OpenInput(IndexFileNames.SegmentFileName(segment, segmentSuffix, FIELDS_EXTENSION), context);
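
This one-line hunk shows the recurring idiom of the port: Java bean-style getters become C# properties. A hypothetical sketch of the convention (names invented for illustration):

    public class SegmentInfoSketch
    {
        private readonly int docCount;
        public SegmentInfoSketch(int docCount) { this.docCount = docCount; }
        public int DocCount { get { return docCount; } } // replaces Java's getDocCount()
    }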

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/64c13f3c/src/core/Codecs/Compressing/CompressingStoredFieldsWriter.cs
----------------------------------------------------------------------
diff --git a/src/core/Codecs/Compressing/CompressingStoredFieldsWriter.cs b/src/core/Codecs/Compressing/CompressingStoredFieldsWriter.cs
index 54882fd..1543196 100644
--- a/src/core/Codecs/Compressing/CompressingStoredFieldsWriter.cs
+++ b/src/core/Codecs/Compressing/CompressingStoredFieldsWriter.cs
@@ -1,413 +1,391 @@
-package org.apache.lucene.codecs.compressing;
-
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import static org.apache.lucene.codecs.lucene40.Lucene40StoredFieldsWriter.FIELDS_EXTENSION;
-import static org.apache.lucene.codecs.lucene40.Lucene40StoredFieldsWriter.FIELDS_INDEX_EXTENSION;
-
-import java.io.IOException;
-import java.util.Arrays;
-
-import org.apache.lucene.codecs.CodecUtil;
-import org.apache.lucene.codecs.StoredFieldsReader;
-import org.apache.lucene.codecs.StoredFieldsWriter;
-import org.apache.lucene.codecs.compressing.CompressingStoredFieldsReader.ChunkIterator;
-import org.apache.lucene.document.Document;
-import org.apache.lucene.index.AtomicReader;
-import org.apache.lucene.index.CorruptIndexException;
-import org.apache.lucene.index.FieldInfo;
-import org.apache.lucene.index.FieldInfos;
-import org.apache.lucene.index.IndexFileNames;
-import org.apache.lucene.index.IndexableField;
-import org.apache.lucene.index.MergeState;
-import org.apache.lucene.index.SegmentInfo;
-import org.apache.lucene.index.SegmentReader;
-import org.apache.lucene.store.DataOutput;
-import org.apache.lucene.store.Directory;
-import org.apache.lucene.store.IOContext;
-import org.apache.lucene.store.IndexOutput;
-import org.apache.lucene.util.ArrayUtil;
-import org.apache.lucene.util.Bits;
-import org.apache.lucene.util.BytesRef;
-import org.apache.lucene.util.IOUtils;
-import org.apache.lucene.util.packed.PackedInts;
-
-/**
- * {@link StoredFieldsWriter} impl for {@link CompressingStoredFieldsFormat}.
- * @lucene.experimental
- */
-public final class CompressingStoredFieldsWriter extends StoredFieldsWriter {
-
-  // hard limit on the maximum number of documents per chunk
-  static final int MAX_DOCUMENTS_PER_CHUNK = 128;
-
-  static final int         STRING = 0x00;
-  static final int       BYTE_ARR = 0x01;
-  static final int    NUMERIC_INT = 0x02;
-  static final int  NUMERIC_FLOAT = 0x03;
-  static final int   NUMERIC_LONG = 0x04;
-  static final int NUMERIC_DOUBLE = 0x05;
-
-  static final int TYPE_BITS = PackedInts.bitsRequired(NUMERIC_DOUBLE);
-  static final int TYPE_MASK = (int) PackedInts.maxValue(TYPE_BITS);
-
-  static final String CODEC_SFX_IDX = "Index";
-  static final String CODEC_SFX_DAT = "Data";
-  static final int VERSION_START = 0;
-  static final int VERSION_CURRENT = VERSION_START;
-
-  private final Directory directory;
-  private final String segment;
-  private final String segmentSuffix;
-  private CompressingStoredFieldsIndexWriter indexWriter;
-  private IndexOutput fieldsStream;
-
-  private final CompressionMode compressionMode;
-  private final Compressor compressor;
-  private final int chunkSize;
-
-  private final GrowableByteArrayDataOutput bufferedDocs;
-  private int[] numStoredFields; // number of stored fields
-  private int[] endOffsets; // end offsets in bufferedDocs
-  private int docBase; // doc ID at the beginning of the chunk
-  private int numBufferedDocs; // docBase + numBufferedDocs == current doc ID
-
-  /** Sole constructor. */
-  public CompressingStoredFieldsWriter(Directory directory, SegmentInfo si, String segmentSuffix, IOContext context,
-      String formatName, CompressionMode compressionMode, int chunkSize) throws IOException {
-    assert directory != null;
-    this.directory = directory;
-    this.segment = si.name;
-    this.segmentSuffix = segmentSuffix;
-    this.compressionMode = compressionMode;
-    this.compressor = compressionMode.newCompressor();
-    this.chunkSize = chunkSize;
-    this.docBase = 0;
-    this.bufferedDocs = new GrowableByteArrayDataOutput(chunkSize);
-    this.numStoredFields = new int[16];
-    this.endOffsets = new int[16];
-    this.numBufferedDocs = 0;
-
-    boolean success = false;
-    IndexOutput indexStream = directory.createOutput(IndexFileNames.segmentFileName(segment, segmentSuffix, FIELDS_INDEX_EXTENSION), context);
-    try {
-      fieldsStream = directory.createOutput(IndexFileNames.segmentFileName(segment, segmentSuffix, FIELDS_EXTENSION), context);
-
-      final String codecNameIdx = formatName + CODEC_SFX_IDX;
-      final String codecNameDat = formatName + CODEC_SFX_DAT;
-      CodecUtil.writeHeader(indexStream, codecNameIdx, VERSION_CURRENT);
-      CodecUtil.writeHeader(fieldsStream, codecNameDat, VERSION_CURRENT);
-      assert CodecUtil.headerLength(codecNameDat) == fieldsStream.getFilePointer();
-      assert CodecUtil.headerLength(codecNameIdx) == indexStream.getFilePointer();
-
-      indexWriter = new CompressingStoredFieldsIndexWriter(indexStream);
-      indexStream = null;
-
-      fieldsStream.writeVInt(PackedInts.VERSION_CURRENT);
-
-      success = true;
-    } finally {
-      if (!success) {
-        IOUtils.closeWhileHandlingException(indexStream);
-        abort();
-      }
-    }
-  }
-
-  @Override
-  public void close() throws IOException {
-    try {
-      IOUtils.close(fieldsStream, indexWriter);
-    } finally {
-      fieldsStream = null;
-      indexWriter = null;
-    }
-  }
-
-  @Override
-  public void startDocument(int numStoredFields) throws IOException {
-    if (numBufferedDocs == this.numStoredFields.length) {
-      final int newLength = ArrayUtil.oversize(numBufferedDocs + 1, 4);
-      this.numStoredFields = Arrays.copyOf(this.numStoredFields, newLength);
-      endOffsets = Arrays.copyOf(endOffsets, newLength);
-    }
-    this.numStoredFields[numBufferedDocs] = numStoredFields;
-    ++numBufferedDocs;
-  }
-
-  @Override
-  public void finishDocument() throws IOException {
-    endOffsets[numBufferedDocs - 1] = bufferedDocs.length;
-    if (triggerFlush()) {
-      flush();
-    }
-  }
-
-  private static void saveInts(int[] values, int length, DataOutput out) throws IOException {
-    assert length > 0;
-    if (length == 1) {
-      out.writeVInt(values[0]);
-    } else {
-      boolean allEqual = true;
-      for (int i = 1; i < length; ++i) {
-        if (values[i] != values[0]) {
-          allEqual = false;
-          break;
-        }
-      }
-      if (allEqual) {
-        out.writeVInt(0);
-        out.writeVInt(values[0]);
-      } else {
-        long max = 0;
-        for (int i = 0; i < length; ++i) {
-          max |= values[i];
+using Lucene.Net.Documents;
+using Lucene.Net.Index;
+using Lucene.Net.Store;
+using Lucene.Net.Support;
+using Lucene.Net.Util;
+using Lucene.Net.Util.Packed;
+using System;
+using System.Collections.Generic;
+using System.Linq;
+using System.Text;
+
+namespace Lucene.Net.Codecs.Compressing
+{
+    public sealed class CompressingStoredFieldsWriter : StoredFieldsWriter
+    {
+        static readonly int MAX_DOCUMENTS_PER_CHUNK = 128;
+        static readonly int STRING = 0x00;
+        static readonly int BYTE_ARR = 0x01;
+        static readonly int NUMERIC_INT = 0x02;
+        static readonly int NUMERIC_FLOAT = 0x03;
+        static readonly int NUMERIC_LONG = 0x04;
+        static readonly int NUMERIC_DOUBLE = 0x05;
+
+        static readonly int TYPE_BITS = PackedInts.BitsRequired(NUMERIC_DOUBLE);
+        static readonly int TYPE_MASK = (int)PackedInts.MaxValue(TYPE_BITS);
+
+        static readonly String CODEC_SFX_IDX = "Index";
+        static readonly String CODEC_SFX_DAT = "Data";
+        static readonly int VERSION_START = 0;
+        static readonly int VERSION_CURRENT = VERSION_START;
+
+        private Directory directory;
+        private string segment;
+        private string segmentSuffix;
+        private CompressingStoredFieldsIndexWriter indexWriter;
+        private IndexOutput fieldsStream;
+
+        private CompressionMode compressionMode;
+        private Compressor compressor;
+        private int chunkSize;
+
+        private GrowableByteArrayDataOutput bufferedDocs;
+        private int[] numStoredFields; // number of stored fields
+        private int[] endOffsets; // end offsets in bufferedDocs
+        private int docBase; // doc ID at the beginning of the chunk
+        private int numBufferedDocs; // docBase + numBufferedDocs == current doc ID
+
+        public CompressingStoredFieldsWriter(Directory directory, SegmentInfo si, string segmentSuffix, IOContext context, string formatName, CompressionMode compressionMode, int chunkSize) 
+        {
+          this.directory = directory;
+          this.segment = si.name;
+          this.segmentSuffix = segmentSuffix;
+          this.compressionMode = compressionMode;
+          this.compressor = compressionMode.NewCompressor();
+          this.chunkSize = chunkSize;
+          this.docBase = 0;
+          this.bufferedDocs = new GrowableByteArrayDataOutput(chunkSize);
+          this.numStoredFields = new int[16];
+          this.endOffsets = new int[16];
+          this.numBufferedDocs = 0;
+
+          bool success = false;
+          IndexOutput indexStream = directory.CreateOutput(IndexFileNames.SegmentFileName(segment, segmentSuffix, FIELDS_INDEX_EXTENSION), context);
+          try 
+          {
+            fieldsStream = directory.CreateOutput(IndexFileNames.SegmentFileName(segment, segmentSuffix, FIELDS_EXTENSION), context);
+
+            string codecNameIdx = formatName + CODEC_SFX_IDX;
+            string codecNameDat = formatName + CODEC_SFX_DAT;
+            CodecUtil.WriteHeader(indexStream, codecNameIdx, VERSION_CURRENT);
+            CodecUtil.WriteHeader(fieldsStream, codecNameDat, VERSION_CURRENT);
+
+            indexWriter = new CompressingStoredFieldsIndexWriter(indexStream);
+            indexStream = null;
+
+            fieldsStream.WriteVInt(PackedInts.VERSION_CURRENT);
+
+            success = true;
+          } 
+          finally 
+          {
+            if (!success) {
+              IOUtils.CloseWhileHandlingException(indexStream);
+              Abort();
+            }
+          }
         }
-        final int bitsRequired = PackedInts.bitsRequired(max);
-        out.writeVInt(bitsRequired);
-        final PackedInts.Writer w = PackedInts.getWriterNoHeader(out, PackedInts.Format.PACKED, length, bitsRequired, 1);
-        for (int i = 0; i < length; ++i) {
-          w.add(values[i]);
+
+        public override void Close()
+        {
+            try
+            {
+                IOUtils.Close(fieldsStream, indexWriter);
+            }
+            finally
+            {
+                fieldsStream = null;
+                indexWriter = null;
+            }
         }
-        w.finish();
-      }
-    }
-  }
 
-  private void writeHeader(int docBase, int numBufferedDocs, int[] numStoredFields, int[] lengths) throws IOException {
-    // save docBase and numBufferedDocs
-    fieldsStream.writeVInt(docBase);
-    fieldsStream.writeVInt(numBufferedDocs);
+        public override void StartDocument(int numStoredFields)
+        {
+            if (numBufferedDocs == this.numStoredFields.Length)
+            {
+                int newLength = ArrayUtil.Oversize(numBufferedDocs + 1, 4);
+                this.numStoredFields = Arrays.CopyOf(this.numStoredFields, newLength);
+                endOffsets = Arrays.CopyOf(endOffsets, newLength);
+            }
+            this.numStoredFields[numBufferedDocs] = numStoredFields;
+            ++numBufferedDocs;
+        }
 
-    // save numStoredFields
-    saveInts(numStoredFields, numBufferedDocs, fieldsStream);
+        public override void FinishDocument()
+        {
+            endOffsets[numBufferedDocs - 1] = bufferedDocs.Length;
+            if (TriggerFlush())
+            {
+                Flush();
+            }
+        }
 
-    // save lengths
-    saveInts(lengths, numBufferedDocs, fieldsStream);
-  }
+        private static void saveInts(int[] values, int length, DataOutput output) 
+        {
+          if (length == 1) 
+          {
+            output.WriteVInt(values[0]);
+          } 
+          else 
+          {
+            bool allEqual = true;
+            for (int i = 1; i < length; ++i) {
+              if (values[i] != values[0]) {
+                allEqual = false;
+                break;
+              }
+            }
+            if (allEqual) {
+              output.WriteVInt(0);
+              output.WriteVInt(values[0]);
+            } 
+            else 
+            {
+              long max = 0;
+              for (int i = 0; i < length; ++i) {
+                max |= values[i];
+              }
+              int bitsRequired = PackedInts.BitsRequired(max);
+              output.WriteVInt(bitsRequired);
+              PackedInts.Writer w = PackedInts.GetWriterNoHeader(output, PackedInts.Format.PACKED, length, bitsRequired, 1);
+              for (int i = 0; i < length; ++i) {
+                w.Add(values[i]);
+              }
+              w.Finish();
+            }
+          }
+        }
 
-  private boolean triggerFlush() {
-    return bufferedDocs.length >= chunkSize || // chunks of at least chunkSize bytes
-        numBufferedDocs >= MAX_DOCUMENTS_PER_CHUNK;
-  }
+        private void WriteHeader(int docBase, int numBufferedDocs, int[] numStoredFields, int[] lengths)
+        {
+            // save docBase and numBufferedDocs
+            fieldsStream.WriteVInt(docBase);
+            fieldsStream.WriteVInt(numBufferedDocs);
 
-  private void flush() throws IOException {
-    indexWriter.writeIndex(numBufferedDocs, fieldsStream.getFilePointer());
+            // save numStoredFields
+            saveInts(numStoredFields, numBufferedDocs, fieldsStream);
 
-    // transform end offsets into lengths
-    final int[] lengths = endOffsets;
-    for (int i = numBufferedDocs - 1; i > 0; --i) {
-      lengths[i] = endOffsets[i] - endOffsets[i - 1];
-      assert lengths[i] >= 0;
-    }
-    writeHeader(docBase, numBufferedDocs, numStoredFields, lengths);
-
-    // compress stored fields to fieldsStream
-    compressor.compress(bufferedDocs.bytes, 0, bufferedDocs.length, fieldsStream);
-
-    // reset
-    docBase += numBufferedDocs;
-    numBufferedDocs = 0;
-    bufferedDocs.length = 0;
-  }
-
-  @Override
-  public void writeField(FieldInfo info, IndexableField field)
-      throws IOException {
-    int bits = 0;
-    final BytesRef bytes;
-    final String string;
-
-    Number number = field.numericValue();
-    if (number != null) {
-      if (number instanceof Byte || number instanceof Short || number instanceof Integer) {
-        bits = NUMERIC_INT;
-      } else if (number instanceof Long) {
-        bits = NUMERIC_LONG;
-      } else if (number instanceof Float) {
-        bits = NUMERIC_FLOAT;
-      } else if (number instanceof Double) {
-        bits = NUMERIC_DOUBLE;
-      } else {
-        throw new IllegalArgumentException("cannot store numeric type " + number.getClass());
-      }
-      string = null;
-      bytes = null;
-    } else {
-      bytes = field.binaryValue();
-      if (bytes != null) {
-        bits = BYTE_ARR;
-        string = null;
-      } else {
-        bits = STRING;
-        string = field.stringValue();
-        if (string == null) {
-          throw new IllegalArgumentException("field " + field.name() + " is stored but does not have binaryValue, stringValue nor numericValue");
+            // save lengths
+            saveInts(lengths, numBufferedDocs, fieldsStream);
         }
-      }
-    }
 
-    final long infoAndBits = (((long) info.number) << TYPE_BITS) | bits;
-    bufferedDocs.writeVLong(infoAndBits);
-
-    if (bytes != null) {
-      bufferedDocs.writeVInt(bytes.length);
-      bufferedDocs.writeBytes(bytes.bytes, bytes.offset, bytes.length);
-    } else if (string != null) {
-      bufferedDocs.writeString(field.stringValue());
-    } else {
-      if (number instanceof Byte || number instanceof Short || number instanceof Integer) {
-        bufferedDocs.writeInt(number.intValue());
-      } else if (number instanceof Long) {
-        bufferedDocs.writeLong(number.longValue());
-      } else if (number instanceof Float) {
-        bufferedDocs.writeInt(Float.floatToIntBits(number.floatValue()));
-      } else if (number instanceof Double) {
-        bufferedDocs.writeLong(Double.doubleToLongBits(number.doubleValue()));
-      } else {
-        throw new AssertionError("Cannot get here");
-      }
-    }
-  }
-
-  @Override
-  public void abort() {
-    IOUtils.closeWhileHandlingException(this);
-    IOUtils.deleteFilesIgnoringExceptions(directory,
-        IndexFileNames.segmentFileName(segment, segmentSuffix, FIELDS_EXTENSION),
-        IndexFileNames.segmentFileName(segment, segmentSuffix, FIELDS_INDEX_EXTENSION));
-  }
-
-  @Override
-  public void finish(FieldInfos fis, int numDocs) throws IOException {
-    if (numBufferedDocs > 0) {
-      flush();
-    } else {
-      assert bufferedDocs.length == 0;
-    }
-    if (docBase != numDocs) {
-      throw new RuntimeException("Wrote " + docBase + " docs, finish called with numDocs=" + numDocs);
-    }
-    indexWriter.finish(numDocs);
-    assert bufferedDocs.length == 0;
-  }
-
-  @Override
-  public int merge(MergeState mergeState) throws IOException {
-    int docCount = 0;
-    int idx = 0;
-
-    for (AtomicReader reader : mergeState.readers) {
-      final SegmentReader matchingSegmentReader = mergeState.matchingSegmentReaders[idx++];
-      CompressingStoredFieldsReader matchingFieldsReader = null;
-      if (matchingSegmentReader != null) {
-        final StoredFieldsReader fieldsReader = matchingSegmentReader.getFieldsReader();
-        // we can only bulk-copy if the matching reader is also a CompressingStoredFieldsReader
-        if (fieldsReader != null && fieldsReader instanceof CompressingStoredFieldsReader) {
-          matchingFieldsReader = (CompressingStoredFieldsReader) fieldsReader;
+        private bool TriggerFlush()
+        {
+            return bufferedDocs.Length >= chunkSize || // chunks of at least chunkSize bytes
+                numBufferedDocs >= MAX_DOCUMENTS_PER_CHUNK;
         }
-      }
-
-      final int maxDoc = reader.maxDoc();
-      final Bits liveDocs = reader.getLiveDocs();
-
-      if (matchingFieldsReader == null) {
-        // naive merge...
-        for (int i = nextLiveDoc(0, liveDocs, maxDoc); i < maxDoc; i = nextLiveDoc(i + 1, liveDocs, maxDoc)) {
-          Document doc = reader.document(i);
-          addDocument(doc, mergeState.fieldInfos);
-          ++docCount;
-          mergeState.checkAbort.work(300);
+
+        private void Flush()
+        {
+            indexWriter.WriteIndex(numBufferedDocs, fieldsStream.FilePointer);
+
+            // transform end offsets into lengths
+            int[] lengths = endOffsets;
+            for (int i = numBufferedDocs - 1; i > 0; --i)
+            {
+                lengths[i] = endOffsets[i] - endOffsets[i - 1];
+            }
+
+            WriteHeader(docBase, numBufferedDocs, numStoredFields, lengths);
+
+            // compress stored fields to fieldsStream
+            compressor.Compress(bufferedDocs.Bytes, 0, bufferedDocs.Length, fieldsStream);
+
+            // reset
+            docBase += numBufferedDocs;
+            numBufferedDocs = 0;
+            bufferedDocs.Length = 0;
         }
-      } else {
-        int docID = nextLiveDoc(0, liveDocs, maxDoc);
-        if (docID < maxDoc) {
-          // not all docs were deleted
-          final ChunkIterator it = matchingFieldsReader.chunkIterator(docID);
-          int[] startOffsets = new int[0];
-          do {
-            // go to the next chunk that contains docID
-            it.next(docID);
-            // transform lengths into offsets
-            if (startOffsets.length < it.chunkDocs) {
-              startOffsets = new int[ArrayUtil.oversize(it.chunkDocs, 4)];
+
+        public override void WriteField(FieldInfo info, IndexableField field)
+        {
+          int bits = 0;
+          BytesRef bytes;
+          string str;
+
+          object number = field.NumericValue; // property name assumed, following the port's getter-to-property convention
+          if (number != null) {
+            if (number is byte || number is short || number is int) {
+              bits = NUMERIC_INT;
+            } else if (number is long) {
+              bits = NUMERIC_LONG;
+            } else if (number is float) {
+              bits = NUMERIC_FLOAT;
+            } else if (number is double) {
+              bits = NUMERIC_DOUBLE;
+            } else {
+              throw new ArgumentException("cannot store numeric type " + number.GetType());
+            }
+            str = null;
+            bytes = null;
+          } else {
+            bytes = field.BinaryValue; // property form assumed
+            if (bytes != null) {
+              bits = BYTE_ARR;
+              str = null;
+            } else {
+              bits = STRING;
+              str = field.StringValue; // property form assumed
+              if (str == null) {
+                throw new ArgumentException("field " + field.Name + " is stored but does not have binaryValue, stringValue nor numericValue");
+              }
             }
-            for (int i = 1; i < it.chunkDocs; ++i) {
-              startOffsets[i] = startOffsets[i - 1] + it.lengths[i - 1];
+          }
+
+          long infoAndBits = (((long) info.number) << TYPE_BITS) | bits;
+          bufferedDocs.WriteVLong(infoAndBits);
+
+          if (bytes != null) {
+            bufferedDocs.WriteVInt(bytes.length);
+            bufferedDocs.WriteBytes(bytes.bytes, bytes.offset, bytes.length);
+          } else if (str != null) {
+            bufferedDocs.WriteString(str);
+          } else {
+            if (number is byte || number is short || number is int) {
+              bufferedDocs.WriteInt(Convert.ToInt32(number));
+            } else if (number is long) {
+              bufferedDocs.WriteLong((long) number);
+            } else if (number is float) {
+              bufferedDocs.WriteInt(BitConverter.ToInt32(BitConverter.GetBytes((float) number), 0)); // float bits as int
+            } else if (number is double) {
+              bufferedDocs.WriteLong(BitConverter.DoubleToInt64Bits((double) number));
+            } else {
+              throw new InvalidOperationException("Cannot get here");
             }
+          }
+        }
 
-            if (compressionMode == matchingFieldsReader.getCompressionMode() // same compression mode
-                && numBufferedDocs == 0 // starting a new chunk
-                && startOffsets[it.chunkDocs - 1] < chunkSize // chunk is small enough
-                && startOffsets[it.chunkDocs - 1] + it.lengths[it.chunkDocs - 1] >= chunkSize // chunk is large enough
-                && nextDeletedDoc(it.docBase, liveDocs, it.docBase + it.chunkDocs) == it.docBase + it.chunkDocs) { // no deletion in the chunk
-              assert docID == it.docBase;
-
-              // no need to decompress, just copy data
-              indexWriter.writeIndex(it.chunkDocs, fieldsStream.getFilePointer());
-              writeHeader(this.docBase, it.chunkDocs, it.numStoredFields, it.lengths);
-              it.copyCompressedData(fieldsStream);
-              this.docBase += it.chunkDocs;
-              docID = nextLiveDoc(it.docBase + it.chunkDocs, liveDocs, maxDoc);
-              docCount += it.chunkDocs;
-              mergeState.checkAbort.work(300 * it.chunkDocs);
-            } else {
-              // decompress
-              it.decompress();
-              if (startOffsets[it.chunkDocs - 1] + it.lengths[it.chunkDocs - 1] != it.bytes.length) {
-                throw new CorruptIndexException("Corrupted: expected chunk size=" + startOffsets[it.chunkDocs - 1] + it.lengths[it.chunkDocs - 1] + ", got " + it.bytes.length);
+        public override void Abort() {
+          IOUtils.CloseWhileHandlingException(this);
+          IOUtils.DeleteFilesIgnoringExceptions(directory,
+              IndexFileNames.SegmentFileName(segment, segmentSuffix, FIELDS_EXTENSION),
+              IndexFileNames.SegmentFileName(segment, segmentSuffix, FIELDS_INDEX_EXTENSION));
+        }
+
+        public override void Finish(FieldInfos fis, int numDocs)
+        {
+          if (numBufferedDocs > 0) {
+            Flush();
+          } else {
+            //assert bufferedDocs.length == 0;
+          }
+          if (docBase != numDocs) {
+            throw new InvalidOperationException("Wrote " + docBase + " docs, finish called with numDocs=" + numDocs);
+          }
+          indexWriter.Finish(numDocs);
+        }
+
+        public override int Merge(MergeState mergeState) 
+        {
+          int docCount = 0;
+          int idx = 0;
+
+          foreach (AtomicReader reader in mergeState.readers) 
+          {
+            SegmentReader matchingSegmentReader = mergeState.matchingSegmentReaders[idx++];
+            CompressingStoredFieldsReader matchingFieldsReader = null;
+            if (matchingSegmentReader != null) 
+            {
+              StoredFieldsReader fieldsReader = matchingSegmentReader.FieldsReader;
+              // we can only bulk-copy if the matching reader is also a CompressingStoredFieldsReader
+              if (fieldsReader != null && fieldsReader is CompressingStoredFieldsReader) 
+              {
+                matchingFieldsReader = (CompressingStoredFieldsReader) fieldsReader;
               }
-              // copy non-deleted docs
-              for (; docID < it.docBase + it.chunkDocs; docID = nextLiveDoc(docID + 1, liveDocs, maxDoc)) {
-                final int diff = docID - it.docBase;
-                startDocument(it.numStoredFields[diff]);
-                bufferedDocs.writeBytes(it.bytes.bytes, it.bytes.offset + startOffsets[diff], it.lengths[diff]);
-                finishDocument();
+            }
+
+            int maxDoc = reader.MaxDoc;
+            IBits liveDocs = reader.LiveDocs;
+
+            if (matchingFieldsReader == null) {
+              // naive merge...
+              for (int i = NextLiveDoc(0, liveDocs, maxDoc); i < maxDoc; i = NextLiveDoc(i + 1, liveDocs, maxDoc)) {
+                Document doc = reader.Document(i);
+                AddDocument(doc, mergeState.fieldInfos);
                 ++docCount;
-                mergeState.checkAbort.work(300);
+                mergeState.checkAbort.Work(300);
+              }
+            } else {
+              int docID = NextLiveDoc(0, liveDocs, maxDoc);
+              if (docID < maxDoc) {
+                // not all docs were deleted
+                ChunkIterator it = matchingFieldsReader.ChunkIterator(docID);
+                int[] startOffsets = new int[0];
+                do {
+                  // go to the next chunk that contains docID
+                  it.Next(docID);
+                  // transform lengths into offsets
+                  if (startOffsets.Length < it.chunkDocs) {
+                    startOffsets = new int[ArrayUtil.Oversize(it.chunkDocs, 4)];
+                  }
+                  for (int i = 1; i < it.chunkDocs; ++i) {
+                    startOffsets[i] = startOffsets[i - 1] + it.lengths[i - 1];
+                  }
+
+                  if (compressionMode == matchingFieldsReader.CompressionMode // same compression mode
+                      && numBufferedDocs == 0 // starting a new chunk
+                      && startOffsets[it.chunkDocs - 1] < chunkSize // chunk is small enough
+                      && startOffsets[it.chunkDocs - 1] + it.lengths[it.chunkDocs - 1] >= chunkSize // chunk is large enough
+                      && NextDeletedDoc(it.docBase, liveDocs, it.docBase + it.chunkDocs) == it.docBase + it.chunkDocs) { // no deletion in the chunk
+
+                    // no need to decompress, just copy data
+                    indexWriter.WriteIndex(it.chunkDocs, fieldsStream.FilePointer);
+                    WriteHeader(this.docBase, it.chunkDocs, it.numStoredFields, it.lengths);
+                    it.CopyCompressedData(fieldsStream);
+                    this.docBase += it.chunkDocs;
+                    docID = NextLiveDoc(it.docBase + it.chunkDocs, liveDocs, maxDoc);
+                    docCount += it.chunkDocs;
+                    mergeState.checkAbort.Work(300 * it.chunkDocs);
+                  } else {
+                    // decompress
+                    it.Decompress();
+                    if (startOffsets[it.chunkDocs - 1] + it.lengths[it.chunkDocs - 1] != it.bytes.length) {
+                      throw new CorruptIndexException("Corrupted: expected chunk size=" + (startOffsets[it.chunkDocs - 1] + it.lengths[it.chunkDocs - 1]) + ", got " + it.bytes.length);
+                    }
+                    // copy non-deleted docs
+                    for (; docID < it.docBase + it.chunkDocs; docID = NextLiveDoc(docID + 1, liveDocs, maxDoc)) {
+                      int diff = docID - it.docBase;
+                      StartDocument(it.numStoredFields[diff]);
+                      bufferedDocs.WriteBytes(it.bytes.bytes, it.bytes.offset + startOffsets[diff], it.lengths[diff]);
+                      FinishDocument();
+                      ++docCount;
+                      mergeState.checkAbort.Work(300);
+                    }
+                  }
+                } while (docID < maxDoc);
               }
             }
-          } while (docID < maxDoc);
+          }
+
+          Finish(mergeState.fieldInfos, docCount);
+          return docCount;
         }
-      }
-    }
-    finish(mergeState.fieldInfos, docCount);
-    return docCount;
-  }
 
-  private static int nextLiveDoc(int doc, Bits liveDocs, int maxDoc) {
-    if (liveDocs == null) {
-      return doc;
-    }
-    while (doc < maxDoc && !liveDocs.get(doc)) {
-      ++doc;
-    }
-    return doc;
-  }
+        private static int NextLiveDoc(int doc, IBits liveDocs, int maxDoc)
+        {
+            if (liveDocs == null)
+            {
+                return doc;
+            }
+            while (doc < maxDoc && !liveDocs[doc])
+            {
+                ++doc;
+            }
+            return doc;
+        }
 
-  private static int nextDeletedDoc(int doc, Bits liveDocs, int maxDoc) {
-    if (liveDocs == null) {
-      return maxDoc;
-    }
-    while (doc < maxDoc && liveDocs.get(doc)) {
-      ++doc;
-    }
-    return doc;
-  }
+        private static int NextDeletedDoc(int doc, IBits liveDocs, int maxDoc)
+        {
+            if (liveDocs == null)
+            {
+                return maxDoc;
+            }
+            while (doc < maxDoc && liveDocs[doc])
+            {
+                ++doc;
+            }
+            return doc;
+        }
 
+    }
 }
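
The merge path above hinges on the two cursors at the bottom of the class: NextLiveDoc advances past deleted documents, and NextDeletedDoc bounds an undeleted run, which is what makes the chunk bulk-copy condition safe to test. A minimal standalone sketch of the cursor pattern, with a plain bool[] standing in for IBits (illustrative only, not code from this commit):

    using System;

    internal static class LiveDocCursorDemo
    {
        // Same shape as NextLiveDoc above: null liveDocs means "no deletions".
        private static int NextLiveDoc(int doc, bool[] liveDocs, int maxDoc)
        {
            if (liveDocs == null) return doc;
            while (doc < maxDoc && !liveDocs[doc]) ++doc;
            return doc;
        }

        static void Main()
        {
            bool[] liveDocs = { true, false, false, true, true }; // docs 1 and 2 are deleted
            for (int d = NextLiveDoc(0, liveDocs, liveDocs.Length);
                 d < liveDocs.Length;
                 d = NextLiveDoc(d + 1, liveDocs, liveDocs.Length))
            {
                Console.WriteLine("copy live doc " + d); // prints 0, 3, 4
            }
        }
    }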

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/64c13f3c/src/core/Codecs/Compressing/Compressor.cs
----------------------------------------------------------------------
diff --git a/src/core/Codecs/Compressing/Compressor.cs b/src/core/Codecs/Compressing/Compressor.cs
index 48fdb74..00c0053 100644
--- a/src/core/Codecs/Compressing/Compressor.cs
+++ b/src/core/Codecs/Compressing/Compressor.cs
@@ -32,7 +32,7 @@ namespace Lucene.Net.Codecs.Compressing
          * compressor to add all necessary information so that a {@link Decompressor}
          * will know when to stop decompressing bytes from the stream.
          */
-        public abstract void Compress(byte[] bytes, int off, int len, DataOutput output);
+        public abstract void Compress(sbyte[] bytes, int off, int len, DataOutput output);
 
     }
 }
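
The byte[] -> sbyte[] signature change follows the port's convention of mapping Java's signed byte to C#'s sbyte. A hypothetical pass-through implementation of the contract (RawCompressor and the sbyte WriteBytes overload are assumptions for illustration, not part of this commit):

    // A no-op "compressor" that copies its input straight to the output.
    internal sealed class RawCompressor : Compressor
    {
        public override void Compress(sbyte[] bytes, int off, int len, DataOutput output)
        {
            // Assumes the ported DataOutput exposes a WriteBytes(sbyte[], int, int) overload.
            output.WriteBytes(bytes, off, len);
        }
    }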

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/64c13f3c/src/core/Codecs/Compressing/GrowableByteArrayDataOutput.cs
----------------------------------------------------------------------
diff --git a/src/core/Codecs/Compressing/GrowableByteArrayDataOutput.cs b/src/core/Codecs/Compressing/GrowableByteArrayDataOutput.cs
index a0b8eba..d6b873d 100644
--- a/src/core/Codecs/Compressing/GrowableByteArrayDataOutput.cs
+++ b/src/core/Codecs/Compressing/GrowableByteArrayDataOutput.cs
@@ -25,13 +25,37 @@ namespace Lucene.Net.Codecs.Compressing
 {
     internal sealed class GrowableByteArrayDataOutput : DataOutput
     {
-        sbyte[] bytes;
-        int length;
+        private sbyte[] _bytes;
+        private int _length;
 
-        GrowableByteArrayDataOutput(int cp)
+        public GrowableByteArrayDataOutput(int cp)
         {
-            this.bytes = new sbyte[ArrayUtil.Oversize(cp, 1)];
-            this.length = 0;
+            Bytes = new sbyte[ArrayUtil.Oversize(cp, 1)];
+            Length = 0;
+        }
+
+        public sbyte[] Bytes
+        {
+            get
+            {
+                return _bytes;
+            }
+            set
+            {
+                _bytes = value;
+            }
+        }
+
+        public int Length
+        {
+            get
+            {
+                return _length;
+            }
+            set
+            {
+                _length = value;
+            }
         }
 
         public override void WriteByte(byte b)
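
ArrayUtil.Oversize(cp, 1) in the constructor reserves amortized-growth headroom up front. The hunk cuts off before WriteByte's body; a sketch of how the write path would grow the buffer on demand (assuming the port provides ArrayUtil.Grow for sbyte[]; illustrative only, not the committed body):

    public override void WriteByte(byte b)
    {
        if (Length >= Bytes.Length)
        {
            Bytes = ArrayUtil.Grow(Bytes, Length + 1); // rounds capacity up via Oversize
        }
        Bytes[Length++] = (sbyte)b;
    }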

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/64c13f3c/src/core/Lucene.Net.csproj
----------------------------------------------------------------------
diff --git a/src/core/Lucene.Net.csproj b/src/core/Lucene.Net.csproj
index ce097bb..85f9818 100644
--- a/src/core/Lucene.Net.csproj
+++ b/src/core/Lucene.Net.csproj
@@ -187,7 +187,9 @@
     <Compile Include="Codecs\Codec.cs" />
     <Compile Include="Codecs\CodecUtil.cs" />
     <Compile Include="Codecs\Compressing\CompressingStoredFieldsFormat.cs" />
+    <Compile Include="Codecs\Compressing\CompressingStoredFieldsIndexReader.cs" />
     <Compile Include="Codecs\Compressing\CompressingStoredFieldsReader.cs" />
+    <Compile Include="Codecs\Compressing\CompressingStoredFieldsWriter.cs" />
     <Compile Include="Codecs\Compressing\CompressionMode.cs" />
     <Compile Include="Codecs\Compressing\Compressor.cs" />
     <Compile Include="Codecs\Compressing\Decompressor.cs" />


[42/50] [abbrv] Initial port of classic QueryParser. Broken.

Posted by mh...@apache.org.
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/0e6eb14a/src/contrib/QueryParsers/Classic/QueryParserConstants.cs
----------------------------------------------------------------------
diff --git a/src/contrib/QueryParsers/Classic/QueryParserConstants.cs b/src/contrib/QueryParsers/Classic/QueryParserConstants.cs
new file mode 100644
index 0000000..1d1c5ca
--- /dev/null
+++ b/src/contrib/QueryParsers/Classic/QueryParserConstants.cs
@@ -0,0 +1,126 @@
+using System;
+using System.Collections.Generic;
+using System.Linq;
+using System.Text;
+using System.Threading.Tasks;
+
+namespace Lucene.Net.QueryParsers.Classic
+{
+    // .NET Port: interfaces cannot declare fields, so this type exists only as a marker interface.
+    public interface IQueryParserConstants
+    {
+    }
+
+    public static class QueryParserConstants
+    {
+        /** End of File. */
+        public const int EOF = 0;
+        /** RegularExpression Id. */
+        public const int _NUM_CHAR = 1;
+        /** RegularExpression Id. */
+        public const int _ESCAPED_CHAR = 2;
+        /** RegularExpression Id. */
+        public const int _TERM_START_CHAR = 3;
+        /** RegularExpression Id. */
+        public const int _TERM_CHAR = 4;
+        /** RegularExpression Id. */
+        public const int _WHITESPACE = 5;
+        /** RegularExpression Id. */
+        public const int _QUOTED_CHAR = 6;
+        /** RegularExpression Id. */
+        public const int AND = 8;
+        /** RegularExpression Id. */
+        public const int OR = 9;
+        /** RegularExpression Id. */
+        public const int NOT = 10;
+        /** RegularExpression Id. */
+        public const int PLUS = 11;
+        /** RegularExpression Id. */
+        public const int MINUS = 12;
+        /** RegularExpression Id. */
+        public const int BAREOPER = 13;
+        /** RegularExpression Id. */
+        public const int LPAREN = 14;
+        /** RegularExpression Id. */
+        public const int RPAREN = 15;
+        /** RegularExpression Id. */
+        public const int COLON = 16;
+        /** RegularExpression Id. */
+        public const int STAR = 17;
+        /** RegularExpression Id. */
+        public const int CARAT = 18;
+        /** RegularExpression Id. */
+        public const int QUOTED = 19;
+        /** RegularExpression Id. */
+        public const int TERM = 20;
+        /** RegularExpression Id. */
+        public const int FUZZY_SLOP = 21;
+        /** RegularExpression Id. */
+        public const int PREFIXTERM = 22;
+        /** RegularExpression Id. */
+        public const int WILDTERM = 23;
+        /** RegularExpression Id. */
+        public const int REGEXPTERM = 24;
+        /** RegularExpression Id. */
+        public const int RANGEIN_START = 25;
+        /** RegularExpression Id. */
+        public const int RANGEEX_START = 26;
+        /** RegularExpression Id. */
+        public const int NUMBER = 27;
+        /** RegularExpression Id. */
+        public const int RANGE_TO = 28;
+        /** RegularExpression Id. */
+        public const int RANGEIN_END = 29;
+        /** RegularExpression Id. */
+        public const int RANGEEX_END = 30;
+        /** RegularExpression Id. */
+        public const int RANGE_QUOTED = 31;
+        /** RegularExpression Id. */
+        public const int RANGE_GOOP = 32;
+
+        /** Lexical state. */
+        public const int Boost = 0;
+        /** Lexical state. */
+        public const int Range = 1;
+        /** Lexical state. */
+        public const int DEFAULT = 2;
+
+        /** Literal token values. */
+        public static readonly String[] tokenImage = {
+    "<EOF>",
+    "<_NUM_CHAR>",
+    "<_ESCAPED_CHAR>",
+    "<_TERM_START_CHAR>",
+    "<_TERM_CHAR>",
+    "<_WHITESPACE>",
+    "<_QUOTED_CHAR>",
+    "<token of kind 7>",
+    "<AND>",
+    "<OR>",
+    "<NOT>",
+    "\"+\"",
+    "\"-\"",
+    "<BAREOPER>",
+    "\"(\"",
+    "\")\"",
+    "\":\"",
+    "\"*\"",
+    "\"^\"",
+    "<QUOTED>",
+    "<TERM>",
+    "<FUZZY_SLOP>",
+    "<PREFIXTERM>",
+    "<WILDTERM>",
+    "<REGEXPTERM>",
+    "\"[\"",
+    "\"{\"",
+    "<NUMBER>",
+    "\"TO\"",
+    "\"]\"",
+    "\"}\"",
+    "<RANGE_QUOTED>",
+    "<RANGE_GOOP>",
+  };
+
+    }
+}
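
tokenImage is indexed by the token-kind constants above; that is how the parser turns a numeric kind back into a readable name for error messages. Illustrative usage (not part of this commit):

    int kind = QueryParserConstants.AND;
    Console.WriteLine(QueryParserConstants.tokenImage[kind]);                          // <AND>
    Console.WriteLine(QueryParserConstants.tokenImage[QueryParserConstants.RANGE_TO]); // "TO"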

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/0e6eb14a/src/contrib/QueryParsers/Classic/QueryParserTokenManager.cs
----------------------------------------------------------------------
diff --git a/src/contrib/QueryParsers/Classic/QueryParserTokenManager.cs b/src/contrib/QueryParsers/Classic/QueryParserTokenManager.cs
new file mode 100644
index 0000000..29ed068
--- /dev/null
+++ b/src/contrib/QueryParsers/Classic/QueryParserTokenManager.cs
@@ -0,0 +1,1188 @@
+using System;
+using System.Collections.Generic;
+using System.IO;
+using System.Linq;
+using System.Text;
+using System.Threading.Tasks;
+
+namespace Lucene.Net.QueryParsers.Classic
+{
+    public class QueryParserTokenManager : IQueryParserConstants
+    {
+        /** Debug output. */
+        public TextWriter debugStream = Console.Out;
+        /** Set debug output. */
+        public void SetDebugStream(TextWriter ds) { debugStream = ds; }
+
+        private int jjStopStringLiteralDfa_2(int pos, long active0)
+        {
+            switch (pos)
+            {
+                default:
+                    return -1;
+            }
+        }
+
+        private int jjStartNfa_2(int pos, long active0)
+        {
+            return jjMoveNfa_2(jjStopStringLiteralDfa_2(pos, active0), pos + 1);
+        }
+
+        private int jjStopAtPos(int pos, int kind)
+        {
+            jjmatchedKind = kind;
+            jjmatchedPos = pos;
+            return pos + 1;
+        }
+
+        private int jjMoveStringLiteralDfa0_2()
+        {
+            switch (curChar)
+            {
+                case (char)40:
+                    return jjStopAtPos(0, 14);
+                case (char)41:
+                    return jjStopAtPos(0, 15);
+                case (char)42:
+                    return jjStartNfaWithStates_2(0, 17, 49);
+                case (char)43:
+                    return jjStartNfaWithStates_2(0, 11, 15);
+                case (char)45:
+                    return jjStartNfaWithStates_2(0, 12, 15);
+                case (char)58:
+                    return jjStopAtPos(0, 16);
+                case (char)91:
+                    return jjStopAtPos(0, 25);
+                case (char)94:
+                    return jjStopAtPos(0, 18);
+                case (char)123:
+                    return jjStopAtPos(0, 26);
+                default:
+                    return jjMoveNfa_2(0, 0);
+            }
+        }
+
+        private int jjStartNfaWithStates_2(int pos, int kind, int state)
+        {
+            jjmatchedKind = kind;
+            jjmatchedPos = pos;
+            try { curChar = input_stream.ReadChar(); }
+            catch (IOException e) { return pos + 1; }
+            return jjMoveNfa_2(state, pos + 1);
+        }
+
+        internal static readonly long[] jjbitVec0 = {
+            0x1L, 0x0L, 0x0L, 0x0L
+        };
+        internal static readonly long[] jjbitVec1 = {
+            unchecked((long)0xfffffffffffffffeL), unchecked((long)0xffffffffffffffffL), unchecked((long)0xffffffffffffffffL), unchecked((long)0xffffffffffffffffL)
+        };
+        internal static readonly long[] jjbitVec3 = {
+            0x0L, 0x0L, unchecked((long)0xffffffffffffffffL), unchecked((long)0xffffffffffffffffL)
+        };
+        internal static readonly long[] jjbitVec4 = {
+            unchecked((long)0xfffefffffffffffeL), unchecked((long)0xffffffffffffffffL), unchecked((long)0xffffffffffffffffL), unchecked((long)0xffffffffffffffffL)
+        };
+
+        private int jjMoveNfa_2(int startState, int curPos)
+        {
+            int startsAt = 0;
+            jjnewStateCnt = 49;
+            int i = 1;
+            jjstateSet[0] = startState;
+            int kind = 0x7fffffff;
+            for (; ; )
+            {
+                if (++jjround == 0x7fffffff)
+                    ReInitRounds();
+                if (curChar < 64)
+                {
+                    long l = 1L << curChar;
+                    do
+                    {
+                        switch (jjstateSet[--i])
+                        {
+                            case 49:
+                            case 33:
+                                if ((unchecked((long)0xfbff7cf8ffffd9ffL) & l) == 0L)
+                                    break;
+                                if (kind > 23)
+                                    kind = 23;
+                                jjCheckNAddTwoStates(33, 34);
+                                break;
+                            case 0:
+                                if ((unchecked((long)0xfbff54f8ffffd9ffL) & l) != 0L)
+                                {
+                                    if (kind > 23)
+                                        kind = 23;
+                                    jjCheckNAddTwoStates(33, 34);
+                                }
+                                else if ((0x100002600L & l) != 0L)
+                                {
+                                    if (kind > 7)
+                                        kind = 7;
+                                }
+                                else if ((0x280200000000L & l) != 0L)
+                                    jjstateSet[jjnewStateCnt++] = 15;
+                                else if (curChar == 47)
+                                    jjCheckNAddStates(0, 2);
+                                else if (curChar == 34)
+                                    jjCheckNAddStates(3, 5);
+                                if ((0x7bff50f8ffffd9ffL & l) != 0L)
+                                {
+                                    if (kind > 20)
+                                        kind = 20;
+                                    jjCheckNAddStates(6, 10);
+                                }
+                                else if (curChar == 42)
+                                {
+                                    if (kind > 22)
+                                        kind = 22;
+                                }
+                                else if (curChar == 33)
+                                {
+                                    if (kind > 10)
+                                        kind = 10;
+                                }
+                                if (curChar == 38)
+                                    jjstateSet[jjnewStateCnt++] = 4;
+                                break;
+                            case 4:
+                                if (curChar == 38 && kind > 8)
+                                    kind = 8;
+                                break;
+                            case 5:
+                                if (curChar == 38)
+                                    jjstateSet[jjnewStateCnt++] = 4;
+                                break;
+                            case 13:
+                                if (curChar == 33 && kind > 10)
+                                    kind = 10;
+                                break;
+                            case 14:
+                                if ((0x280200000000L & l) != 0L)
+                                    jjstateSet[jjnewStateCnt++] = 15;
+                                break;
+                            case 15:
+                                if ((0x100002600L & l) != 0L && kind > 13)
+                                    kind = 13;
+                                break;
+                            case 16:
+                                if (curChar == 34)
+                                    jjCheckNAddStates(3, 5);
+                                break;
+                            case 17:
+                                if ((unchecked((long)0xfffffffbffffffffL) & l) != 0L)
+                                    jjCheckNAddStates(3, 5);
+                                break;
+                            case 19:
+                                jjCheckNAddStates(3, 5);
+                                break;
+                            case 20:
+                                if (curChar == 34 && kind > 19)
+                                    kind = 19;
+                                break;
+                            case 22:
+                                if ((0x3ff000000000000L & l) == 0L)
+                                    break;
+                                if (kind > 21)
+                                    kind = 21;
+                                jjCheckNAddStates(11, 14);
+                                break;
+                            case 23:
+                                if (curChar == 46)
+                                    jjCheckNAdd(24);
+                                break;
+                            case 24:
+                                if ((0x3ff000000000000L & l) == 0L)
+                                    break;
+                                if (kind > 21)
+                                    kind = 21;
+                                jjCheckNAddStates(15, 17);
+                                break;
+                            case 25:
+                                if ((0x7bff78f8ffffd9ffL & l) == 0L)
+                                    break;
+                                if (kind > 21)
+                                    kind = 21;
+                                jjCheckNAddTwoStates(25, 26);
+                                break;
+                            case 27:
+                                if (kind > 21)
+                                    kind = 21;
+                                jjCheckNAddTwoStates(25, 26);
+                                break;
+                            case 28:
+                                if ((0x7bff78f8ffffd9ffL & l) == 0L)
+                                    break;
+                                if (kind > 21)
+                                    kind = 21;
+                                jjCheckNAddTwoStates(28, 29);
+                                break;
+                            case 30:
+                                if (kind > 21)
+                                    kind = 21;
+                                jjCheckNAddTwoStates(28, 29);
+                                break;
+                            case 31:
+                                if (curChar == 42 && kind > 22)
+                                    kind = 22;
+                                break;
+                            case 32:
+                                if ((unchecked((long)0xfbff54f8ffffd9ffL) & l) == 0L)
+                                    break;
+                                if (kind > 23)
+                                    kind = 23;
+                                jjCheckNAddTwoStates(33, 34);
+                                break;
+                            case 35:
+                                if (kind > 23)
+                                    kind = 23;
+                                jjCheckNAddTwoStates(33, 34);
+                                break;
+                            case 36:
+                            case 38:
+                                if (curChar == 47)
+                                    jjCheckNAddStates(0, 2);
+                                break;
+                            case 37:
+                                if ((unchecked((long)0xffff7fffffffffffL) & l) != 0L)
+                                    jjCheckNAddStates(0, 2);
+                                break;
+                            case 40:
+                                if (curChar == 47 && kind > 24)
+                                    kind = 24;
+                                break;
+                            case 41:
+                                if ((0x7bff50f8ffffd9ffL & l) == 0L)
+                                    break;
+                                if (kind > 20)
+                                    kind = 20;
+                                jjCheckNAddStates(6, 10);
+                                break;
+                            case 42:
+                                if ((0x7bff78f8ffffd9ffL & l) == 0L)
+                                    break;
+                                if (kind > 20)
+                                    kind = 20;
+                                jjCheckNAddTwoStates(42, 43);
+                                break;
+                            case 44:
+                                if (kind > 20)
+                                    kind = 20;
+                                jjCheckNAddTwoStates(42, 43);
+                                break;
+                            case 45:
+                                if ((0x7bff78f8ffffd9ffL & l) != 0L)
+                                    jjCheckNAddStates(18, 20);
+                                break;
+                            case 47:
+                                jjCheckNAddStates(18, 20);
+                                break;
+                            default: break;
+                        }
+                    } while (i != startsAt);
+                }
+                else if (curChar < 128)
+                {
+                    long l = 1L << (curChar & 63); // Java's 077 is octal (63); C# has no octal literals
+                    do
+                    {
+                        switch (jjstateSet[--i])
+                        {
+                            case 49:
+                                if ((unchecked((long)0x97ffffff87ffffffL) & l) != 0L)
+                                {
+                                    if (kind > 23)
+                                        kind = 23;
+                                    jjCheckNAddTwoStates(33, 34);
+                                }
+                                else if (curChar == 92)
+                                    jjCheckNAddTwoStates(35, 35);
+                                break;
+                            case 0:
+                                if ((unchecked((long)0x97ffffff87ffffffL) & l) != 0L)
+                                {
+                                    if (kind > 20)
+                                        kind = 20;
+                                    jjCheckNAddStates(6, 10);
+                                }
+                                else if (curChar == 92)
+                                    jjCheckNAddStates(21, 23);
+                                else if (curChar == 126)
+                                {
+                                    if (kind > 21)
+                                        kind = 21;
+                                    jjCheckNAddStates(24, 26);
+                                }
+                                if ((unchecked((long)0x97ffffff87ffffffL) & l) != 0L)
+                                {
+                                    if (kind > 23)
+                                        kind = 23;
+                                    jjCheckNAddTwoStates(33, 34);
+                                }
+                                if (curChar == 78)
+                                    jjstateSet[jjnewStateCnt++] = 11;
+                                else if (curChar == 124)
+                                    jjstateSet[jjnewStateCnt++] = 8;
+                                else if (curChar == 79)
+                                    jjstateSet[jjnewStateCnt++] = 6;
+                                else if (curChar == 65)
+                                    jjstateSet[jjnewStateCnt++] = 2;
+                                break;
+                            case 1:
+                                if (curChar == 68 && kind > 8)
+                                    kind = 8;
+                                break;
+                            case 2:
+                                if (curChar == 78)
+                                    jjstateSet[jjnewStateCnt++] = 1;
+                                break;
+                            case 3:
+                                if (curChar == 65)
+                                    jjstateSet[jjnewStateCnt++] = 2;
+                                break;
+                            case 6:
+                                if (curChar == 82 && kind > 9)
+                                    kind = 9;
+                                break;
+                            case 7:
+                                if (curChar == 79)
+                                    jjstateSet[jjnewStateCnt++] = 6;
+                                break;
+                            case 8:
+                                if (curChar == 124 && kind > 9)
+                                    kind = 9;
+                                break;
+                            case 9:
+                                if (curChar == 124)
+                                    jjstateSet[jjnewStateCnt++] = 8;
+                                break;
+                            case 10:
+                                if (curChar == 84 && kind > 10)
+                                    kind = 10;
+                                break;
+                            case 11:
+                                if (curChar == 79)
+                                    jjstateSet[jjnewStateCnt++] = 10;
+                                break;
+                            case 12:
+                                if (curChar == 78)
+                                    jjstateSet[jjnewStateCnt++] = 11;
+                                break;
+                            case 17:
+                                if ((unchecked((long)0xffffffffefffffffL) & l) != 0L)
+                                    jjCheckNAddStates(3, 5);
+                                break;
+                            case 18:
+                                if (curChar == 92)
+                                    jjstateSet[jjnewStateCnt++] = 19;
+                                break;
+                            case 19:
+                                jjCheckNAddStates(3, 5);
+                                break;
+                            case 21:
+                                if (curChar != 126)
+                                    break;
+                                if (kind > 21)
+                                    kind = 21;
+                                jjCheckNAddStates(24, 26);
+                                break;
+                            case 25:
+                                if ((unchecked((long)0x97ffffff87ffffffL) & l) == 0L)
+                                    break;
+                                if (kind > 21)
+                                    kind = 21;
+                                jjCheckNAddTwoStates(25, 26);
+                                break;
+                            case 26:
+                                if (curChar == 92)
+                                    jjAddStates(27, 28);
+                                break;
+                            case 27:
+                                if (kind > 21)
+                                    kind = 21;
+                                jjCheckNAddTwoStates(25, 26);
+                                break;
+                            case 28:
+                                if ((unchecked((long)0x97ffffff87ffffffL) & l) == 0L)
+                                    break;
+                                if (kind > 21)
+                                    kind = 21;
+                                jjCheckNAddTwoStates(28, 29);
+                                break;
+                            case 29:
+                                if (curChar == 92)
+                                    jjAddStates(29, 30);
+                                break;
+                            case 30:
+                                if (kind > 21)
+                                    kind = 21;
+                                jjCheckNAddTwoStates(28, 29);
+                                break;
+                            case 32:
+                                if ((unchecked((long)0x97ffffff87ffffffL) & l) == 0L)
+                                    break;
+                                if (kind > 23)
+                                    kind = 23;
+                                jjCheckNAddTwoStates(33, 34);
+                                break;
+                            case 33:
+                                if ((unchecked((long)0x97ffffff87ffffffL) & l) == 0L)
+                                    break;
+                                if (kind > 23)
+                                    kind = 23;
+                                jjCheckNAddTwoStates(33, 34);
+                                break;
+                            case 34:
+                                if (curChar == 92)
+                                    jjCheckNAddTwoStates(35, 35);
+                                break;
+                            case 35:
+                                if (kind > 23)
+                                    kind = 23;
+                                jjCheckNAddTwoStates(33, 34);
+                                break;
+                            case 37:
+                                jjAddStates(0, 2);
+                                break;
+                            case 39:
+                                if (curChar == 92)
+                                    jjstateSet[jjnewStateCnt++] = 38;
+                                break;
+                            case 41:
+                                if ((unchecked((long)0x97ffffff87ffffffL) & l) == 0L)
+                                    break;
+                                if (kind > 20)
+                                    kind = 20;
+                                jjCheckNAddStates(6, 10);
+                                break;
+                            case 42:
+                                if ((unchecked((long)0x97ffffff87ffffffL) & l) == 0L)
+                                    break;
+                                if (kind > 20)
+                                    kind = 20;
+                                jjCheckNAddTwoStates(42, 43);
+                                break;
+                            case 43:
+                                if (curChar == 92)
+                                    jjCheckNAddTwoStates(44, 44);
+                                break;
+                            case 44:
+                                if (kind > 20)
+                                    kind = 20;
+                                jjCheckNAddTwoStates(42, 43);
+                                break;
+                            case 45:
+                                if ((unchecked((long)0x97ffffff87ffffffL) & l) != 0L)
+                                    jjCheckNAddStates(18, 20);
+                                break;
+                            case 46:
+                                if (curChar == 92)
+                                    jjCheckNAddTwoStates(47, 47);
+                                break;
+                            case 47:
+                                jjCheckNAddStates(18, 20);
+                                break;
+                            case 48:
+                                if (curChar == 92)
+                                    jjCheckNAddStates(21, 23);
+                                break;
+                            default: break;
+                        }
+                    } while (i != startsAt);
+                }
+                else
+                {
+                    int hiByte = (int)(curChar >> 8);
+                    int i1 = hiByte >> 6;
+                    long l1 = 1L << (hiByte & 63);
+                    int i2 = (curChar & 0xff) >> 6;
+                    long l2 = 1L << (curChar & 63);
+                    do
+                    {
+                        switch (jjstateSet[--i])
+                        {
+                            case 49:
+                            case 33:
+                                if (!jjCanMove_2(hiByte, i1, i2, l1, l2))
+                                    break;
+                                if (kind > 23)
+                                    kind = 23;
+                                jjCheckNAddTwoStates(33, 34);
+                                break;
+                            case 0:
+                                if (jjCanMove_0(hiByte, i1, i2, l1, l2))
+                                {
+                                    if (kind > 7)
+                                        kind = 7;
+                                }
+                                if (jjCanMove_2(hiByte, i1, i2, l1, l2))
+                                {
+                                    if (kind > 23)
+                                        kind = 23;
+                                    jjCheckNAddTwoStates(33, 34);
+                                }
+                                if (jjCanMove_2(hiByte, i1, i2, l1, l2))
+                                {
+                                    if (kind > 20)
+                                        kind = 20;
+                                    jjCheckNAddStates(6, 10);
+                                }
+                                break;
+                            case 15:
+                                if (jjCanMove_0(hiByte, i1, i2, l1, l2) && kind > 13)
+                                    kind = 13;
+                                break;
+                            case 17:
+                            case 19:
+                                if (jjCanMove_1(hiByte, i1, i2, l1, l2))
+                                    jjCheckNAddStates(3, 5);
+                                break;
+                            case 25:
+                                if (!jjCanMove_2(hiByte, i1, i2, l1, l2))
+                                    break;
+                                if (kind > 21)
+                                    kind = 21;
+                                jjCheckNAddTwoStates(25, 26);
+                                break;
+                            case 27:
+                                if (!jjCanMove_1(hiByte, i1, i2, l1, l2))
+                                    break;
+                                if (kind > 21)
+                                    kind = 21;
+                                jjCheckNAddTwoStates(25, 26);
+                                break;
+                            case 28:
+                                if (!jjCanMove_2(hiByte, i1, i2, l1, l2))
+                                    break;
+                                if (kind > 21)
+                                    kind = 21;
+                                jjCheckNAddTwoStates(28, 29);
+                                break;
+                            case 30:
+                                if (!jjCanMove_1(hiByte, i1, i2, l1, l2))
+                                    break;
+                                if (kind > 21)
+                                    kind = 21;
+                                jjCheckNAddTwoStates(28, 29);
+                                break;
+                            case 32:
+                                if (!jjCanMove_2(hiByte, i1, i2, l1, l2))
+                                    break;
+                                if (kind > 23)
+                                    kind = 23;
+                                jjCheckNAddTwoStates(33, 34);
+                                break;
+                            case 35:
+                                if (!jjCanMove_1(hiByte, i1, i2, l1, l2))
+                                    break;
+                                if (kind > 23)
+                                    kind = 23;
+                                jjCheckNAddTwoStates(33, 34);
+                                break;
+                            case 37:
+                                if (jjCanMove_1(hiByte, i1, i2, l1, l2))
+                                    jjAddStates(0, 2);
+                                break;
+                            case 41:
+                                if (!jjCanMove_2(hiByte, i1, i2, l1, l2))
+                                    break;
+                                if (kind > 20)
+                                    kind = 20;
+                                jjCheckNAddStates(6, 10);
+                                break;
+                            case 42:
+                                if (!jjCanMove_2(hiByte, i1, i2, l1, l2))
+                                    break;
+                                if (kind > 20)
+                                    kind = 20;
+                                jjCheckNAddTwoStates(42, 43);
+                                break;
+                            case 44:
+                                if (!jjCanMove_1(hiByte, i1, i2, l1, l2))
+                                    break;
+                                if (kind > 20)
+                                    kind = 20;
+                                jjCheckNAddTwoStates(42, 43);
+                                break;
+                            case 45:
+                                if (jjCanMove_2(hiByte, i1, i2, l1, l2))
+                                    jjCheckNAddStates(18, 20);
+                                break;
+                            case 47:
+                                if (jjCanMove_1(hiByte, i1, i2, l1, l2))
+                                    jjCheckNAddStates(18, 20);
+                                break;
+                            default: break;
+                        }
+                    } while (i != startsAt);
+                }
+                if (kind != 0x7fffffff)
+                {
+                    jjmatchedKind = kind;
+                    jjmatchedPos = curPos;
+                    kind = 0x7fffffff;
+                }
+                ++curPos;
+                if ((i = jjnewStateCnt) == (startsAt = 49 - (jjnewStateCnt = startsAt)))
+                    return curPos;
+                try { curChar = input_stream.ReadChar(); }
+                catch (IOException e) { return curPos; }
+            }
+        }
+
+        private int jjMoveStringLiteralDfa0_0()
+        {
+            return jjMoveNfa_0(0, 0);
+        }
+
+        private int jjMoveNfa_0(int startState, int curPos)
+        {
+            int startsAt = 0;
+            jjnewStateCnt = 3;
+            int i = 1;
+            jjstateSet[0] = startState;
+            int kind = 0x7fffffff;
+            for (; ; )
+            {
+                if (++jjround == 0x7fffffff)
+                    ReInitRounds();
+                if (curChar < 64)
+                {
+                    long l = 1L << curChar;
+                    do
+                    {
+                        switch (jjstateSet[--i])
+                        {
+                            case 0:
+                                if ((0x3ff000000000000L & l) == 0L)
+                                    break;
+                                if (kind > 27)
+                                    kind = 27;
+                                jjAddStates(31, 32);
+                                break;
+                            case 1:
+                                if (curChar == 46)
+                                    jjCheckNAdd(2);
+                                break;
+                            case 2:
+                                if ((0x3ff000000000000L & l) == 0L)
+                                    break;
+                                if (kind > 27)
+                                    kind = 27;
+                                jjCheckNAdd(2);
+                                break;
+                            default: break;
+                        }
+                    } while (i != startsAt);
+                }
+                else if (curChar < 128)
+                {
+                    long l = 1L << (curChar & 63);
+                    do
+                    {
+                        switch (jjstateSet[--i])
+                        {
+                            default: break;
+                        }
+                    } while (i != startsAt);
+                }
+                else
+                {
+                    int hiByte = (int)(curChar >> 8);
+                    int i1 = hiByte >> 6;
+                    long l1 = 1L << (hiByte & 63);
+                    int i2 = (curChar & 0xff) >> 6;
+                    long l2 = 1L << (curChar & 63);
+                    do
+                    {
+                        switch (jjstateSet[--i])
+                        {
+                            default: break;
+                        }
+                    } while (i != startsAt);
+                }
+                if (kind != 0x7fffffff)
+                {
+                    jjmatchedKind = kind;
+                    jjmatchedPos = curPos;
+                    kind = 0x7fffffff;
+                }
+                ++curPos;
+                if ((i = jjnewStateCnt) == (startsAt = 3 - (jjnewStateCnt = startsAt)))
+                    return curPos;
+                try { curChar = input_stream.ReadChar(); }
+                catch (IOException e) { return curPos; }
+            }
+        }
+
+        private int jjStopStringLiteralDfa_1(int pos, long active0)
+        {
+            switch (pos)
+            {
+                case 0:
+                    if ((active0 & 0x10000000L) != 0L)
+                    {
+                        jjmatchedKind = 32;
+                        return 6;
+                    }
+                    return -1;
+                default:
+                    return -1;
+            }
+        }
+
+        private int jjStartNfa_1(int pos, long active0)
+        {
+            return jjMoveNfa_1(jjStopStringLiteralDfa_1(pos, active0), pos + 1);
+        }
+
+        private int jjMoveStringLiteralDfa0_1()
+        {
+            switch ((int)curChar)
+            {
+                case 84:
+                    return jjMoveStringLiteralDfa1_1(0x10000000L);
+                case 93:
+                    return jjStopAtPos(0, 29);
+                case 125:
+                    return jjStopAtPos(0, 30);
+                default:
+                    return jjMoveNfa_1(0, 0);
+            }
+        }
+
+        private int jjMoveStringLiteralDfa1_1(long active0)
+        {
+            try { curChar = input_stream.ReadChar(); }
+            catch (IOException e)
+            {
+                jjStopStringLiteralDfa_1(0, active0);
+                return 1;
+            }
+            switch ((int)curChar)
+            {
+                case 79:
+                    if ((active0 & 0x10000000L) != 0L)
+                        return jjStartNfaWithStates_1(1, 28, 6);
+                    break;
+                default:
+                    break;
+            }
+            return jjStartNfa_1(0, active0);
+        }
+
+        private int jjStartNfaWithStates_1(int pos, int kind, int state)
+        {
+            jjmatchedKind = kind;
+            jjmatchedPos = pos;
+            try { curChar = input_stream.ReadChar(); }
+            catch (IOException e) { return pos + 1; }
+            return jjMoveNfa_1(state, pos + 1);
+        }
+
+        private int jjMoveNfa_1(int startState, int curPos)
+        {
+            int startsAt = 0;
+            jjnewStateCnt = 7;
+            int i = 1;
+            jjstateSet[0] = startState;
+            int kind = 0x7fffffff;
+            for (; ; )
+            {
+                if (++jjround == 0x7fffffff)
+                    ReInitRounds();
+                if (curChar < 64)
+                {
+                    long l = 1L << curChar;
+                    do
+                    {
+                        switch (jjstateSet[--i])
+                        {
+                            case 0:
+                                if ((unchecked((long)0xfffffffeffffffffL) & l) != 0L)
+                                {
+                                    if (kind > 32)
+                                        kind = 32;
+                                    jjCheckNAdd(6);
+                                }
+                                if ((0x100002600L & l) != 0L)
+                                {
+                                    if (kind > 7)
+                                        kind = 7;
+                                }
+                                else if (curChar == 34)
+                                    jjCheckNAddTwoStates(2, 4);
+                                break;
+                            case 1:
+                                if (curChar == 34)
+                                    jjCheckNAddTwoStates(2, 4);
+                                break;
+                            case 2:
+                                if ((unchecked((long)0xfffffffbffffffffL) & l) != 0L)
+                                    jjCheckNAddStates(33, 35);
+                                break;
+                            case 3:
+                                if (curChar == 34)
+                                    jjCheckNAddStates(33, 35);
+                                break;
+                            case 5:
+                                if (curChar == 34 && kind > 31)
+                                    kind = 31;
+                                break;
+                            case 6:
+                                if ((unchecked((long)0xfffffffeffffffffL) & l) == 0L)
+                                    break;
+                                if (kind > 32)
+                                    kind = 32;
+                                jjCheckNAdd(6);
+                                break;
+                            default: break;
+                        }
+                    } while (i != startsAt);
+                }
+                else if (curChar < 128)
+                {
+                    long l = 1L << (curChar & 63);
+                    do
+                    {
+                        switch (jjstateSet[--i])
+                        {
+                            case 0:
+                            case 6:
+                                if ((unchecked((long)0xdfffffffdfffffffL) & l) == 0L)
+                                    break;
+                                if (kind > 32)
+                                    kind = 32;
+                                jjCheckNAdd(6);
+                                break;
+                            case 2:
+                                jjAddStates(33, 35);
+                                break;
+                            case 4:
+                                if (curChar == 92)
+                                    jjstateSet[jjnewStateCnt++] = 3;
+                                break;
+                            default: break;
+                        }
+                    } while (i != startsAt);
+                }
+                else
+                {
+                    int hiByte = (int)(curChar >> 8);
+                    int i1 = hiByte >> 6;
+                    long l1 = 1L << (hiByte & 63);
+                    int i2 = (curChar & 0xff) >> 6;
+                    long l2 = 1L << (curChar & 63);
+                    do
+                    {
+                        switch (jjstateSet[--i])
+                        {
+                            case 0:
+                                if (jjCanMove_0(hiByte, i1, i2, l1, l2))
+                                {
+                                    if (kind > 7)
+                                        kind = 7;
+                                }
+                                if (jjCanMove_1(hiByte, i1, i2, l1, l2))
+                                {
+                                    if (kind > 32)
+                                        kind = 32;
+                                    jjCheckNAdd(6);
+                                }
+                                break;
+                            case 2:
+                                if (jjCanMove_1(hiByte, i1, i2, l1, l2))
+                                    jjAddStates(33, 35);
+                                break;
+                            case 6:
+                                if (!jjCanMove_1(hiByte, i1, i2, l1, l2))
+                                    break;
+                                if (kind > 32)
+                                    kind = 32;
+                                jjCheckNAdd(6);
+                                break;
+                            default: break;
+                        }
+                    } while (i != startsAt);
+                }
+                if (kind != 0x7fffffff)
+                {
+                    jjmatchedKind = kind;
+                    jjmatchedPos = curPos;
+                    kind = 0x7fffffff;
+                }
+                ++curPos;
+                if ((i = jjnewStateCnt) == (startsAt = 7 - (jjnewStateCnt = startsAt)))
+                    return curPos;
+                try { curChar = input_stream.ReadChar(); }
+                catch (IOException e) { return curPos; }
+            }
+        }
+
+        internal static readonly int[] jjnextStates = {
+           37, 39, 40, 17, 18, 20, 42, 45, 31, 46, 43, 22, 23, 25, 26, 24, 
+           25, 26, 45, 31, 46, 44, 47, 35, 22, 28, 29, 27, 27, 30, 30, 0, 
+           1, 2, 4, 5, 
+        };
+
+        private static bool jjCanMove_0(int hiByte, int i1, int i2, long l1, long l2)
+        {
+            switch (hiByte)
+            {
+                case 48:
+                    return ((jjbitVec0[i2] & l2) != 0L);
+                default:
+                    return false;
+            }
+        }
+
+        private static bool jjCanMove_1(int hiByte, int i1, int i2, long l1, long l2)
+        {
+            switch (hiByte)
+            {
+                case 0:
+                    return ((jjbitVec3[i2] & l2) != 0L);
+                default:
+                    if ((jjbitVec1[i1] & l1) != 0L)
+                        return true;
+                    return false;
+            }
+        }
+
+        private static bool jjCanMove_2(int hiByte, int i1, int i2, long l1, long l2)
+        {
+            switch (hiByte)
+            {
+                case 0:
+                    return ((jjbitVec3[i2] & l2) != 0L);
+                case 48:
+                    return ((jjbitVec1[i2] & l2) != 0L);
+                default:
+                    if ((jjbitVec4[i1] & l1) != 0L)
+                        return true;
+                    return false;
+            }
+        }
+
+        /** Token literal values. */
+        public static readonly String[] jjstrLiteralImages = {
+            "", null, null, null, null, null, null, null, null, null, null, "\u002b", "\u002d", 
+            null, "\u0028", "\u0029", "\u003a", "\u002a", "\u005e", null, null, null, null, null, null, 
+            "\u005b", "\u007b", null, "\u0054\u004f", "\u005d", "\u007d", null, null, };
+
+        /** Lexer state names. */
+        public static readonly String[] lexStateNames = {
+           "Boost",
+           "Range",
+           "DEFAULT",
+        };
+
+        /** Lex State array. */
+        public static readonly int[] jjnewLexState = {
+           -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 0, -1, -1, -1, -1, -1, -1, 
+           1, 1, 2, -1, 2, 2, -1, -1, 
+        };
+        static readonly long[] jjtoToken = {
+           0x1ffffff01L, 
+        };
+        static readonly long[] jjtoSkip = {
+           0x80L, 
+        };
+
+        protected ICharStream input_stream;
+        private readonly int[] jjrounds = new int[49];
+        private readonly int[] jjstateSet = new int[98];
+        protected char curChar;
+
+        /** Constructor. */
+        public QueryParserTokenManager(ICharStream stream)
+        {
+            input_stream = stream;
+        }
+
+        /** Constructor. */
+        public QueryParserTokenManager(ICharStream stream, int lexState)
+            : this(stream)
+        {
+            SwitchTo(lexState);
+        }
+
+        /** Reinitialise parser. */
+        public void ReInit(ICharStream stream)
+        {
+            jjmatchedPos = jjnewStateCnt = 0;
+            curLexState = defaultLexState;
+            input_stream = stream;
+            ReInitRounds();
+        }
+
+        private void ReInitRounds()
+        {
+            int i;
+            jjround = unchecked((int)0x80000001);
+            for (i = 49; i-- > 0; )
+                jjrounds[i] = unchecked((int)0x80000000);
+        }
+
+        /** Reinitialise parser. */
+        public void ReInit(ICharStream stream, int lexState)
+        {
+            ReInit(stream);
+            SwitchTo(lexState);
+        }
+
+        /** Switch to specified lex state. */
+        public void SwitchTo(int lexState)
+        {
+            if (lexState >= 3 || lexState < 0)
+                throw new TokenMgrError("Error: Ignoring invalid lexical state : " + lexState + ". State unchanged.", TokenMgrError.INVALID_LEXICAL_STATE);
+            else
+                curLexState = lexState;
+        }
+
+        protected Token jjFillToken()
+        {
+            Token t;
+            String curTokenImage;
+            int beginLine;
+            int endLine;
+            int beginColumn;
+            int endColumn;
+            String im = jjstrLiteralImages[jjmatchedKind];
+            curTokenImage = (im == null) ? input_stream.GetImage() : im;
+            beginLine = input_stream.BeginLine;
+            beginColumn = input_stream.BeginColumn;
+            endLine = input_stream.EndLine;
+            endColumn = input_stream.EndColumn;
+            t = Token.NewToken(jjmatchedKind, curTokenImage);
+
+            t.beginLine = beginLine;
+            t.endLine = endLine;
+            t.beginColumn = beginColumn;
+            t.endColumn = endColumn;
+
+            return t;
+        }
+
+        int curLexState = 2;
+        int defaultLexState = 2;
+        int jjnewStateCnt;
+        int jjround;
+        int jjmatchedPos;
+        int jjmatchedKind;
+
+        /** Get the next Token. */
+        public Token GetNextToken()
+        {
+            Token matchedToken;
+            int curPos = 0;
+
+            for (; ; )
+            {
+                try
+                {
+                    curChar = input_stream.BeginToken();
+                }
+                catch (IOException e)
+                {
+                    jjmatchedKind = 0;
+                    matchedToken = jjFillToken();
+                    return matchedToken;
+                }
+
+                switch (curLexState)
+                {
+                    case 0:
+                        jjmatchedKind = 0x7fffffff;
+                        jjmatchedPos = 0;
+                        curPos = jjMoveStringLiteralDfa0_0();
+                        break;
+                    case 1:
+                        jjmatchedKind = 0x7fffffff;
+                        jjmatchedPos = 0;
+                        curPos = jjMoveStringLiteralDfa0_1();
+                        break;
+                    case 2:
+                        jjmatchedKind = 0x7fffffff;
+                        jjmatchedPos = 0;
+                        curPos = jjMoveStringLiteralDfa0_2();
+                        break;
+                }
+                if (jjmatchedKind != 0x7fffffff)
+                {
+                    if (jjmatchedPos + 1 < curPos)
+                        input_stream.Backup(curPos - jjmatchedPos - 1);
+                    if ((jjtoToken[jjmatchedKind >> 6] & (1L << (jjmatchedKind & 0x3f))) != 0L)
+                    {
+                        matchedToken = jjFillToken();
+                        if (jjnewLexState[jjmatchedKind] != -1)
+                            curLexState = jjnewLexState[jjmatchedKind];
+                        return matchedToken;
+                    }
+                    else
+                    {
+                        if (jjnewLexState[jjmatchedKind] != -1)
+                            curLexState = jjnewLexState[jjmatchedKind];
+                        continue;
+                    }
+                }
+                int error_line = input_stream.EndLine;
+                int error_column = input_stream.EndColumn;
+                String error_after = null;
+                bool EOFSeen = false;
+                try { input_stream.ReadChar(); input_stream.Backup(1); }
+                catch (IOException e1)
+                {
+                    EOFSeen = true;
+                    error_after = curPos <= 1 ? "" : input_stream.GetImage();
+                    if (curChar == '\n' || curChar == '\r')
+                    {
+                        error_line++;
+                        error_column = 0;
+                    }
+                    else
+                        error_column++;
+                }
+                if (!EOFSeen)
+                {
+                    input_stream.Backup(1);
+                    error_after = curPos <= 1 ? "" : input_stream.GetImage();
+                }
+                throw new TokenMgrError(EOFSeen, curLexState, error_line, error_column, error_after, curChar, TokenMgrError.LEXICAL_ERROR);
+            }
+        }
+
+        private void jjCheckNAdd(int state)
+        {
+            if (jjrounds[state] != jjround)
+            {
+                jjstateSet[jjnewStateCnt++] = state;
+                jjrounds[state] = jjround;
+            }
+        }
+
+        private void jjAddStates(int start, int end)
+        {
+            do
+            {
+                jjstateSet[jjnewStateCnt++] = jjnextStates[start];
+            } while (start++ != end);
+        }
+
+        private void jjCheckNAddTwoStates(int state1, int state2)
+        {
+            jjCheckNAdd(state1);
+            jjCheckNAdd(state2);
+        }
+
+        private void jjCheckNAddStates(int start, int end)
+        {
+            do
+            {
+                jjCheckNAdd(jjnextStates[start]);
+            } while (start++ != end);
+        }
+    }
+}
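
A quick usage sketch of the ported token manager (illustrative only, not
part of the commit). It assumes the ported FastCharStream wraps a
System.IO.TextReader the way its Java counterpart wraps a Reader; kind 0
is the JavaCC <EOF> token, which GetNextToken() returns once the input
is exhausted.

    using System;
    using System.IO;
    using Lucene.Net.QueryParsers.Classic;

    class TokenManagerDemo
    {
        static void Main()
        {
            ICharStream input = new FastCharStream(new StringReader("title:foo +body:\"bar baz\""));
            var tm = new QueryParserTokenManager(input);

            // kind 0 is <EOF> in JavaCC-generated lexers
            for (Token t = tm.GetNextToken(); t.kind != 0; t = tm.GetNextToken())
            {
                Console.WriteLine("kind={0} image=\"{1}\" at {2}:{3}",
                    t.kind, t.image, t.beginLine, t.beginColumn);
            }
        }
    }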

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/0e6eb14a/src/contrib/QueryParsers/Classic/Token.cs
----------------------------------------------------------------------
diff --git a/src/contrib/QueryParsers/Classic/Token.cs b/src/contrib/QueryParsers/Classic/Token.cs
new file mode 100644
index 0000000..876b9d4
--- /dev/null
+++ b/src/contrib/QueryParsers/Classic/Token.cs
@@ -0,0 +1,104 @@
+using System;
+using System.Collections.Generic;
+using System.Linq;
+using System.Runtime.Serialization;
+using System.Text;
+using System.Threading.Tasks;
+
+namespace Lucene.Net.QueryParsers.Classic
+{
+    [Serializable]
+    public class Token // : ISerializable
+    {
+        private const long serialVersionUID = 1L;
+
+        /**
+        * An integer that describes the kind of this token.  This numbering
+        * system is determined by JavaCCParser, and a table of these numbers is
+        * stored in the file ...Constants.java.
+        */
+        public int kind;
+
+        /** The line number of the first character of this Token. */
+        public int beginLine;
+        /** The column number of the first character of this Token. */
+        public int beginColumn;
+        /** The line number of the last character of this Token. */
+        public int endLine;
+        /** The column number of the last character of this Token. */
+        public int endColumn;
+
+        /**
+         * The string image of the token.
+         */
+        public String image;
+
+        /**
+        * A reference to the next regular (non-special) token from the input
+        * stream.  If this is the last token from the input stream, or if the
+        * token manager has not read tokens beyond this one, this field is
+        * set to null.  This is true only if this token is also a regular
+        * token.  Otherwise, see below for a description of the contents of
+        * this field.
+        */
+        public Token next;
+
+        /**
+         * This field is used to access special tokens that occur prior to this
+         * token, but after the immediately preceding regular (non-special) token.
+         * If there are no such special tokens, this field is set to null.
+         * When there are more than one such special token, this field refers
+         * to the last of these special tokens, which in turn refers to the next
+         * previous special token through its specialToken field, and so on
+         * until the first special token (whose specialToken field is null).
+         * The next fields of special tokens refer to other special tokens that
+         * immediately follow it (without an intervening regular token).  If there
+         * is no such token, this field is null.
+         */
+        public Token specialToken;
+
+        /**
+         * An optional attribute value of the Token.
+         * Tokens which are not used as syntactic sugar will often contain
+         * meaningful values that will be used later on by the compiler or
+         * interpreter. This attribute value is often different from the image.
+         * Any subclass of Token that actually wants to return a non-null value can
+         * override this method as appropriate.
+         */
+        public virtual Object Value
+        {
+            get { return null; }
+        }
+
+        public Token() { }
+
+        public Token(int kind)
+            : this(kind, null)
+        {
+        }
+
+        public Token(int kind, String image)
+        {
+            this.kind = kind;
+            this.image = image;
+        }
+
+        public override string ToString()
+        {
+            return image;
+        }
+
+        public static Token NewToken(int ofKind, String image)
+        {
+            switch (ofKind)
+            {
+                default: return new Token(ofKind, image);
+            }
+        }
+
+        public static Token NewToken(int ofKind)
+        {
+            return NewToken(ofKind, null);
+        }
+    }
+}
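
An illustrative helper (not in the ported file) for the specialToken
chain documented above; it assumes a grammar that actually records
special tokens, e.g. comments or whitespace.

    // Back up to the earliest special token preceding 't', then print
    // the run in input order by following 'next'.
    static void PrintSpecialTokens(Token t)
    {
        if (t.specialToken == null)
            return;
        Token s = t.specialToken;
        while (s.specialToken != null)
            s = s.specialToken;
        for (; s != null; s = s.next)
            Console.WriteLine("special: \"" + s.image + "\"");
    }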

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/0e6eb14a/src/contrib/QueryParsers/Classic/TokenMgrError.cs
----------------------------------------------------------------------
diff --git a/src/contrib/QueryParsers/Classic/TokenMgrError.cs b/src/contrib/QueryParsers/Classic/TokenMgrError.cs
new file mode 100644
index 0000000..7c4f017
--- /dev/null
+++ b/src/contrib/QueryParsers/Classic/TokenMgrError.cs
@@ -0,0 +1,105 @@
+using System;
+using System.Collections.Generic;
+using System.Linq;
+using System.Text;
+using System.Threading.Tasks;
+
+namespace Lucene.Net.QueryParsers.Classic
+{
+    public class TokenMgrError : Exception
+    {
+        private const long serialVersionUID = 1L;
+
+        internal const int LEXICAL_ERROR = 0;
+
+        internal const int STATIC_LEXER_ERROR = 1;
+
+        internal const int INVALID_LEXICAL_STATE = 2;
+
+        internal const int LOOP_DETECTED = 3;
+
+        internal int errorCode;
+
+        protected static String AddEscapes(String str)
+        {
+            StringBuilder retval = new StringBuilder();
+            char ch;
+            for (int i = 0; i < str.Length; i++)
+            {
+                switch (str[i])
+                {
+                    case (char)0:
+                        continue;
+                    case '\b':
+                        retval.Append("\\b");
+                        continue;
+                    case '\t':
+                        retval.Append("\\t");
+                        continue;
+                    case '\n':
+                        retval.Append("\\n");
+                        continue;
+                    case '\f':
+                        retval.Append("\\f");
+                        continue;
+                    case '\r':
+                        retval.Append("\\r");
+                        continue;
+                    case '\"':
+                        retval.Append("\\\"");
+                        continue;
+                    case '\'':
+                        retval.Append("\\\'");
+                        continue;
+                    case '\\':
+                        retval.Append("\\\\");
+                        continue;
+                    default:
+                        if ((ch = str[i]) < 0x20 || ch > 0x7e)
+                        {
+                            String s = "0000" + Convert.ToString(ch, 16);
+                            retval.Append("\\u" + s.Substring(s.Length - 4));
+                        }
+                        else
+                        {
+                            retval.Append(ch);
+                        }
+                        continue;
+                }
+            }
+            return retval.ToString();
+        }
+
+        protected static String LexicalError(bool EOFSeen, int lexState, int errorLine, int errorColumn, String errorAfter, char curChar)
+        {
+            return ("Lexical error at line " +
+                  errorLine + ", column " +
+                  errorColumn + ".  Encountered: " +
+                  (EOFSeen ? "<EOF> " : ("\"" + AddEscapes(curChar.ToString()) + "\"") + " (" + (int)curChar + "), ") +
+                  "after : \"" + AddEscapes(errorAfter) + "\"");
+        }
+
+        public override string Message
+        {
+            get
+            {
+                return base.Message;
+            }
+        }
+
+        public TokenMgrError()
+        {
+        }
+
+        public TokenMgrError(String message, int reason)
+            : base(message)
+        {
+            errorCode = reason;
+        }
+
+        public TokenMgrError(bool EOFSeen, int lexState, int errorLine, int errorColumn, String errorAfter, char curChar, int reason)
+            : this(LexicalError(EOFSeen, lexState, errorLine, errorColumn, errorAfter, curChar), reason)
+        {            
+        }
+    }
+}
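
Caller-side handling, sketched (not from the commit): GetNextToken()
throws TokenMgrError on unmatchable input, with a message pre-formatted
by LexicalError()/AddEscapes() above.

    try
    {
        Token t = tokenManager.GetNextToken();   // tokenManager: see the sketch above
        // ... use t ...
    }
    catch (TokenMgrError e)
    {
        // message reads along the lines of:
        // Lexical error at line 1, column 5.  Encountered: "!" (33), after : ""
        Console.Error.WriteLine(e.Message);
    }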

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/0e6eb14a/src/contrib/QueryParsers/Contrib.QueryParsers.csproj
----------------------------------------------------------------------
diff --git a/src/contrib/QueryParsers/Contrib.QueryParsers.csproj b/src/contrib/QueryParsers/Contrib.QueryParsers.csproj
new file mode 100644
index 0000000..7cb9b2d
--- /dev/null
+++ b/src/contrib/QueryParsers/Contrib.QueryParsers.csproj
@@ -0,0 +1,69 @@
+<?xml version="1.0" encoding="utf-8"?>
+<Project ToolsVersion="4.0" DefaultTargets="Build" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
+  <Import Project="$(MSBuildExtensionsPath)\$(MSBuildToolsVersion)\Microsoft.Common.props" Condition="Exists('$(MSBuildExtensionsPath)\$(MSBuildToolsVersion)\Microsoft.Common.props')" />
+  <PropertyGroup>
+    <Configuration Condition=" '$(Configuration)' == '' ">Debug</Configuration>
+    <Platform Condition=" '$(Platform)' == '' ">AnyCPU</Platform>
+    <ProjectGuid>{56438272-B00E-40DE-9C9A-0785E705E7D9}</ProjectGuid>
+    <OutputType>Library</OutputType>
+    <AppDesignerFolder>Properties</AppDesignerFolder>
+    <RootNamespace>Lucene.Net.QueryParsers</RootNamespace>
+    <AssemblyName>Contrib.QueryParsers</AssemblyName>
+    <TargetFrameworkVersion>v4.5</TargetFrameworkVersion>
+    <FileAlignment>512</FileAlignment>
+  </PropertyGroup>
+  <PropertyGroup Condition=" '$(Configuration)|$(Platform)' == 'Debug|AnyCPU' ">
+    <DebugSymbols>true</DebugSymbols>
+    <DebugType>full</DebugType>
+    <Optimize>false</Optimize>
+    <OutputPath>bin\Debug\</OutputPath>
+    <DefineConstants>DEBUG;TRACE</DefineConstants>
+    <ErrorReport>prompt</ErrorReport>
+    <WarningLevel>4</WarningLevel>
+  </PropertyGroup>
+  <PropertyGroup Condition=" '$(Configuration)|$(Platform)' == 'Release|AnyCPU' ">
+    <DebugType>pdbonly</DebugType>
+    <Optimize>true</Optimize>
+    <OutputPath>bin\Release\</OutputPath>
+    <DefineConstants>TRACE</DefineConstants>
+    <ErrorReport>prompt</ErrorReport>
+    <WarningLevel>4</WarningLevel>
+  </PropertyGroup>
+  <ItemGroup>
+    <Reference Include="System" />
+    <Reference Include="System.Configuration" />
+    <Reference Include="System.Core" />
+    <Reference Include="System.Xml.Linq" />
+    <Reference Include="System.Data.DataSetExtensions" />
+    <Reference Include="Microsoft.CSharp" />
+    <Reference Include="System.Data" />
+    <Reference Include="System.Xml" />
+  </ItemGroup>
+  <ItemGroup>
+    <Compile Include="Classic\FastCharStream.cs" />
+    <Compile Include="Classic\ICharStream.cs" />
+    <Compile Include="Classic\QueryParserConstants.cs" />
+    <Compile Include="Classic\ParseException.cs" />
+    <Compile Include="Classic\QueryParser.cs" />
+    <Compile Include="Classic\QueryParserBase.cs" />
+    <Compile Include="Classic\QueryParserTokenManager.cs" />
+    <Compile Include="Classic\Token.cs" />
+    <Compile Include="Classic\TokenMgrError.cs" />
+    <Compile Include="Flexible\Standard\ICommonQueryParserConfiguration.cs" />
+    <Compile Include="Properties\AssemblyInfo.cs" />
+  </ItemGroup>
+  <ItemGroup>
+    <ProjectReference Include="..\..\core\Lucene.Net.csproj">
+      <Project>{5d4ad9be-1ffb-41ab-9943-25737971bf57}</Project>
+      <Name>Lucene.Net</Name>
+    </ProjectReference>
+  </ItemGroup>
+  <Import Project="$(MSBuildToolsPath)\Microsoft.CSharp.targets" />
+  <!-- To modify your build process, add your task inside one of the targets below and uncomment it. 
+       Other similar extension points exist, see Microsoft.Common.targets.
+  <Target Name="BeforeBuild">
+  </Target>
+  <Target Name="AfterBuild">
+  </Target>
+  -->
+</Project>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/0e6eb14a/src/contrib/QueryParsers/Flexible/Standard/ICommonQueryParserConfiguration.cs
----------------------------------------------------------------------
diff --git a/src/contrib/QueryParsers/Flexible/Standard/ICommonQueryParserConfiguration.cs b/src/contrib/QueryParsers/Flexible/Standard/ICommonQueryParserConfiguration.cs
new file mode 100644
index 0000000..4e9de64
--- /dev/null
+++ b/src/contrib/QueryParsers/Flexible/Standard/ICommonQueryParserConfiguration.cs
@@ -0,0 +1,37 @@
+using Lucene.Net.Analysis;
+using Lucene.Net.Documents;
+using Lucene.Net.Search;
+using System;
+using System.Collections.Generic;
+using System.Globalization;
+using System.Linq;
+using System.Text;
+using System.Threading.Tasks;
+
+namespace Lucene.Net.QueryParsers.Flexible.Standard
+{
+    public interface ICommonQueryParserConfiguration
+    {
+        bool LowercaseExpandedTerms { get; set; }
+
+        bool AllowLeadingWildcard { get; set; }
+
+        bool EnablePositionIncrements { get; set; }
+
+        MultiTermQuery.RewriteMethod MultiTermRewriteMethod { get; set; }
+
+        int FuzzyPrefixLength { get; set; }
+
+        CultureInfo Locale { get; set; }
+
+        TimeZone TimeZone { get; set; }
+
+        int PhraseSlop { get; set; }
+
+        Analyzer Analyzer { get; }
+
+        float FuzzyMinSim { get; set; }
+
+        DateTools.Resolution DateResolution { get; set; }
+    }
+}
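
A sketch of how the interface is meant to be consumed (illustrative;
which parsers implement it is outside this commit): callers can tune any
conforming parser without naming its concrete type.

    using System.Globalization;
    using Lucene.Net.QueryParsers.Flexible.Standard;

    static class ParserSetup
    {
        public static void Configure(ICommonQueryParserConfiguration config)
        {
            config.AllowLeadingWildcard = false;            // reject e.g. "*foo"
            config.PhraseSlop = 2;                          // tolerate small phrase reorderings
            config.FuzzyMinSim = 0.6f;                      // loosen fuzzy matching
            config.Locale = CultureInfo.InvariantCulture;   // culture used for locale-sensitive parsing
        }
    }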

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/0e6eb14a/src/contrib/QueryParsers/Properties/AssemblyInfo.cs
----------------------------------------------------------------------
diff --git a/src/contrib/QueryParsers/Properties/AssemblyInfo.cs b/src/contrib/QueryParsers/Properties/AssemblyInfo.cs
new file mode 100644
index 0000000..afe1d6b
--- /dev/null
+++ b/src/contrib/QueryParsers/Properties/AssemblyInfo.cs
@@ -0,0 +1,36 @@
+using System.Reflection;
+using System.Runtime.CompilerServices;
+using System.Runtime.InteropServices;
+
+// General Information about an assembly is controlled through the following 
+// set of attributes. Change these attribute values to modify the information
+// associated with an assembly.
+[assembly: AssemblyTitle("Contrib.QueryParsers")]
+[assembly: AssemblyDescription("")]
+[assembly: AssemblyConfiguration("")]
+[assembly: AssemblyCompany("")]
+[assembly: AssemblyProduct("Contrib.QueryParsers")]
+[assembly: AssemblyCopyright("Copyright ©  2013")]
+[assembly: AssemblyTrademark("")]
+[assembly: AssemblyCulture("")]
+
+// Setting ComVisible to false makes the types in this assembly not visible 
+// to COM components.  If you need to access a type in this assembly from 
+// COM, set the ComVisible attribute to true on that type.
+[assembly: ComVisible(false)]
+
+// The following GUID is for the ID of the typelib if this project is exposed to COM
+[assembly: Guid("ea92a6e7-09bd-4d95-ac41-7fc0605e6c3e")]
+
+// Version information for an assembly consists of the following four values:
+//
+//      Major Version
+//      Minor Version 
+//      Build Number
+//      Revision
+//
+// You can specify all the values or you can default the Build and Revision Numbers 
+// by using the '*' as shown below:
+// [assembly: AssemblyVersion("1.0.*")]
+[assembly: AssemblyVersion("1.0.0.0")]
+[assembly: AssemblyFileVersion("1.0.0.0")]


[48/50] [abbrv] git commit: fixed fst errors

Posted by mh...@apache.org.
fixed fst errors


Project: http://git-wip-us.apache.org/repos/asf/lucenenet/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucenenet/commit/b7ca14a7
Tree: http://git-wip-us.apache.org/repos/asf/lucenenet/tree/b7ca14a7
Diff: http://git-wip-us.apache.org/repos/asf/lucenenet/diff/b7ca14a7

Branch: refs/heads/branch_4x
Commit: b7ca14a77922a0e4d216781f74b9c1379f350698
Parents: 96a95e3
Author: James Blair <jm...@gmail.com>
Authored: Tue Aug 13 15:49:10 2013 -0400
Committer: Paul Irwin <pa...@gmail.com>
Committed: Tue Aug 13 16:18:11 2013 -0400

----------------------------------------------------------------------
 src/core/Util/Fst/FST.cs | 478 +++++++++++++++++++++++++++++++++++++-----
 1 file changed, 423 insertions(+), 55 deletions(-)
----------------------------------------------------------------------
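
For orientation before the diff (a sketch, not part of the commit): the
FST stores each arc's metadata as single-bit flags packed into one byte,
and most of the reading code below branches on them. The bit arithmetic,
using the BIT_* values declared near the end of FST.cs:

    // values mirror the constants in src/core/Util/Fst/FST.cs
    const int BIT_LAST_ARC = 1 << 1;              // arc is its node's last
    const int BIT_TARGET_NEXT = 1 << 2;           // target node is next in the byte[]
    const int BIT_STOP_NODE = 1 << 3;             // target is a virtual end node
    const int BIT_ARC_HAS_OUTPUT = 1 << 4;        // an output follows the label
    const int BIT_ARC_HAS_FINAL_OUTPUT = 1 << 5;  // a final output follows

    static bool Flag(int flags, int bit)
    {
        return (flags & bit) != 0;
    }

    // e.g. flags == (BIT_LAST_ARC | BIT_ARC_HAS_OUTPUT) means: read an
    // output after the label, then stop scanning this node's arcs.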


http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b7ca14a7/src/core/Util/Fst/FST.cs
----------------------------------------------------------------------
diff --git a/src/core/Util/Fst/FST.cs b/src/core/Util/Fst/FST.cs
index c6cbcd6..70f4e8e 100644
--- a/src/core/Util/Fst/FST.cs
+++ b/src/core/Util/Fst/FST.cs
@@ -9,13 +9,15 @@ using Lucene.Net.Codecs;
 
 namespace Lucene.Net.Util.Fst
 {
-    public class FST<T> : FST
+    public sealed class FST<T> : FST
     {
         private readonly INPUT_TYPE inputType;
         public INPUT_TYPE InputType { get { return inputType; } }
 
         private int[] bytesPerArc = new int[0];
 
+        // if non-null, this FST accepts the empty string and
+        // produces this output
         private T emptyOutput;
         public T EmptyOutput
         {
@@ -37,18 +39,24 @@ namespace Lucene.Net.Util.Fst
         private readonly Outputs<T> outputs;
         public Outputs<T> Outputs { get { return outputs; } }
 
+        // Used for the BIT_TARGET_NEXT optimization (whereby
+        // instead of storing the address of the target node for
+        // a given arc, we mark a single bit noting that the next
+        // node in the byte[] is the target node):
         private long lastFrozenNode;
 
         private readonly T NO_OUTPUT;
 
         internal long NodeCount { get; set; }
-
         public long ArcCount { get; set; }
         public long ArcWithOutputCount { get; set; }
 
         private readonly bool packed;
         private PackedInts.IReader nodeRefToAddress;
 
+        /// <summary>
+        /// If arc has this label then that arc is final/accepted
+        /// </summary>
         public const int END_LABEL = -1;
 
         private readonly bool allowArrayArcs;
@@ -63,10 +71,15 @@ namespace Lucene.Net.Util.Fst
 
         private GrowableWriter NodeAddress;
 
+        // TODO: we could be smarter here, and prune periodically
+        // as we go; high in-count nodes will "usually" become
+        // clear early on:
         private GrowableWriter InCounts;
 
         private readonly int Version;
 
+        // make a new empty FST, for building; Builder invokes
+        // this ctor
         internal FST(INPUT_TYPE inputType, Outputs<T> outputs, bool willPackFST, float acceptableOverheadRatio,
             bool allowArrayArcs, int bytesPageBits)
         {
@@ -75,6 +88,8 @@ namespace Lucene.Net.Util.Fst
             this.allowArrayArcs = allowArrayArcs;
             Version = VERSION_CURRENT;
             bytes = new BytesStore(bytesPageBits);
+            // pad: ensure no node gets address 0 which is reserved to mean
+            // the stop state w/ no arcs
             bytes.WriteByte((Byte)0);
             NO_OUTPUT = outputs.GetNoOutput();
             if (willPackFST)
@@ -95,31 +110,53 @@ namespace Lucene.Net.Util.Fst
 
         public static readonly int DEFAULT_MAX_BLOCK_BITS = Constants.JRE_IS_64BIT ? 30 : 28;
 
+        /// <summary>
+        /// Load a previously saved FST.
+        /// </summary>
+        /// <param name="input"></param>
+        /// <param name="outputs"></param>
         public FST(DataInput input, Outputs<T> outputs)
             : this(input, outputs, DEFAULT_MAX_BLOCK_BITS)
         {
         }
 
+        /// <summary>
+        /// Load a previously saved FST; maxBlockBits allows you to
+        /// control the size of the byte[] pages used to hold the FST bytes.
+        /// </summary>
+        /// <param name="dataInput"></param>
+        /// <param name="outputs"></param>
+        /// <param name="maxBlockBits"></param>
         public FST(DataInput dataInput, Outputs<T> outputs, int maxBlockBits)
         {
             this.outputs = outputs;
 
-            if (maxBlockBits < 1 || maxBlockBits > 30) throw new ArgumentException("maxBlockBits should be 1 .. 30; got " + maxBlockBits, "maxBlockBits");
-            Version = CodecUtil.CheckHeader(dataInput, FILE_FORMAT_NAME, VERSIONpacked, VERSION_VINT_TARGET);
+            if (maxBlockBits < 1 || maxBlockBits > 30)
+                throw new ArgumentException("maxBlockBits should be 1 .. 30; got " + maxBlockBits, "maxBlockBits");
+
+            // NOTE: only reads most recent format; we don't have
+            // back-compat promise for FSTs (they are experimental):
+            Version = CodecUtil.CheckHeader(dataInput, FILE_FORMAT_NAME, VERSION_PACKED, VERSION_VINT_TARGET);
             packed = dataInput.ReadByte() == 1;
 
             if (dataInput.ReadByte() == 1)
             {
+                // accepts empty string
+                // 1 KB blocks:
                 var emptyBytes = new BytesStore(10);
                 int numBytes = dataInput.ReadVInt();
                 emptyBytes.CopyBytes(dataInput, numBytes);
 
+                // De-serialize empty-string output:
                 FST.BytesReader reader;
                 if (packed)
                     reader = emptyBytes.GetForwardReader();
                 else
                 {
                     reader = emptyBytes.GetReverseReader();
+                    // NoOutputs uses 0 bytes when writing its output,
+                    // so we have to check here else BytesStore gets
+                    // angry:
                     if (numBytes > 0)
                         reader.Position = numBytes - 1;
                 }
@@ -159,9 +196,16 @@ namespace Lucene.Net.Util.Fst
 
             CacheRootArcs();
 
+            // NOTE: bogus because this is only used during
+            // building; we need to break out mutable FST from
+            // immutable
             allowArrayArcs = false;
         }
 
+        /// <summary>
+        /// Returns bytes used to represent the FST.
+        /// </summary>
+        /// <returns></returns>
         public long SizeInBytes()
         {
             var size = bytes.GetPosition();
@@ -197,6 +241,7 @@ namespace Lucene.Net.Util.Fst
             return node;
         }
 
+        // Caches first 128 labels
         private void CacheRootArcs()
         {
             cachedRootArcs = new Arc<T>[0x80];
@@ -238,6 +283,8 @@ namespace Lucene.Net.Util.Fst
             else
                 output.WriteByte((sbyte)0);
 
+            // TODO: really we should encode this as an arc, arriving
+            // to the root node, instead of special casing here:
             if (emptyOutput != null)
             {
                 // Accepts empty string
@@ -298,10 +345,14 @@ namespace Lucene.Net.Util.Fst
             bytes.WriteTo(output);
         }
 
-        public void Save(FileStream fileStream)
+        /// <summary>
+        /// Writes an automaton to a file
+        /// </summary>
+        /// <param name="fileInfo"></param>
+        public void Save(FileInfo fileInfo)
         {
             var success = false;
-            var bs = new BufferedStream(fileStream);
+            var bs = new BufferedStream(new FileStream(fileInfo.FullName, FileMode.Create, FileAccess.Write));
             try
             {
                 Save(new OutputStreamDataOutput(bs));
@@ -316,9 +367,16 @@ namespace Lucene.Net.Util.Fst
             }
         }
 
-        public static FST<TMethod> Read<TMethod>(FileStream fileStream, Outputs<TMethod> outputs) where TMethod : class
+        /// <summary>
+        /// Reads an automaton from a file
+        /// </summary>
+        /// <typeparam name="TMethod"></typeparam>
+        /// <param name="fileInfo"></param>
+        /// <param name="outputs"></param>
+        /// <returns></returns>
+        public static FST<TMethod> Read<TMethod>(FileInfo fileInfo, Outputs<TMethod> outputs) where TMethod : class
         {
-            var bs = new BufferedStream(fileStream);
+            var bs = new BufferedStream(new FileStream(fileInfo.FullName, FileMode.Open, FileAccess.Read));
             var success = false;
             try
             {
@@ -373,11 +431,20 @@ namespace Lucene.Net.Util.Fst
             return v;
         }
 
+        /// <summary>
+        /// Returns true if the node at this address
+        /// has any outgoing arcs
+        /// </summary>
+        /// <typeparam name="TMethod"></typeparam>
+        /// <param name="arc"></param>
+        /// <returns></returns>
         public static bool TargetHasArcs<TMethod>(Arc<TMethod> arc)
         {
             return arc.Target > 0;
         }
 
+        // Serializes new node by appending its bytes to the end
+        // of the current byte[]
         internal long AddNode(Builder<T>.UnCompiledNode<T> nodeIn)
         {
             if (nodeIn.NumArcs == 0)
@@ -408,6 +475,9 @@ namespace Lucene.Net.Util.Fst
                     flags += BIT_LAST_ARC;
 
                 if (lastFrozenNode == target.Node && !doFixedArray)
+                    // TODO: for better perf (but more RAM used) we
+                    // could avoid this except when arc is "near" the
+                    // last arc:
                     flags += BIT_TARGET_NEXT;
 
                 if (arc.IsFinal)
@@ -418,7 +488,6 @@ namespace Lucene.Net.Util.Fst
                 }
                 else
                 {
-                    // TODO: Assert is correct here?
                     //Debug.Assert(arc.NextFinalOutput == NO_OUTPUT);
                 }
 
@@ -453,6 +522,9 @@ namespace Lucene.Net.Util.Fst
                     bytes.WriteVLong(target.Node);
                 }
 
+                // just write the arcs "like normal" on first pass,
+                // but record how many bytes each one took, and max
+                // byte size:
                 if (doFixedArray)
                 {
                     bytesPerArc[arcIdx] = (int)(bytes.GetPosition() - lastArcStart);
@@ -461,14 +533,36 @@ namespace Lucene.Net.Util.Fst
                 }
             }
 
+            // TODO: try to avoid wasteful cases: disable doFixedArray in that case
+            /* 
+             * 
+             * LUCENE-4682: what is a fair heuristic here?
+             * It could involve some of these:
+             * 1. how "busy" the node is: nodeIn.inputCount relative to frontier[0].inputCount?
+             * 2. how much binSearch saves over scan: nodeIn.numArcs
+             * 3. waste: numBytes vs numBytesExpanded
+             * 
+             * the one below just looks at #3
+            if (doFixedArray) {
+              // rough heuristic: make this 1.25 "waste factor" a parameter to the phd ctor????
+              int numBytes = lastArcStart - startAddress;
+              int numBytesExpanded = maxBytesPerArc * nodeIn.numArcs;
+              if (numBytesExpanded > numBytes*1.25) {
+                doFixedArray = false;
+              }
+            }
+            */
+
             if (doFixedArray)
             {
                 var MAX_HEADER_SIZE = 11;
-                // TODO: assert correct here?
-                Debug.Assert(maxBytesPerArc > 0);
+                // assert maxBytesPerArc > 0;
 
+                // create the header
+                // TODO: clean this up: or just rewind+reuse and deal with it
                 var header = new byte[MAX_HEADER_SIZE];
                 var bad = new ByteArrayDataOutput(header);
+                // write a "false" first arc:
                 bad.WriteByte(ARCS_AS_FIXED_ARRAY);
                 bad.WriteVInt(nodeIn.NumArcs);
                 bad.WriteVInt(maxBytesPerArc);
@@ -476,10 +570,10 @@ namespace Lucene.Net.Util.Fst
 
                 long fixedArrayStart = startAddress + headerLen;
 
+                // expand the arcs in place, backwards
                 var srcPos = bytes.GetPosition();
                 var destPos = fixedArrayStart + nodeIn.NumArcs * maxBytesPerArc;
-                // TODO: assert correct here?
-                Debug.Assert(destPos >= srcPos);
+                // assert destPos >= srcPos
                 if (destPos > srcPos)
                 {
                     bytes.SkipBytes((int)(destPos - srcPos));
@@ -517,6 +611,7 @@ namespace Lucene.Net.Util.Fst
             long node;
             if (NodeAddress != null)
             {
+                // Nodes are addressed by 1+ord:
                 if ((int)NodeCount == NodeAddress.Size())
                 {
                     NodeAddress =
@@ -536,6 +631,12 @@ namespace Lucene.Net.Util.Fst
             return node;
         }
 
+        /// <summary>
+        /// Fills virtual 'start' arc, ie, an empty incoming arc
+        /// to the FST's start node
+        /// </summary>
+        /// <param name="arc"></param>
+        /// <returns></returns>
         public Arc<T> GetFirstArc(Arc<T> arc)
         {
             if (EmptyOutput != null)
@@ -550,15 +651,26 @@ namespace Lucene.Net.Util.Fst
             }
             arc.Output = NO_OUTPUT;
 
+            // If there are no nodes, ie, the FST only accepts the
+            // empty string, then startNode is 0
+            arc.Target = startNode;
             return arc;
         }
 
+        /// <summary>
+        /// Follows the <code>follow</code> arc and reads the last
+        /// arc of its target; this changes the provided
+        /// <code>arc</code> (2nd arg) in-place and returns it
+        /// </summary>
+        /// <param name="follow"></param>
+        /// <param name="arc"></param>
+        /// <param name="input"></param>
+        /// <returns>Returns the second argument</returns>
         public Arc<T> ReadLastTargetArc(Arc<T> follow, Arc<T> arc, FST.BytesReader input)
         {
             if (!TargetHasArcs(follow))
             {
-                // TODO: assert correct here?
-                Debug.Assert(follow.IsFinal());
+                // assert follow.isFinal();
                 arc.Label = END_LABEL;
                 arc.Target = FINAL_END_NODE;
                 arc.Output = follow.NextFinalOutput;
@@ -572,6 +684,7 @@ namespace Lucene.Net.Util.Fst
                 var b = input.ReadByte();
                 if (b == ARCS_AS_FIXED_ARRAY)
                 {
+                    // array: jump straight to end
                     arc.NumArcs = input.ReadVInt();
                     if (packed || Version >= VERSION_VINT_TARGET)
                         arc.BytesPerArc = input.ReadVInt();
@@ -584,15 +697,21 @@ namespace Lucene.Net.Util.Fst
                 else
                 {
                     arc.Flags = (sbyte)b;
+                    // non-array: linear scan
                     arc.BytesPerArc = 0;
 
                     while (!arc.IsLast())
                     {
+                        // skip this arc:
                         ReadLabel(input);
                         if (arc.Flag(BIT_ARC_HAS_OUTPUT))
+                        {
                             Outputs.Read(input);
+                        }
                         if (arc.Flag(BIT_ARC_HAS_FINAL_OUTPUT))
+                        {
                             Outputs.ReadFinalOutput(input);
+                        }
                         if (arc.Flag(BIT_STOP_NODE))
                         {
                         }
@@ -606,14 +725,13 @@ namespace Lucene.Net.Util.Fst
 
                         arc.Flags = (sbyte)input.ReadByte();
                     }
-
+                    // Undo the byte flags we read:
                     input.SkipBytes(-1);
                     arc.NextArc = input.Position;
                 }
 
                 ReadNextRealArc(arc, input);
-                // TODO: assert is correct here?
-                //Debug.Assert(arc.IsLast());
+                // assert arc.isLast();
                 return arc;
             }
         }
@@ -629,10 +747,20 @@ namespace Lucene.Net.Util.Fst
             return target;
         }
 
+        /// <summary>
+        /// Follow the <code>follow</code> arc and read the first arc of its target;
+        /// this changes the provided <code>arc</code> (2nd arg) in-place and returns it.
+        /// </summary>
+        /// <param name="follow"></param>
+        /// <param name="arc"></param>
+        /// <param name="input"></param>
+        /// <returns>Returns the second argument (<code>arc</code>)</returns>
         public Arc<T> ReadFirstTargetArc(Arc<T> follow, Arc<T> arc, FST.BytesReader input)
         {
+            // int pos = address;
             if (follow.IsFinal())
             {
+                // Insert "fake" final first arc:
                 arc.Label = END_LABEL;
                 arc.Output = follow.NextFinalOutput;
                 arc.Flags = (sbyte)BIT_FINAL_ARC;
@@ -641,6 +769,7 @@ namespace Lucene.Net.Util.Fst
                 else
                 {
                     arc.Node = follow.Target;
+                    // NOTE: nextArc is a node (not an address!) in this case:
                     arc.NextArc = follow.Target;
                 }
                 arc.Target = FINAL_END_NODE;
@@ -679,6 +808,13 @@ namespace Lucene.Net.Util.Fst
             return ReadNextRealArc(arc, input);
         }
 
+        /// <summary>
+        /// Checks if <code>arc</code>'s target state is in expanded (or vector) format.
+        /// </summary>
+        /// <param name="follow"></param>
+        /// <param name="input"></param>
+        /// <returns>Returns <code>true</code> if <code>arc</code> points to a state 
+        /// in an expanded array format.</returns>
         internal bool IsExpandedTarget(Arc<T> follow, FST.BytesReader input)
         {
             if (!TargetHasArcs(follow))
@@ -690,10 +826,17 @@ namespace Lucene.Net.Util.Fst
             }
         }
 
+        /// <summary>
+        /// In-place read; returns the arc.
+        /// </summary>
+        /// <param name="arc"></param>
+        /// <param name="input"></param>
+        /// <returns></returns>
         public Arc<T> ReadNextArc(Arc<T> arc, FST.BytesReader input)
         {
             if (arc.Label == END_LABEL)
             {
+                // This was a fake inserted "final" arc
                 if (arc.NextArc <= 0)
                     throw new ArgumentException("cannot readNextArc when arc.isLast()=true");
                 return ReadFirstRealTargetArc(arc.NextArc, arc, input);
@@ -704,10 +847,17 @@ namespace Lucene.Net.Util.Fst
             }
         }
 
+        /// <summary>
+        /// Peeks at next arc's label; does not alter arc. Do
+        /// not call this if arc.IsLast()!
+        /// </summary>
+        /// <param name="arc"></param>
+        /// <param name="input"></param>
+        /// <returns></returns>
         public int ReadNextArcLabel(Arc<T> arc, FST.BytesReader input)
         {
-            // TODO: assert correct here?
-            Debug.Assert(!arc.IsLast());
+            if (arc.IsLast())
+                throw new ArgumentException("cannot ReadNextArcLabel when arc.IsLast()=true");
 
             if (arc.Label == END_LABEL)
             {
@@ -719,6 +869,7 @@ namespace Lucene.Net.Util.Fst
                 {
                     input.ReadVInt();
 
+                    // Skip bytesPerArc:
                     if (packed || Version >= VERSION_VINT_TARGET)
                         input.ReadVInt();
                     else
@@ -726,60 +877,101 @@ namespace Lucene.Net.Util.Fst
                 }
                 else
                 {
+                    input.Position = pos;
+                }
+            }
+            else
+            {
+                if (arc.BytesPerArc != 0)
+                {
+                    // arcs are at fixed entries
+                    input.Position = arc.PosArcsStart;
+                    input.SkipBytes((1 + arc.ArcIdx) * arc.BytesPerArc);
+                }
+                else
+                {
+                    // arcs are packed
                     input.Position = arc.NextArc;
                 }
             }
 
+            // skip flags
             input.ReadByte();
             return ReadLabel(input);
         }
 
+        /// <summary>
+        /// Never returns null, but you should never call this if
+        /// arc.IsLast() is true.
+        /// </summary>
+        /// <param name="arc"></param>
+        /// <param name="input"></param>
+        /// <returns></returns>
         public Arc<T> ReadNextRealArc(Arc<T> arc, FST.BytesReader input)
         {
+            // TODO: can't assert this because we call from readFirstArc
+            // assert !flag(arc.flags, BIT_LAST_ARC);
+
+            // this is a continuing arc in a fixed array
             if (arc.BytesPerArc != 0)
             {
+                // arcs are at fixed entries
                 arc.ArcIdx++;
-                // TODO: assert correct here?
-                Debug.Assert(arc.ArcIdx < arc.NumArcs);
+                // assert arc.arcIdx < arc.numArcs;
                 input.Position = arc.PosArcsStart;
                 input.SkipBytes(arc.ArcIdx * arc.BytesPerArc);
             }
             else
             {
+                // arcs are packed
                 input.Position = arc.NextArc;
             }
             arc.Flags = (sbyte)input.ReadByte();
             arc.Label = ReadLabel(input);
 
             if (arc.Flag(BIT_ARC_HAS_OUTPUT))
+            {
                 arc.Output = Outputs.Read(input);
+            }
             else
+            {
                 arc.Output = Outputs.GetNoOutput();
+            }
 
             if (arc.Flag(BIT_ARC_HAS_FINAL_OUTPUT))
+            {
                 arc.NextFinalOutput = Outputs.ReadFinalOutput(input);
+            }
             else
+            {
                 arc.NextFinalOutput = Outputs.GetNoOutput();
+            }
 
             if (arc.Flag(BIT_STOP_NODE))
             {
                 if (arc.Flag(BIT_FINAL_ARC))
+                {
                     arc.Target = FINAL_END_NODE;
+                }
                 else
+                {
                     arc.Target = NON_FINAL_END_NODE;
-
+                }
                 arc.NextArc = input.Position;
             }
             else if (arc.Flag(BIT_TARGET_NEXT))
             {
                 arc.NextArc = input.Position;
 
+                // TODO: would be nice to make this lazy -- maybe
+                // caller doesn't need the target and is scanning arcs...
                 if (NodeAddress == null)
                 {
                     if (!arc.Flag(BIT_LAST_ARC))
                     {
                         if (arc.BytesPerArc == 0)
                         {
+                            // must scan
                             SeekToNextNode(input);
                         }
                         else
@@ -793,8 +985,7 @@ namespace Lucene.Net.Util.Fst
                 else
                 {
                     arc.Target = arc.Node - 1;
-                    // TODO: assert correct here?
-                    Debug.Assert(arc.Target > 0);
+                    // assert arc.target > 0
                 }
             }
             else
@@ -805,14 +996,17 @@ namespace Lucene.Net.Util.Fst
                     var code = input.ReadVLong();
                     if (arc.Flag(BIT_TARGET_DELTA))
                     {
+                        // Address is delta-coded from current address:
                         arc.Target = pos + code;
                     }
                     else if (code < nodeRefToAddress.Size())
                     {
+                        // Deref
                         arc.Target = nodeRefToAddress.Get((int)code);
                     }
                     else
                     {
+                        // Absolute
                         arc.Target = code;
                     }
                 }
@@ -825,10 +1019,22 @@ namespace Lucene.Net.Util.Fst
             return arc;
         }
 
+        // TODO: could we somehow [partially] tableize arc lookups
+        // like automaton?
+
+        /// <summary>
+        /// Finds an arc leaving the incoming arc, replacing the arc in place.
+        /// This returns null if the arc was not found, else the incoming arc.
+        /// </summary>
+        /// <param name="labelToMatch"></param>
+        /// <param name="follow"></param>
+        /// <param name="arc"></param>
+        /// <param name="input"></param>
+        /// <returns></returns>
         public Arc<T> FindTargetArc(int labelToMatch, Arc<T> follow, Arc<T> arc, FST.BytesReader input)
         {
-            // TODO: appropriate error message
-            if (cachedRootArcs == null) throw new InvalidOperationException("cachedRootArcs cannot be null");
+            if (cachedRootArcs == null)
+                throw new InvalidOperationException("cachedRootArcs cannot be null");
 
             if (labelToMatch == END_LABEL)
             {
@@ -841,6 +1047,7 @@ namespace Lucene.Net.Util.Fst
                     else
                     {
                         arc.Flags = 0;
+                        // NOTE: nextArc is a node (not an address!) in this case:
                         arc.NextArc = follow.Target;
                         arc.Node = follow.Target;
                     }
@@ -854,6 +1061,7 @@ namespace Lucene.Net.Util.Fst
                 }
             }
 
+            // Short-circuit if this arc is in the root arc cache:
             if (follow.Target == startNode && labelToMatch < cachedRootArcs.Length)
             {
                 var result = cachedRootArcs[labelToMatch];
@@ -867,7 +1075,9 @@ namespace Lucene.Net.Util.Fst
             }
 
             if (!TargetHasArcs(follow))
+            {
                 return null;
+            }
 
             input.Position = GetNodeAddress(follow.Target);
 
@@ -875,6 +1085,7 @@ namespace Lucene.Net.Util.Fst
 
             if (input.ReadByte() == ARCS_AS_FIXED_ARRAY)
             {
+                // Arcs are full array; do binary search:
                 arc.NumArcs = input.ReadVInt();
                 if (packed || Version >= VERSION_VINT_TARGET)
                     arc.BytesPerArc = input.ReadVInt();
@@ -903,6 +1114,7 @@ namespace Lucene.Net.Util.Fst
                 return null;
             }
 
+            // Linear scan
             ReadFirstRealTargetArc(follow.Target, arc, input);
 
             while (true)
@@ -938,8 +1150,6 @@ namespace Lucene.Net.Util.Fst
 
                 if (Flag(flags, BIT_LAST_ARC))
                     return;
-
-
             }
         }
 
@@ -949,6 +1159,20 @@ namespace Lucene.Net.Util.Fst
             return 1 + NodeCount;
         }
 
+
+        /// <summary>
+        /// Nodes will be expanded if their depth (distance from the root node) is
+        /// &lt;= this value and their number of arcs is &gt;=
+        /// (FIXED_ARRAY_NUM_ARCS_SHALLOW).
+        /// 
+        /// Fixed array consumes more RAM but enables binary search on the arcs
+        /// (instead of a linear scan) on lookup by arc label.
+        /// </summary>
+        /// <param name="node"></param>
+        /// <returns><code>true</code> if <code>node</code> should be stored
+        /// in an expanded (array) form.</returns>
+        /// <see cref="FIXED_ARRAY_NUM_ARCS_DEEP"/>
+        /// <see cref="Builder.UnCompiledNode"/>
         private bool ShouldExpand(Builder<T>.UnCompiledNode<T> node)
         {
             return allowArrayArcs &&
@@ -956,11 +1180,22 @@ namespace Lucene.Net.Util.Fst
                     node.NumArcs >= FIXED_ARRAY_NUM_ARCS_DEEP);
         }
 
+        /// <summary>
+        /// Returns a BytesReader for this FST, positioned at position 0.
+        /// </summary>
+        /// <returns></returns>
         public FST.BytesReader GetBytesReader()
         {
-            return packed ?
-                bytes.GetForwardReader() :
-                bytes.GetReverseReader();
+            BytesReader input;
+            if (packed)
+            {
+                input = bytes.GetForwardReader();
+            }
+            else
+            {
+                input = bytes.GetReverseReader();
+            }
+            return input;
         }
 
 
@@ -977,6 +1212,7 @@ namespace Lucene.Net.Util.Fst
         //    public abstract void SkipBytes(int count);
         //}
 
+        // Creates a packed FST
         private FST(INPUT_TYPE inputType, Outputs<T> outputs, int bytesPageBits)
         {
             Version = VERSION_CURRENT;
@@ -989,9 +1225,42 @@ namespace Lucene.Net.Util.Fst
             allowArrayArcs = false;
         }
 
+        /// <summary>
+        /// Expert: creates an FST by packing this one. This
+        /// process requires substantial additional RAM (currently
+        /// up to ~8 bytes per node depending on 
+        /// <code>acceptableOverheadRatio</code>), but then should
+        /// produce a smaller FST.
+        /// 
+        /// The implementation of this method uses ideas from
+        /// http://www.cs.put.poznan.pl/dweiss/site/publications/download/fsacomp.pdf
+        /// "Smaller Representation of Finite State Automata",
+        /// which describes techniques to reduce the size of an FST.
+        /// However, this is not a strict implementation of the
+        /// algorithms described in this paper.
+        /// </summary>
+        /// <param name="minInCountDeref"></param>
+        /// <param name="maxDerefNodes"></param>
+        /// <param name="acceptableOverheadRatio"></param>
+        /// <returns></returns>
         internal FST<T> Pack(int minInCountDeref, int maxDerefNodes, float acceptableOverheadRatio)
         {
-            if (NodeAddress == null) throw new ArgumentException("this FST was not built with willPackFST=true");
+            // NOTE: maxDerefNodes is intentionally int: we cannot
+            // support > 2.1B deref nodes
+
+            // TODO: other things to try
+            //   - renumber the nodes to get more next / better locality?
+            //   - allow multiple input labels on an arc, so
+            //     singular chain of inputs can take one arc (on
+            //     wikipedia terms this could save another ~6%)
+            //   - in the ord case, the output '1' is presumably
+            //     very common (after NO_OUTPUT)... maybe use a bit
+            //     for it..?
+            //   - use spare bits in flags.... for top few labels /
+            //     outputs / targets
+
+            if (NodeAddress == null)
+                throw new ArgumentException("this FST was not built with willPackFST=true");
 
             var arc = new Arc<T>();
 
@@ -999,8 +1268,10 @@ namespace Lucene.Net.Util.Fst
 
             var topN = Math.Min(maxDerefNodes, InCounts.Size());
 
+            // Find top nodes with highest number of incoming arcs:
             var q = new NodeQueue(topN);
 
+            // TODO: we could use a more RAM-efficient algorithm here...
             NodeAndInCount bottom = null;
             for (var node = 0; node < InCounts.Size(); node++)
             {
@@ -1021,6 +1292,7 @@ namespace Lucene.Net.Util.Fst
 
             // Free up RAM
             InCounts = null;
+
             var topNodeMap = new HashMap<long, long>();
             for (var downTo = q.Size - 1; downTo >= 0; downTo--)
             {
@@ -1028,9 +1300,11 @@ namespace Lucene.Net.Util.Fst
                 topNodeMap.Add(n.Node, downTo);
             }
 
+            // +1 because node ords start at 1 (0 is reserved as stop node):
             var newNodeAddress = new GrowableWriter(PackedInts.BitsRequired(bytes.GetPosition()),
                                                     (int)(1 + NodeCount), acceptableOverheadRatio);
 
+            // Fill initial coarse guess:
             for (var node = 1; node <= NodeCount; node++)
                 newNodeAddress.Set(node, 1 + bytes.GetPosition() - NodeAddress.Get(node));
 
@@ -1041,16 +1315,19 @@ namespace Lucene.Net.Util.Fst
 
             FST<T> fst;
 
+            // Iterate until we converge:
             while (true)
             {
                 var changed = false;
 
+                // for assert:
                 var negDelta = false;
 
                 fst = new FST<T>(InputType, Outputs, bytes.GetBlockBits());
 
                 var writer = fst.bytes;
 
+                // Skip 0 byte since 0 is reserved target:
                 writer.WriteByte((sbyte)0);
 
                 fst.ArcWithOutputCount = 0;
@@ -1063,6 +1340,9 @@ namespace Lucene.Net.Util.Fst
 
                 long addressError = 0;
 
+                // Since we re-reverse the bytes, we now write the
+            // nodes backwards, so that BIT_TARGET_NEXT is
+                // unchanged:
                 for (var node = (int)NodeCount; node >= 1; node--)
                 {
                     fst.NodeCount++;
@@ -1081,11 +1361,15 @@ namespace Lucene.Net.Util.Fst
 
                     var retry = false;
 
+                    // for assert:
                     var anyNegDelta = false;
 
                     // in Java Lucene there is a label 'writeNode:'
-                    // writeNode:
-                    while (true)
+                    //writeNode:
+
+                    // Retry loop: possibly iterate more than once, if
+                    // this is an array'd node and bytesPerArc changes
+                    while (true) // retry writing this node
                     {
                         ReadFirstRealTargetArc(node, arc, r);
 
@@ -1127,8 +1411,7 @@ namespace Lucene.Net.Util.Fst
                             }
                             else
                             {
-                                // TODO: assert is correct here?
-                                //Debug.Assert(arc.NextFinalOutput == NO_OUTPUT);
+                                // assert arc.nextFinalOutput == NO_OUTPUT;
                             }
 
                             if (!TargetHasArcs(arc))
@@ -1148,8 +1431,7 @@ namespace Lucene.Net.Util.Fst
                                 else
                                     absPtr = topNodeMap.Count + newNodeAddress.Get((int)arc.Target) + addressError;
 
-                                var delta = newNodeAddress.Get((int)arc.Target) + addressError - writer.GetPosition() -
-                                            2;
+                                var delta = newNodeAddress.Get((int)arc.Target) + addressError - writer.GetPosition() - 2;
                                 if (delta < 0)
                                 {
                                     anyNegDelta = true;
@@ -1157,15 +1439,16 @@ namespace Lucene.Net.Util.Fst
                                 }
 
                                 if (delta < absPtr)
+                                {
                                     flags += (sbyte)BIT_TARGET_DELTA;
+                                }
                             }
                             else
                             {
                                 absPtr = 0;
                             }
 
-                            // TODO: assert correct here?
-                            Debug.Assert(flags != ARCS_AS_FIXED_ARRAY);
+                            // assert flags != ARCS_AS_FIXED_ARRAY
                             writer.WriteByte(flags);
 
                             fst.WriteLabel(writer, arc.Label);
@@ -1212,6 +1495,13 @@ namespace Lucene.Net.Util.Fst
                             {
                                 var arcBytes = (int)(writer.GetPosition() - arcStartPos);
                                 maxBytesPerArc = Math.Max(maxBytesPerArc, arcBytes);
+                                // NOTE: this may in fact go "backwards", if
+                                // somehow (rarely, possibly never) we use
+                                // more bytesPerArc in this rewrite than the
+                                // incoming FST did... but in this case we
+                                // will retry (below) so it's OK to overwrite
+                                // bytes:
+                                //wasted += bytesPerArc - arcBytes;
                                 writer.SkipBytes((int)(arcStartPos + bytesPerArc - writer.GetPosition()));
                             }
 
@@ -1224,7 +1514,10 @@ namespace Lucene.Net.Util.Fst
                         if (useArcArray)
                         {
                             if (maxBytesPerArc == bytesPerArc || (retry && maxBytesPerArc <= bytesPerArc))
+                            {
+                                // converged
                                 break;
+                            }
                         }
                         else
                         {
@@ -1246,8 +1539,13 @@ namespace Lucene.Net.Util.Fst
 
                 if (!changed)
                 {
-                    // TODO: assert correct here?
-                    Debug.Assert(!negDelta);
+                    // We don't renumber the nodes (just reverse their
+                    // order) so nodes should only point forward to
+                    // other nodes because we only produce acyclic FSTs
+                    // w/ nodes only pointing "forwards":
+                    // assert !negDelta
+
+                    // Converged!
                     break;
                 }
             }
@@ -1266,12 +1564,13 @@ namespace Lucene.Net.Util.Fst
             fst.startNode = newNodeAddress.Get((int)startNode);
 
             if (emptyOutput != null)
+            {
                 fst.emptyOutput = emptyOutput;
+            }
 
-            // TODO: assert correct here?
-            Debug.Assert(fst.NodeCount == NodeCount, "fst.NodeCount=" + fst.NodeCount + " NodeCount=" + NodeCount);
-            Debug.Assert(fst.ArcCount == ArcCount);
-            Debug.Assert(fst.ArcWithOutputCount == ArcWithOutputCount, "fst.ArcWithOutputCount=" + fst.ArcWithOutputCount + " ArcWithOutputCount=" + ArcWithOutputCount);
+            //assert fst.nodeCount == nodeCount: "fst.nodeCount=" + fst.nodeCount + " nodeCount=" + nodeCount;
+            //assert fst.arcCount == arcCount;
+            //assert fst.arcWithOutputCount == arcWithOutputCount: "fst.arcWithOutputCount=" + fst.arcWithOutputCount + " arcWithOutputCount=" + arcWithOutputCount;
 
             fst.bytes.Finish();
             fst.CacheRootArcs();
@@ -1290,69 +1589,136 @@ namespace Lucene.Net.Util.Fst
         internal const int BIT_LAST_ARC = 1 << 1;
         internal const int BIT_TARGET_NEXT = 1 << 2;
 
+        // TODO: we can free up a bit if we can nuke this:
         internal const int BIT_STOP_NODE = 1 << 3;
         internal const int BIT_ARC_HAS_OUTPUT = 1 << 4;
         internal const int BIT_ARC_HAS_FINAL_OUTPUT = 1 << 5;
 
+        // Arcs are stored as fixed-size (per entry) array, so
+        // that we can find an arc using binary search.  We do
+        // this when the number of arcs is > NUM_ARCS_ARRAY:
+
+        // If set, the target node is delta coded vs current position:
         internal const int BIT_TARGET_DELTA = 1 << 6;
 
+        // We use this as a marker (because this one flag is
+        // illegal by itself ...):
         internal const sbyte ARCS_AS_FIXED_ARRAY = BIT_ARC_HAS_FINAL_OUTPUT;
 
+        /// <summary>
+        /// <see cref="UnCompiledNode"/>
+        /// </summary>
         internal const int FIXED_ARRAY_SHALLOW_DISTANCE = 3;
-        
+
+        /// <summary>
+        /// <see cref="UnCompiledNode"/>
+        /// </summary>
         internal const int FIXED_ARRAY_NUM_ARCS_SHALLOW = 5;
 
+        /// <summary>
+        /// <see cref="UnCompiledNode"/>
+        /// </summary>
         internal const int FIXED_ARRAY_NUM_ARCS_DEEP = 10;
 
+        // Increment version to change it
         internal const string FILE_FORMAT_NAME = "FST";
         internal const int VERSION_START = 0;
 
+        /// <summary>
+        /// Changed numBytesPerArc for array'd case from byte to int.
+        /// </summary>
         internal const int VERSION_INT_NUM_BYTES_PER_ARC = 1;
 
+        /// <summary>
+        /// Write BYTE2 labels as 2-byte short, not vInt.
+        /// </summary>
         internal const int VERSION_SHORT_BYTE2_LABELS = 2;
 
-        internal const int VERSIONpacked = 3;
+        /// <summary>
+        /// Added optional packed format.
+        /// </summary>
+        internal const int VERSION_PACKED = 3;
 
+        /// <summary>
+        /// Changed from int to vInt for encoding arc targets.
+        /// Also changed maxBytesPerArc from int to vInt in the array case.
+        /// </summary>
         internal const int VERSION_VINT_TARGET = 4;
 
         internal const int VERSION_CURRENT = VERSION_VINT_TARGET;
 
+        // Never serialized; just used to represent the virtual
+        // final node w/ no arcs:
         internal const long FINAL_END_NODE = -1;
 
+        // Never serialized; just used to represent the virtual
+        // non-final node w/ no arcs:
         internal const long NON_FINAL_END_NODE = 0;
 
-
+        /// <summary>
+        /// Reads bytes stored in an FST.
+        /// </summary>
         public abstract class BytesReader : DataInput
         {
+            /// <summary>
+            /// Current read position
+            /// </summary>
             public abstract long Position { get; set; }
 
+            /// <summary>
+            /// Returns true if this reader uses reversed bytes 
+            /// under-the-hood.
+            /// </summary>
+            /// <returns>True if bytes are read in reverse; otherwise false.</returns>
             public abstract bool Reversed();
 
+            /// <summary>
+            /// Skips bytes.
+            /// </summary>
+            /// <param name="count">The number of bytes to skip.</param>
             public abstract void SkipBytes(int count);
         }
 
+        /// <summary>
+        /// Specifies allowed range of each int input label for this FST.
+        /// </summary>
         public enum INPUT_TYPE { BYTE1, BYTE2, BYTE4 }
 
+        /// <summary>
+        /// Represents a single arc.
+        /// </summary>
+        /// <typeparam name="T">The arc's output type.</typeparam>
         public sealed class Arc<T>
         {
             public int Label { get; set; }
             public T Output { get; set; }
 
+            // From node (ord or address); currently only used when
+            // building an FST w/ willPackFST=true:
             internal long Node { get; set; }
 
+            /// <summary>
+            /// To node (ord or address)
+            /// </summary>
             public long Target { get; set; }
 
             internal sbyte Flags { get; set; }
-
             public T NextFinalOutput { get; set; }
 
+            // address (into the byte[]), or ord/address if label == END_LABEL
             internal long NextArc { get; set; }
 
+            // This is non-zero if current arcs are fixed array:
             internal long PosArcsStart { get; set; }
             internal int BytesPerArc { get; set; }
             internal int ArcIdx { get; set; }
             internal int NumArcs { get; set; }
 
+            /// <summary>
+            /// Copies all fields from <paramref name="other"/> into this arc.
+            /// </summary>
+            /// <param name="other">The arc to copy from.</param>
+            /// <returns>This arc, for chaining.</returns>
             public Arc<T> CopyFrom(Arc<T> other)
             {
                 Node = other.Node;
@@ -1436,8 +1802,11 @@ namespace Lucene.Net.Util.Fst
 
             public int CompareTo(NodeAndInCount other)
             {
-                if (Count > other.Count) return 1;
-                if (Count < other.Count) return -1;
+                if (Count > other.Count) 
+                    return 1;
+                if (Count < other.Count) 
+                    return -1;
+                // Tie-break: the smaller node compares as greater.
                 return other.Node - Node;
             }
         }
@@ -1452,8 +1821,7 @@ namespace Lucene.Net.Util.Fst
             public override bool LessThan(NodeAndInCount a, NodeAndInCount b)
             {
                 var cmp = a.CompareTo(b);
-                // TODO: assert correct here?
-                Debug.Assert(cmp != 0);
+                // assert cmp != 0;
                 return cmp < 0;
             }
         }
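
A hedged aside (not part of the commit): the BIT_* constants above OR into a
single header byte per serialized arc, and ARCS_AS_FIXED_ARRAY works as a
sentinel because, per the comment in the code, that one flag never appears by
itself on a real arc. A minimal C# sketch, with invented local names:

    static bool IsFixedArrayMarker(sbyte flags)
    {
        const sbyte BIT_ARC_HAS_FINAL_OUTPUT = 1 << 5; // mirrors FST.cs
        return flags == BIT_ARC_HAS_FINAL_OUTPUT;      // ARCS_AS_FIXED_ARRAY
    }

    // Composing a header byte for a final, last arc that carries an output:
    //   sbyte flags = (sbyte)((1 << 0) + (1 << 1) + (1 << 4));
    //   IsFixedArrayMarker(flags) == false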


[21/50] [abbrv] git commit: Massive cleanup, reducing compiler errors

Posted by mh...@apache.org.
Massive cleanup, reducing compiler errors


Project: http://git-wip-us.apache.org/repos/asf/lucenenet/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucenenet/commit/80561f72
Tree: http://git-wip-us.apache.org/repos/asf/lucenenet/tree/80561f72
Diff: http://git-wip-us.apache.org/repos/asf/lucenenet/diff/80561f72

Branch: refs/heads/branch_4x
Commit: 80561f72bcad93c9cd46011a0d3efb2031db996b
Parents: fc17ce3
Author: Paul Irwin <pa...@gmail.com>
Authored: Tue Jul 23 20:15:43 2013 -0400
Committer: Paul Irwin <pa...@gmail.com>
Committed: Tue Jul 23 20:15:43 2013 -0400

----------------------------------------------------------------------
 src/core/Analysis/TokenFilter.cs                |    2 +-
 src/core/Analysis/TokenStream.cs                |    4 +-
 src/core/Analysis/TokenStreamToAutomaton.cs     |    2 +-
 src/core/Codecs/BlockTreeTermsWriter.cs         |    2 +-
 .../CompressingStoredFieldsFormat.cs            |   43 +-
 .../CompressingStoredFieldsIndexReader.cs       |  209 +--
 .../CompressingStoredFieldsIndexWriter.cs       |   50 +-
 .../CompressingStoredFieldsReader.cs            |  162 +--
 .../CompressingStoredFieldsWriter.cs            |  440 +++---
 .../Compressing/CompressingTermVectorsFormat.cs |   34 +-
 .../Compressing/CompressingTermVectorsReader.cs | 1352 +++++++++++++-----
 .../Compressing/CompressingTermVectorsWriter.cs |  933 ++++++------
 src/core/Codecs/Compressing/CompressionMode.cs  |   12 +-
 src/core/Codecs/FieldsProducer.cs               |    6 +-
 .../Lucene3x/Lucene3xStoredFieldsReader.cs      |    2 +-
 src/core/Codecs/Lucene3x/SegmentTermDocs.cs     |    2 +-
 src/core/Codecs/MultiLevelSkipListReader.cs     |    2 +-
 src/core/Codecs/PostingsWriterBase.cs           |    8 +-
 src/core/Document/Document.cs                   |   10 +
 src/core/Document/Field.cs                      |    8 +-
 src/core/Document/FieldType.cs                  |    2 +-
 src/core/Index/AtomicReader.cs                  |    2 +-
 src/core/Index/AtomicReaderContext.cs           |    2 +-
 src/core/Index/BaseCompositeReader.cs           |    7 +-
 src/core/Index/BinaryDocValuesWriter.cs         |    6 +-
 src/core/Index/ByteSliceReader.cs               |   14 +-
 src/core/Index/ByteSliceWriter.cs               |   28 +-
 src/core/Index/CompositeReader.cs               |   22 +-
 src/core/Index/CompositeReaderContext.cs        |    2 +-
 src/core/Index/DocValuesProcessor.cs            |    2 +-
 src/core/Index/DocumentsWriter.cs               |    2 +-
 src/core/Index/FilterDirectoryReader.cs         |    2 +-
 src/core/Index/FreqProxTermsWriterPerField.cs   |   10 +-
 src/core/Index/IIndexableField.cs               |    2 +-
 src/core/Index/IndexWriter.cs                   |    2 +-
 src/core/Index/IndexWriterConfig.cs             |   12 +-
 .../Index/KeepOnlyLastCommitDeletionPolicy.cs   |    4 +-
 src/core/Index/LogMergePolicy.cs                |    2 +-
 src/core/Index/MultiDocValues.cs                |   20 +-
 src/core/Index/MultiReader.cs                   |    2 +-
 src/core/Index/NormsConsumerPerField.cs         |    4 +
 src/core/Index/ParallelCompositeReader.cs       |    4 +-
 src/core/Index/SegmentInfos.cs                  |    2 +-
 src/core/Index/SlowCompositeReaderWrapper.cs    |   48 +-
 src/core/Index/StandardDirectoryReader.cs       |   10 +-
 src/core/Index/TermVectorsConsumerPerField.cs   |   14 +-
 src/core/Index/TermsEnum.cs                     |    4 +-
 src/core/Index/TermsHash.cs                     |    2 +-
 src/core/Index/TermsHashPerField.cs             |    8 +-
 src/core/Lucene.Net.csproj                      |    1 +
 src/core/Search/AutomatonQuery.cs               |    2 +-
 src/core/Search/BitsFilteredDocIdSet.cs         |    2 +-
 src/core/Search/ConstantScoreAutoRewrite.cs     |   11 +-
 src/core/Search/DocTermOrdsRangeFilter.cs       |    6 +-
 src/core/Search/FieldCacheRangeFilter.cs        |   34 +-
 src/core/Search/FieldComparator.cs              |   26 +-
 src/core/Search/FieldValueHitQueue.cs           |   16 +-
 src/core/Search/NumericRangeQuery.cs            |    6 +-
 src/core/Search/Payloads/PayloadNearQuery.cs    |  361 ++---
 src/core/Search/Payloads/PayloadSpanUtil.cs     |    4 +-
 src/core/Search/PhraseQuery.cs                  |    9 +-
 src/core/Search/PositiveScoresOnlyCollector.cs  |   82 +-
 src/core/Search/PrefixQuery.cs                  |    2 +-
 src/core/Search/PrefixTermsEnum.cs              |    2 +-
 src/core/Search/ReqExclScorer.cs                |   14 +-
 src/core/Search/ReqOptSumScorer.cs              |  140 +-
 src/core/Search/ScoreCachingWrappingScorer.cs   |   18 +-
 src/core/Search/ScoringRewrite.cs               |    7 +-
 src/core/Search/Similarities/BM25Similarity.cs  |   65 +-
 src/core/Search/Similarities/BasicStats.cs      |    7 +-
 src/core/Search/Similarities/MultiSimilarity.cs |    9 +-
 .../Similarities/PerFieldSimilarityWrapper.cs   |    7 +-
 src/core/Search/Similarities/TFIDFSimilarity.cs |    9 +-
 src/core/Search/Spans/FieldMaskingSpanQuery.cs  |    2 +-
 src/core/Search/Spans/NearSpansOrdered.cs       |  785 +++++-----
 src/core/Search/Spans/NearSpansUnordered.cs     |  783 +++++-----
 src/core/Search/Spans/SpanFirstQuery.cs         |    7 +-
 .../Search/Spans/SpanMultiTermQueryWrapper.cs   |   30 +-
 .../Search/Spans/SpanNearPayloadCheckQuery.cs   |    4 +-
 src/core/Search/Spans/SpanNearQuery.cs          |    4 +-
 src/core/Search/Spans/SpanNotQuery.cs           |   15 +-
 src/core/Search/Spans/SpanOrQuery.cs            |   17 +-
 src/core/Search/Spans/SpanPayloadCheckQuery.cs  |    8 +-
 src/core/Search/Spans/SpanPositionCheckQuery.cs |   27 +-
 src/core/Search/Spans/SpanPositionRangeQuery.cs |   34 +-
 src/core/Search/Spans/SpanQuery.cs              |    2 +-
 src/core/Search/Spans/SpanScorer.cs             |   11 +-
 src/core/Search/Spans/SpanTermQuery.cs          |    6 +-
 src/core/Search/Spans/SpanWeight.cs             |    7 +-
 src/core/Search/Spans/Spans.cs                  |    3 +-
 src/core/Search/Spans/TermSpans.cs              |    2 +-
 src/core/Search/TermCollectingRewrite.cs        |    4 +-
 src/core/Search/TopFieldCollector.cs            |    2 +-
 src/core/Support/ByteBuffer.cs                  |   20 +-
 src/core/Util/Automaton/State.cs                |    5 +
 src/core/Util/ByteBlockPool.cs                  |    4 +-
 src/core/Util/DocIdBitSet.cs                    |   14 +-
 src/core/Util/Fst/BytesStore.cs                 |    6 +-
 src/core/Util/Fst/FSTEnum.cs                    |    3 +-
 src/core/Util/Fst/ForwardBytesReader.cs         |    4 +-
 src/core/Util/Fst/ReverseBytesReader.cs         |    4 +-
 src/core/Util/NamedThreadFactory.cs             |    2 +-
 src/core/Util/OpenBitSetIterator.cs             |   14 +-
 src/core/Util/Packed/AppendingLongBuffer.cs     |    6 +-
 src/core/Util/Packed/BlockPackedWriter.cs       |    2 +-
 .../Util/Packed/MonotonicAppendingLongBuffer.cs |    6 +-
 src/core/Util/Packed/Packed64SingleBlock.cs     |   17 +-
 src/core/Util/RollingBuffer.cs                  |    2 +-
 108 files changed, 3637 insertions(+), 2589 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucenenet/blob/80561f72/src/core/Analysis/TokenFilter.cs
----------------------------------------------------------------------
diff --git a/src/core/Analysis/TokenFilter.cs b/src/core/Analysis/TokenFilter.cs
index b1b9435..f818ff6 100644
--- a/src/core/Analysis/TokenFilter.cs
+++ b/src/core/Analysis/TokenFilter.cs
@@ -56,7 +56,7 @@ namespace Lucene.Net.Analysis
             {
                 if (input != null)
                 {
-                    input.Close();
+                    input.Dispose();
                 }
             }
 

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/80561f72/src/core/Analysis/TokenStream.cs
----------------------------------------------------------------------
diff --git a/src/core/Analysis/TokenStream.cs b/src/core/Analysis/TokenStream.cs
index 430e28f..518774b 100644
--- a/src/core/Analysis/TokenStream.cs
+++ b/src/core/Analysis/TokenStream.cs
@@ -150,6 +150,8 @@ namespace Lucene.Net.Analysis
             Dispose(true);
         }
 
-	    protected abstract void Dispose(bool disposing);
+        protected virtual void Dispose(bool disposing)
+        {
+        }
 	}
 }
\ No newline at end of file
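
The Close-to-Dispose changes here follow the standard .NET dispose pattern,
where the public Dispose() delegates to a virtual Dispose(bool). A generic
sketch of that pattern (not the port's actual classes) for reference:

    using System;

    public class ResourceHolder : IDisposable
    {
        public void Dispose()
        {
            Dispose(true);             // release resources deterministically
            GC.SuppressFinalize(this); // generic pattern; the port omits this
        }

        protected virtual void Dispose(bool disposing)
        {
            if (disposing)
            {
                // dispose owned managed resources (e.g. a wrapped input)
            }
        }
    }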

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/80561f72/src/core/Analysis/TokenStreamToAutomaton.cs
----------------------------------------------------------------------
diff --git a/src/core/Analysis/TokenStreamToAutomaton.cs b/src/core/Analysis/TokenStreamToAutomaton.cs
index 9646a7c..8264f75 100644
--- a/src/core/Analysis/TokenStreamToAutomaton.cs
+++ b/src/core/Analysis/TokenStreamToAutomaton.cs
@@ -22,7 +22,7 @@ namespace Lucene.Net.Analysis
             // Any tokens that start at our position leave from this state:
             internal State leaving;
 
-            public override void Reset()
+            public void Reset()
             {
                 arriving = null;
                 leaving = null;

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/80561f72/src/core/Codecs/BlockTreeTermsWriter.cs
----------------------------------------------------------------------
diff --git a/src/core/Codecs/BlockTreeTermsWriter.cs b/src/core/Codecs/BlockTreeTermsWriter.cs
index 2a0012f..14d5444 100644
--- a/src/core/Codecs/BlockTreeTermsWriter.cs
+++ b/src/core/Codecs/BlockTreeTermsWriter.cs
@@ -310,7 +310,7 @@ namespace Lucene.Net.Codecs
                     //if (DEBUG) {
                     //  System.out.println("      add sub=" + indexEnt.input + " " + indexEnt.input + " output=" + indexEnt.output);
                     //}
-                    builder.Add(Util.ToIntsRef(indexEnt.Input, scratchIntsRef), indexEnt.Output);
+                    builder.Add(Lucene.Net.Util.Fst.Util.ToIntsRef(indexEnt.Input, scratchIntsRef), indexEnt.Output);
                 }
             }
         }
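
The fully qualified call above is needed because, inside Lucene.Net.Codecs,
the bare identifier Util binds to the sibling Lucene.Net.Util namespace
rather than the Lucene.Net.Util.Fst.Util class. A using alias would be an
equivalent (hypothetical) fix:

    using FstUtil = Lucene.Net.Util.Fst.Util; // then: FstUtil.ToIntsRef(...)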

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/80561f72/src/core/Codecs/Compressing/CompressingStoredFieldsFormat.cs
----------------------------------------------------------------------
diff --git a/src/core/Codecs/Compressing/CompressingStoredFieldsFormat.cs b/src/core/Codecs/Compressing/CompressingStoredFieldsFormat.cs
index 991daee..b6725c7 100644
--- a/src/core/Codecs/Compressing/CompressingStoredFieldsFormat.cs
+++ b/src/core/Codecs/Compressing/CompressingStoredFieldsFormat.cs
@@ -37,11 +37,10 @@ namespace Lucene.Net.Codecs.Compressing
 {
     public class CompressingStoredFieldsFormat : StoredFieldsFormat
     {
-
-        private string _formatName;
-        private string _segmentSuffix;
-        private CompressionMode _compressionMode;
-        private int _chunkSize;
+        private readonly string formatName;
+        private readonly string segmentSuffix;
+        private readonly CompressionMode compressionMode;
+        private readonly int chunkSize;
 
         /**
          * Create a new {@link CompressingStoredFieldsFormat} with an empty segment 
@@ -50,10 +49,8 @@ namespace Lucene.Net.Codecs.Compressing
          * @see CompressingStoredFieldsFormat#CompressingStoredFieldsFormat(String, String, CompressionMode, int)
          */
         public CompressingStoredFieldsFormat(string formatName, CompressionMode compressionMode, int chunkSize)
+            : this(formatName, "", compressionMode, chunkSize)
         {
-            _formatName = formatName;
-            _compressionMode = compressionMode;
-            _chunkSize = chunkSize;
         }
 
         /**
@@ -92,35 +89,33 @@ namespace Lucene.Net.Codecs.Compressing
         public CompressingStoredFieldsFormat(string formatName, string segmentSuffix,
                                              CompressionMode compressionMode, int chunkSize)
         {
-            this._formatName = formatName;
-            this._segmentSuffix = segmentSuffix;
-            this._compressionMode = compressionMode;
+            this.formatName = formatName;
+            this.segmentSuffix = segmentSuffix;
+            this.compressionMode = compressionMode;
             if (chunkSize < 1)
             {
-                throw new System.ArgumentOutOfRangeException("chunkSize must be >= 1");
+                throw new ArgumentOutOfRangeException("chunkSize must be >= 1");
             }
-            this._chunkSize = chunkSize;
+            this.chunkSize = chunkSize;
 
         }
-
-        public override StoredFieldsReader fieldsReader(Directory directory, SegmentInfo si,
-            FieldInfos fn, IOContext context)
+        
+        public override StoredFieldsReader FieldsReader(Directory directory, SegmentInfo si, FieldInfos fn, IOContext context)
         {
-            return new CompressingStoredFieldsReader(directory, si, _segmentSuffix, fn,
-                context, _formatName, _compressionMode);
+            return new CompressingStoredFieldsReader(directory, si, segmentSuffix, fn,
+                context, formatName, compressionMode);
         }
 
-        public override StoredFieldsWriter fieldsWriter(Directory directory, SegmentInfo si,
-            IOContext context)
+        public override StoredFieldsWriter FieldsWriter(Directory directory, SegmentInfo si, IOContext context)
         {
-            return new CompressingStoredFieldsWriter(directory, si, _segmentSuffix, context,
-                _formatName, _compressionMode, _chunkSize);
+            return new CompressingStoredFieldsWriter(directory, si, segmentSuffix, context,
+                formatName, compressionMode, chunkSize);
         }
 
         public override string ToString()
         {
-            return this.GetType().Name + "(compressionMode=" + _compressionMode
-                + ", chunkSize=" + _chunkSize + ")";
+            return this.GetType().Name + "(compressionMode=" + compressionMode
+                + ", chunkSize=" + chunkSize + ")";
         }
 
     }
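
As a usage sketch (hypothetical values; CompressionMode.FAST as in upstream
Lucene), the constructor chaining above means both overloads pass through the
same chunkSize validation:

    var ok = new CompressingStoredFieldsFormat("MyFormat", CompressionMode.FAST, 1 << 14);
    // new CompressingStoredFieldsFormat("MyFormat", CompressionMode.FAST, 0)
    // would throw ArgumentOutOfRangeException: chunkSize must be >= 1.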

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/80561f72/src/core/Codecs/Compressing/CompressingStoredFieldsIndexReader.cs
----------------------------------------------------------------------
diff --git a/src/core/Codecs/Compressing/CompressingStoredFieldsIndexReader.cs b/src/core/Codecs/Compressing/CompressingStoredFieldsIndexReader.cs
index d5a16df..8023c44 100644
--- a/src/core/Codecs/Compressing/CompressingStoredFieldsIndexReader.cs
+++ b/src/core/Codecs/Compressing/CompressingStoredFieldsIndexReader.cs
@@ -10,24 +10,24 @@ using System.Text;
 
 namespace Lucene.Net.Codecs.Compressing
 {
-    public sealed class CompressingStoredFieldsIndexReader: ICloneable //Closable??
+    public sealed class CompressingStoredFieldsIndexReader : ICloneable, IDisposable
     {
-        int maxDoc;
-        int[] docBases;
-        long[] startPointers;
-        int[] avgChunkDocs;
-        long[] avgChunkSizes;
-        PackedInts.Reader[] docBasesDeltas; // delta from the avg
-        PackedInts.Reader[] startPointersDeltas; // delta from the avg
+        internal readonly IndexInput fieldsIndexIn;
 
-        IndexInput fieldsIndexIn;
-
-        static long MoveLowOrderBitToSign(long n) 
+        internal static long MoveLowOrderBitToSign(long n)
         {
             return ((Number.URShift(n, 1) ^ -(n & 1)));
         }
 
-        public CompressingStoredFieldsIndexReader(IndexInput fieldsIndexIn, SegmentInfo si) 
+        internal readonly int maxDoc;
+        internal readonly int[] docBases;
+        internal readonly long[] startPointers;
+        internal readonly int[] avgChunkDocs;
+        internal readonly long[] avgChunkSizes;
+        internal readonly PackedInts.IReader[] docBasesDeltas; // delta from the avg
+        internal readonly PackedInts.IReader[] startPointersDeltas; // delta from the avg
+
+        public CompressingStoredFieldsIndexReader(IndexInput fieldsIndexIn, SegmentInfo si)
         {
             this.fieldsIndexIn = fieldsIndexIn;
             maxDoc = si.DocCount;
@@ -42,41 +42,46 @@ namespace Lucene.Net.Codecs.Compressing
 
             int blockCount = 0;
 
-            for (;;) {
-              int numChunks = fieldsIndexIn.ReadVInt();
-              if (numChunks == 0) {
-                break;
-              }
-
-              if (blockCount == docBases.Length) {
-                int newSize = ArrayUtil.Oversize(blockCount + 1, 8);
-                docBases = Arrays.CopyOf(docBases, newSize);
-                startPointers = Arrays.CopyOf(startPointers, newSize);
-                avgChunkDocs = Arrays.CopyOf(avgChunkDocs, newSize);
-                avgChunkSizes = Arrays.CopyOf(avgChunkSizes, newSize);
-                docBasesDeltas = Arrays.CopyOf(docBasesDeltas, newSize);
-                startPointersDeltas = Arrays.CopyOf(startPointersDeltas, newSize);
-              }
-
-              // doc bases
-              docBases[blockCount] = fieldsIndexIn.ReadVInt();
-              avgChunkDocs[blockCount] = fieldsIndexIn.ReadVInt();
-              int bitsPerDocBase = fieldsIndexIn.ReadVInt();
-              if (bitsPerDocBase > 32) {
-                throw new CorruptIndexException("Corrupted");
-              }
-              docBasesDeltas[blockCount] = (Lucene.Net.Util.Packed.PackedInts.Reader)PackedInts.GetReaderNoHeader(fieldsIndexIn, PackedInts.Format.PACKED, packedIntsVersion, numChunks, bitsPerDocBase);
-
-              // start pointers
-              startPointers[blockCount] = fieldsIndexIn.ReadVLong();
-              avgChunkSizes[blockCount] = fieldsIndexIn.ReadVLong();
-              int bitsPerStartPointer = fieldsIndexIn.ReadVInt();
-              if (bitsPerStartPointer > 64) {
-                throw new CorruptIndexException("Corrupted");
-              }
-              startPointersDeltas[blockCount] = (Lucene.Net.Util.Packed.PackedInts.Reader)PackedInts.GetReaderNoHeader(fieldsIndexIn, PackedInts.Format.PACKED, packedIntsVersion, numChunks, bitsPerStartPointer);
-
-              ++blockCount;
+            for (; ; )
+            {
+                int numChunks = fieldsIndexIn.ReadVInt();
+                if (numChunks == 0)
+                {
+                    break;
+                }
+
+                if (blockCount == docBases.Length)
+                {
+                    int newSize = ArrayUtil.Oversize(blockCount + 1, 8);
+                    docBases = Arrays.CopyOf(docBases, newSize);
+                    startPointers = Arrays.CopyOf(startPointers, newSize);
+                    avgChunkDocs = Arrays.CopyOf(avgChunkDocs, newSize);
+                    avgChunkSizes = Arrays.CopyOf(avgChunkSizes, newSize);
+                    docBasesDeltas = Arrays.CopyOf(docBasesDeltas, newSize);
+                    startPointersDeltas = Arrays.CopyOf(startPointersDeltas, newSize);
+                }
+
+                // doc bases
+                docBases[blockCount] = fieldsIndexIn.ReadVInt();
+                avgChunkDocs[blockCount] = fieldsIndexIn.ReadVInt();
+                int bitsPerDocBase = fieldsIndexIn.ReadVInt();
+                if (bitsPerDocBase > 32)
+                {
+                    throw new CorruptIndexException("Corrupted");
+                }
+                docBasesDeltas[blockCount] = (Lucene.Net.Util.Packed.PackedInts.Reader)PackedInts.GetReaderNoHeader(fieldsIndexIn, PackedInts.Format.PACKED, packedIntsVersion, numChunks, bitsPerDocBase);
+
+                // start pointers
+                startPointers[blockCount] = fieldsIndexIn.ReadVLong();
+                avgChunkSizes[blockCount] = fieldsIndexIn.ReadVLong();
+                int bitsPerStartPointer = fieldsIndexIn.ReadVInt();
+                if (bitsPerStartPointer > 64)
+                {
+                    throw new CorruptIndexException("Corrupted");
+                }
+                startPointersDeltas[blockCount] = (Lucene.Net.Util.Packed.PackedInts.Reader)PackedInts.GetReaderNoHeader(fieldsIndexIn, PackedInts.Format.PACKED, packedIntsVersion, numChunks, bitsPerStartPointer);
+
+                ++blockCount;
             }
 
             this.docBases = Arrays.CopyOf(docBases, blockCount);
@@ -99,77 +104,93 @@ namespace Lucene.Net.Codecs.Compressing
             this.startPointersDeltas = other.startPointersDeltas;
         }
 
-        private int Block(int docID) 
+        private int Block(int docID)
         {
             int lo = 0, hi = docBases.Length - 1;
-            while (lo <= hi) {
-              int mid = Number.URShift(lo + hi, 1);
-              int midValue = docBases[mid];
-              if (midValue == docID) {
-                return mid;
-              } else if (midValue < docID) {
-                lo = mid + 1;
-              } else {
-                hi = mid - 1;
-              }
+            while (lo <= hi)
+            {
+                int mid = Number.URShift(lo + hi, 1);
+                int midValue = docBases[mid];
+                if (midValue == docID)
+                {
+                    return mid;
+                }
+                else if (midValue < docID)
+                {
+                    lo = mid + 1;
+                }
+                else
+                {
+                    hi = mid - 1;
+                }
             }
             return hi;
         }
 
-        private int relativeDocBase(int block, int relativeChunk) 
+        private int RelativeDocBase(int block, int relativeChunk)
         {
             int expected = avgChunkDocs[block] * relativeChunk;
             long delta = MoveLowOrderBitToSign(docBasesDeltas[block].Get(relativeChunk));
-            return expected + (int) delta;
+            return expected + (int)delta;
         }
 
-          private long relativeStartPointer(int block, int relativeChunk) 
-          {
+        private long RelativeStartPointer(int block, int relativeChunk)
+        {
             long expected = avgChunkSizes[block] * relativeChunk;
             long delta = MoveLowOrderBitToSign(startPointersDeltas[block].Get(relativeChunk));
             return expected + delta;
-          }
+        }
 
-          private int relativeChunk(int block, int relativeDoc) 
-          {
+        private int RelativeChunk(int block, int relativeDoc)
+        {
             int lo = 0, hi = docBasesDeltas[block].Size() - 1;
-            while (lo <= hi) {
-              int mid = Number.URShift(lo + hi, 1);
-              int midValue = relativeDocBase(block, mid);
-              if (midValue == relativeDoc) {
-                return mid;
-              } else if (midValue < relativeDoc) {
-                lo = mid + 1;
-              } else {
-                hi = mid - 1;
-              }
+            while (lo <= hi)
+            {
+                int mid = Number.URShift(lo + hi, 1);
+                int midValue = RelativeDocBase(block, mid);
+                if (midValue == relativeDoc)
+                {
+                    return mid;
+                }
+                else if (midValue < relativeDoc)
+                {
+                    lo = mid + 1;
+                }
+                else
+                {
+                    hi = mid - 1;
+                }
             }
             return hi;
-          }
+        }
 
-          public long GetStartPointer(int docID) 
-          {
-            if (docID < 0 || docID >= maxDoc) {
-              throw new ArgumentException("docID out of range [0-" + maxDoc + "]: " + docID);
+        public long GetStartPointer(int docID)
+        {
+            if (docID < 0 || docID >= maxDoc)
+            {
+                throw new ArgumentException("docID out of range [0-" + maxDoc + "]: " + docID);
             }
             int block = Block(docID);
-            int relativeChunk = this.relativeChunk(block, docID - docBases[block]);
-            return startPointers[block] + relativeStartPointer(block, relativeChunk);
-          }
-
-          public override CompressingStoredFieldsIndexReader clone() 
-          {
-            if (fieldsIndexIn == null) {
-              return this;
-            } else {
-              return new CompressingStoredFieldsIndexReader(this);
+            int relativeChunk = this.RelativeChunk(block, docID - docBases[block]);
+            return startPointers[block] + RelativeStartPointer(block, relativeChunk);
+        }
+
+        public object Clone()
+        {
+            if (fieldsIndexIn == null)
+            {
+                return this;
+            }
+            else
+            {
+                return new CompressingStoredFieldsIndexReader(this);
             }
-          }
+        }
 
-          public override void close()
-          {
+        public void Dispose()
+        {
             IOUtils.Close(fieldsIndexIn);
-          }
+        }
 
     }
 }
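
MoveLowOrderBitToSign above is the decoding half of a zigzag pair; its
encoding half, MoveSignToLowOrderBit, appears in the index writer below. A
standalone round-trip sketch, substituting a plain cast for the port's
Number.URShift helper:

    static long Encode(long n) { return (n >> 63) ^ (n << 1); }             // sign -> low bit
    static long Decode(long n) { return (long)((ulong)n >> 1) ^ -(n & 1); } // low bit -> sign

    // Encode(-3) == 5, Decode(5) == -3; Encode(3) == 6, Decode(6) == 3.
    // Small negative deltas therefore stay small when written as vLongs.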

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/80561f72/src/core/Codecs/Compressing/CompressingStoredFieldsIndexWriter.cs
----------------------------------------------------------------------
diff --git a/src/core/Codecs/Compressing/CompressingStoredFieldsIndexWriter.cs b/src/core/Codecs/Compressing/CompressingStoredFieldsIndexWriter.cs
index ece363a..3b348cf 100644
--- a/src/core/Codecs/Compressing/CompressingStoredFieldsIndexWriter.cs
+++ b/src/core/Codecs/Compressing/CompressingStoredFieldsIndexWriter.cs
@@ -9,40 +9,40 @@ namespace Lucene.Net.Codecs.Compressing
 {
     public sealed class CompressingStoredFieldsIndexWriter : IDisposable
     {
-        static readonly int BLOCK_SIZE = 1024; // number of chunks to serialize at once
-
-        private IndexOutput fieldsIndexOut;
-        private int totalDocs;
-        private int blockDocs;
-        private int blockChunks;
-        private long firstStartPointer;
-        private long maxStartPointer;
-        private int[] docBaseDeltas;
-        private long[] startPointerDeltas;
-
-        static long moveSignToLowOrderBit(long n)
+        internal const int BLOCK_SIZE = 1024; // number of chunks to serialize at once
+
+        internal static long MoveSignToLowOrderBit(long n)
         {
             return (n >> 63) ^ (n << 1);
         }
 
-        CompressingStoredFieldsIndexWriter(IndexOutput indexOutput)
+        internal readonly IndexOutput fieldsIndexOut;
+        internal int totalDocs;
+        internal int blockDocs;
+        internal int blockChunks;
+        internal long firstStartPointer;
+        internal long maxStartPointer;
+        internal readonly int[] docBaseDeltas;
+        internal readonly long[] startPointerDeltas;
+
+        internal CompressingStoredFieldsIndexWriter(IndexOutput indexOutput)
         {
             this.fieldsIndexOut = indexOutput;
-            reset();
+            Reset();
             totalDocs = 0;
             docBaseDeltas = new int[BLOCK_SIZE];
             startPointerDeltas = new long[BLOCK_SIZE];
             fieldsIndexOut.WriteVInt(PackedInts.VERSION_CURRENT);
         }
 
-        private void reset()
+        private void Reset()
         {
             blockChunks = 0;
             blockDocs = 0;
             firstStartPointer = -1; // means unset
         }
 
-        private void writeBlock()
+        private void WriteBlock()
         {
             fieldsIndexOut.WriteVInt(blockChunks);
 
@@ -72,7 +72,7 @@ namespace Lucene.Net.Codecs.Compressing
             for (int i = 0; i < blockChunks; ++i)
             {
                 int delta = docBase - avgChunkDocs * i;
-                maxDelta |= moveSignToLowOrderBit(delta);
+                maxDelta |= MoveSignToLowOrderBit(delta);
                 docBase += docBaseDeltas[i];
             }
 
@@ -84,7 +84,7 @@ namespace Lucene.Net.Codecs.Compressing
             for (int i = 0; i < blockChunks; ++i)
             {
                 long delta = docBase - avgChunkDocs * i;
-                writer.Add(moveSignToLowOrderBit(delta));
+                writer.Add(MoveSignToLowOrderBit(delta));
                 docBase += docBaseDeltas[i];
             }
             writer.Finish();
@@ -107,7 +107,7 @@ namespace Lucene.Net.Codecs.Compressing
             {
                 startPointer += startPointerDeltas[i];
                 long delta = startPointer - avgChunkSize * i;
-                maxDelta |= moveSignToLowOrderBit(delta);
+                maxDelta |= MoveSignToLowOrderBit(delta);
             }
 
             int bitsPerStartPointer = PackedInts.BitsRequired(maxDelta);
@@ -119,17 +119,17 @@ namespace Lucene.Net.Codecs.Compressing
             {
                 startPointer += startPointerDeltas[i];
                 long delta = startPointer - avgChunkSize * i;
-                writer.Add(moveSignToLowOrderBit(delta));
+                writer.Add(MoveSignToLowOrderBit(delta));
             }
             writer.Finish();
         }
 
-        void writeIndex(int numDocs, long startPointer)
+        internal void WriteIndex(int numDocs, long startPointer)
         {
             if (blockChunks == BLOCK_SIZE)
             {
-                writeBlock();
-                reset();
+                WriteBlock();
+                Reset();
             }
 
             if (firstStartPointer == -1)
@@ -146,7 +146,7 @@ namespace Lucene.Net.Codecs.Compressing
             maxStartPointer = startPointer;
         }
 
-        void finish(int numDocs)
+        internal void Finish(int numDocs)
         {
             if (numDocs != totalDocs)
             {
@@ -154,7 +154,7 @@ namespace Lucene.Net.Codecs.Compressing
             }
             if (blockChunks > 0)
             {
-                writeBlock();
+                WriteBlock();
             }
             fieldsIndexOut.WriteVInt(0); // end marker
         }
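
WriteBlock() above records one average per block and then only the packed,
zigzagged per-chunk deltas. A worked sketch with hypothetical numbers:

    // A block of 4 chunks holding 10, 12, 9 and 11 docs (42 total), with an
    // average of 10 docs per chunk used for illustration:
    //   chunk doc bases:       0, 10, 22, 31
    //   expected (avg * i):    0, 10, 20, 30
    //   stored deltas:         0,  0,  2,  1
    // Only PackedInts.BitsRequired(maxDelta) bits per entry are written,
    // instead of a full vInt per chunk base.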

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/80561f72/src/core/Codecs/Compressing/CompressingStoredFieldsReader.cs
----------------------------------------------------------------------
diff --git a/src/core/Codecs/Compressing/CompressingStoredFieldsReader.cs b/src/core/Codecs/Compressing/CompressingStoredFieldsReader.cs
index a4d7f7d..170b3e3 100644
--- a/src/core/Codecs/Compressing/CompressingStoredFieldsReader.cs
+++ b/src/core/Codecs/Compressing/CompressingStoredFieldsReader.cs
@@ -15,6 +15,7 @@
  * limitations under the License.
  */
 
+using Lucene.Net.Codecs.Lucene40;
 using Lucene.Net.Index;
 using Lucene.Net.Store;
 using Lucene.Net.Support;
@@ -47,7 +48,7 @@ namespace Lucene.Net.Codecs.Compressing
         {
             this.fieldInfos = reader.fieldInfos;
             this.fieldsStream = (IndexInput)reader.fieldsStream.Clone();
-            this.indexReader = reader.indexReader.clone();
+            this.indexReader = (CompressingStoredFieldsIndexReader)reader.indexReader.Clone();
             this.packedIntsVersion = reader.packedIntsVersion;
             this.compressionMode = reader.compressionMode;
             this.decompressor = (Decompressor)reader.decompressor.Clone();
@@ -68,14 +69,14 @@ namespace Lucene.Net.Codecs.Compressing
             IndexInput indexStream = null;
             try
             {
-                fieldsStream = d.OpenInput(IndexFileNames.SegmentFileName(segment, segmentSuffix, FIELDS_EXTENSION), context);
-                string indexStreamFN = IndexFileNames.SegmentFileName(segment, segmentSuffix, FIELDS_INDEX_EXTENSION);
+                fieldsStream = d.OpenInput(IndexFileNames.SegmentFileName(segment, segmentSuffix, Lucene40StoredFieldsWriter.FIELDS_EXTENSION), context);
+                string indexStreamFN = IndexFileNames.SegmentFileName(segment, segmentSuffix, Lucene40StoredFieldsWriter.FIELDS_INDEX_EXTENSION);
                 indexStream = d.OpenInput(indexStreamFN, context);
 
-                string codecNameIdx = formatName + CODEC_SFX_IDX;
-                string codecNameDat = formatName + CODEC_SFX_DAT;
-                CodecUtil.CheckHeader(indexStream, codecNameIdx, VERSION_START, VERSION_CURRENT);
-                CodecUtil.CheckHeader(fieldsStream, codecNameDat, VERSION_START, VERSION_CURRENT);
+                string codecNameIdx = formatName + CompressingStoredFieldsWriter.CODEC_SFX_IDX;
+                string codecNameDat = formatName + CompressingStoredFieldsWriter.CODEC_SFX_DAT;
+                CodecUtil.CheckHeader(indexStream, codecNameIdx, CompressingStoredFieldsWriter.VERSION_START, CompressingStoredFieldsWriter.VERSION_CURRENT);
+                CodecUtil.CheckHeader(fieldsStream, codecNameDat, CompressingStoredFieldsWriter.VERSION_START, CompressingStoredFieldsWriter.VERSION_CURRENT);
 
                 indexReader = new CompressingStoredFieldsIndexReader(indexStream, si);
                 indexStream = null;
@@ -98,7 +99,7 @@ namespace Lucene.Net.Codecs.Compressing
         /**
          * @throws AlreadyClosedException if this FieldsReader is closed
          */
-        private void ensureOpen()
+        private void EnsureOpen()
         {
             if (closed)
             {
@@ -109,7 +110,7 @@ namespace Lucene.Net.Codecs.Compressing
         /** 
          * Close the underlying {@link IndexInput}s.
          */
-        public override void Close()
+        protected override void Dispose(bool disposing)
         {
             if (!closed)
             {
@@ -120,62 +121,62 @@ namespace Lucene.Net.Codecs.Compressing
 
         private static void ReadField(ByteArrayDataInput input, StoredFieldVisitor visitor, FieldInfo info, int bits)
         {
-            switch (bits & TYPE_MASK)
+            switch (bits & CompressingStoredFieldsWriter.TYPE_MASK)
             {
-                case BYTE_ARR:
-                    int length = input.readVInt();
+                case CompressingStoredFieldsWriter.BYTE_ARR:
+                    int length = input.ReadVInt();
                     byte[] data = new byte[length];
-                    input.readBytes(data, 0, length);
-                    visitor.binaryField(info, data);
+                    input.ReadBytes(data, 0, length);
+                    visitor.BinaryField(info, (sbyte[])(Array)data);
                     break;
-                case STRING:
-                    length = input.readVInt();
+                case CompressingStoredFieldsWriter.STRING:
+                    length = input.ReadVInt();
                     data = new byte[length];
-                    input.readBytes(data, 0, length);
-                    visitor.stringField(info, new string(data, IOUtils.CHARSET_UTF_8));
+                    input.ReadBytes(data, 0, length);
+                    visitor.StringField(info, IOUtils.CHARSET_UTF_8.GetString(data));
                     break;
-                case NUMERIC_INT:
-                    visitor.intField(info, input.readInt());
+                case CompressingStoredFieldsWriter.NUMERIC_INT:
+                    visitor.IntField(info, input.ReadInt());
                     break;
-                case NUMERIC_FLOAT:
-                    visitor.floatField(info, Float.intBitsToFloat(input.readInt()));
+                case CompressingStoredFieldsWriter.NUMERIC_FLOAT:
+                    visitor.FloatField(info, Number.IntBitsToFloat(input.ReadInt()));
                     break;
-                case NUMERIC_LONG:
-                    visitor.longField(info, input.readLong());
+                case CompressingStoredFieldsWriter.NUMERIC_LONG:
+                    visitor.LongField(info, input.ReadLong());
                     break;
-                case NUMERIC_DOUBLE:
-                    visitor.doubleField(info, Double.longBitsToDouble(input.readLong()));
+                case CompressingStoredFieldsWriter.NUMERIC_DOUBLE:
+                    visitor.DoubleField(info, BitConverter.Int64BitsToDouble(input.ReadLong()));
                     break;
                 default:
-                    throw new AssertionError("Unknown type flag: " + Integer.toHexString(bits));
+                    throw new InvalidOperationException("Unknown type flag: " + bits.ToString("X"));
             }
         }
 
         private static void SkipField(ByteArrayDataInput input, int bits)
         {
-            switch (bits & TYPE_MASK)
+            switch (bits & CompressingStoredFieldsWriter.TYPE_MASK)
             {
-                case BYTE_ARR:
-                case STRING:
-                    int length = input.readVInt();
-                    input.skipBytes(length);
+                case CompressingStoredFieldsWriter.BYTE_ARR:
+                case CompressingStoredFieldsWriter.STRING:
+                    int length = input.ReadVInt();
+                    input.SkipBytes(length);
                     break;
-                case NUMERIC_INT:
-                case NUMERIC_FLOAT:
-                    input.readInt();
+                case CompressingStoredFieldsWriter.NUMERIC_INT:
+                case CompressingStoredFieldsWriter.NUMERIC_FLOAT:
+                    input.ReadInt();
                     break;
-                case NUMERIC_LONG:
-                case NUMERIC_DOUBLE:
-                    input.readLong();
+                case CompressingStoredFieldsWriter.NUMERIC_LONG:
+                case CompressingStoredFieldsWriter.NUMERIC_DOUBLE:
+                    input.ReadLong();
                     break;
                 default:
-                    throw new AssertionError("Unknown type flag: " + Integer.toHexString(bits));
+                    throw new InvalidOperationException("Unknown type flag: " + bits.ToString("X"));
             }
         }
 
         public override void VisitDocument(int docID, StoredFieldVisitor visitor)
         {
-            fieldsStream.Seek(indexReader.getStartPointer(docID));
+            fieldsStream.Seek(indexReader.GetStartPointer(docID));
 
             int docBase = fieldsStream.ReadVInt();
             int chunkDocs = fieldsStream.ReadVInt();
@@ -234,14 +235,14 @@ namespace Lucene.Net.Codecs.Compressing
                     {
                         //TODO - HACKMP - Paul, this is a point of concern for me, in that everything from this file, and the 
                         //decompressor.Decompress() contract is looking for int.  But, I don't want to simply cast from long to int here.
-                        off += it.Next();
+                        off += (int)it.Next();
                     }
                     offset = off;
                     length = (int)it.Next();
                     off += length;
                     for (int i = docID - docBase + 1; i < chunkDocs; ++i)
                     {
-                        off += it.Next();
+                        off += (int)it.Next();
                     }
                     totalLength = off;
                 }
@@ -263,10 +264,10 @@ namespace Lucene.Net.Codecs.Compressing
             for (int fieldIDX = 0; fieldIDX < numStoredFields; fieldIDX++)
             {
                 long infoAndBits = documentInput.ReadVLong();
-                int fieldNumber = Number.URShift(infoAndBits, TYPE_BITS); // (infoAndBits >>> TYPE_BITS);
+                int fieldNumber = (int)Number.URShift(infoAndBits, CompressingStoredFieldsWriter.TYPE_BITS); // (infoAndBits >>> TYPE_BITS);
                 FieldInfo fieldInfo = fieldInfos.FieldInfo(fieldNumber);
 
-                int bits = (int)(infoAndBits & TYPE_MASK);
+                int bits = (int)(infoAndBits & CompressingStoredFieldsWriter.TYPE_MASK);
 
                 switch (visitor.NeedsField(fieldInfo))
                 {
@@ -282,9 +283,9 @@ namespace Lucene.Net.Codecs.Compressing
             }
         }
 
-        public override StoredFieldsReader Clone()
+        public override object Clone()
         {
-            ensureOpen();
+            EnsureOpen();
             return new CompressingStoredFieldsReader(this);
         }
 
@@ -299,32 +300,25 @@ namespace Lucene.Net.Codecs.Compressing
         // .NET Port: renamed to GetChunkIterator to avoid conflict with nested type.
         internal ChunkIterator GetChunkIterator(int startDocID)
         {
-            ensureOpen();
-            fieldsStream.Seek(indexReader.getStartPointer(startDocID));
-            return new ChunkIterator(fieldsStream, indexReader, numDocs, packedIntsVersion, decompressor);
+            EnsureOpen();
+            fieldsStream.Seek(indexReader.GetStartPointer(startDocID));
+            return new ChunkIterator(this);
         }
 
         internal sealed class ChunkIterator
         {
-            private IndexInput _fieldsStream;
-            private CompressingStoredFieldsReader _indexReader;
-            private Decompressor _decompressor;
-            private int _numOfDocs;
-            private int _packedIntsVersion;
-            BytesRef bytes;
-            int docBase;
-            int chunkDocs;
-            int[] numStoredFields;
-            int[] lengths;
-
-            public ChunkIterator(IndexInput fieldsStream, CompressingStoredFieldsReader indexReader,
-                                    int numOfDocs, int packedIntsVersion, Decompressor decompressor)
+            internal BytesRef bytes;
+            internal int docBase;
+            internal int chunkDocs;
+            internal int[] numStoredFields;
+            internal int[] lengths;
+
+            private readonly CompressingStoredFieldsReader parent;
+
+            public ChunkIterator(CompressingStoredFieldsReader parent)
             {
-                _indexReader = indexReader;
-                _numOfDocs = numOfDocs;
-                _packedIntsVersion = packedIntsVersion;
-                _decompressor = decompressor;
-                _fieldsStream = fieldsStream;
+                this.parent = parent; // .NET Port
+
                 this.docBase = -1;
                 bytes = new BytesRef();
                 numStoredFields = new int[1];
@@ -349,12 +343,12 @@ namespace Lucene.Net.Codecs.Compressing
              */
             public void Next(int doc)
             {
-                _fieldsStream.Seek(_indexReader.getStartPointer(doc));
+                parent.fieldsStream.Seek(parent.indexReader.GetStartPointer(doc));
 
-                int docBase = _fieldsStream.ReadVInt();
-                int chunkDocs = _fieldsStream.ReadVInt();
+                int docBase = parent.fieldsStream.ReadVInt();
+                int chunkDocs = parent.fieldsStream.ReadVInt();
                 if (docBase < this.docBase + this.chunkDocs
-                    || docBase + chunkDocs > _numOfDocs)
+                    || docBase + chunkDocs > parent.numDocs)
                 {
                     throw new CorruptIndexException("Corrupted: current docBase=" + this.docBase
                         + ", current numDocs=" + this.chunkDocs + ", new docBase=" + docBase
@@ -372,15 +366,15 @@ namespace Lucene.Net.Codecs.Compressing
 
                 if (chunkDocs == 1)
                 {
-                    numStoredFields[0] = _fieldsStream.ReadVInt();
-                    lengths[0] = _fieldsStream.ReadVInt();
+                    numStoredFields[0] = parent.fieldsStream.ReadVInt();
+                    lengths[0] = parent.fieldsStream.ReadVInt();
                 }
                 else
                 {
-                    int bitsPerStoredFields = _fieldsStream.ReadVInt();
+                    int bitsPerStoredFields = parent.fieldsStream.ReadVInt();
                     if (bitsPerStoredFields == 0)
                     {
-                        Arrays.Fill(numStoredFields, 0, chunkDocs, _fieldsStream.ReadVInt());
+                        Arrays.Fill(numStoredFields, 0, chunkDocs, parent.fieldsStream.ReadVInt());
                     }
                     else if (bitsPerStoredFields > 31)
                     {
@@ -388,17 +382,17 @@ namespace Lucene.Net.Codecs.Compressing
                     }
                     else
                     {
-                        PackedInts.ReaderIterator it = (PackedInts.ReaderIterator)PackedInts.GetReaderIteratorNoHeader(_fieldsStream, PackedInts.Format.PACKED, _packedIntsVersion, chunkDocs, bitsPerStoredFields, 1);
+                        PackedInts.ReaderIterator it = (PackedInts.ReaderIterator)PackedInts.GetReaderIteratorNoHeader(parent.fieldsStream, PackedInts.Format.PACKED, parent.packedIntsVersion, chunkDocs, bitsPerStoredFields, 1);
                         for (int i = 0; i < chunkDocs; ++i)
                         {
                             numStoredFields[i] = (int)it.Next();
                         }
                     }
 
-                    int bitsPerLength = _fieldsStream.ReadVInt();
+                    int bitsPerLength = parent.fieldsStream.ReadVInt();
                     if (bitsPerLength == 0)
                     {
-                        Arrays.Fill(lengths, 0, chunkDocs, _fieldsStream.ReadVInt());
+                        Arrays.Fill(lengths, 0, chunkDocs, parent.fieldsStream.ReadVInt());
                     }
                     else if (bitsPerLength > 31)
                     {
@@ -406,7 +400,7 @@ namespace Lucene.Net.Codecs.Compressing
                     }
                     else
                     {
-                        PackedInts.ReaderIterator it = (PackedInts.ReaderIterator)PackedInts.GetReaderIteratorNoHeader(_fieldsStream, PackedInts.Format.PACKED, _packedIntsVersion, chunkDocs, bitsPerLength, 1);
+                        PackedInts.ReaderIterator it = (PackedInts.ReaderIterator)PackedInts.GetReaderIteratorNoHeader(parent.fieldsStream, PackedInts.Format.PACKED, parent.packedIntsVersion, chunkDocs, bitsPerLength, 1);
                         for (int i = 0; i < chunkDocs; ++i)
                         {
                             lengths[i] = (int)it.Next();
@@ -422,7 +416,7 @@ namespace Lucene.Net.Codecs.Compressing
             {
                 // decompress data
                 int chunkSize = this.ChunkSize();
-                _decompressor.Decompress(_fieldsStream, chunkSize, 0, chunkSize, bytes);
+                parent.decompressor.Decompress(parent.fieldsStream, chunkSize, 0, chunkSize, bytes);
                 if (bytes.length != chunkSize)
                 {
                     throw new CorruptIndexException("Corrupted: expected chunk size = " + this.ChunkSize() + ", got " + bytes.length);
@@ -434,10 +428,10 @@ namespace Lucene.Net.Codecs.Compressing
              */
             public void CopyCompressedData(DataOutput output)
             {
-                long chunkEnd = docBase + chunkDocs == _numOfDocs
-                    ? _fieldsStream.Length
-                    : _indexReader.getStartPointer(docBase + chunkDocs);
-                output.CopyBytes(_fieldsStream, chunkEnd - _fieldsStream.FilePointer);
+                long chunkEnd = docBase + chunkDocs == parent.numDocs
+                    ? parent.fieldsStream.Length
+                    : parent.indexReader.GetStartPointer(docBase + chunkDocs);
+                output.CopyBytes(parent.fieldsStream, chunkEnd - parent.fieldsStream.FilePointer);
             }
 
         }
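
VisitDocument above unpacks each stored field's header vLong into a field
number and a type code. Since NUMERIC_DOUBLE == 0x05 needs 3 bits, TYPE_BITS
is 3 and TYPE_MASK is 7; a small sketch of the packing with made-up values:

    const int TYPE_BITS = 3, TYPE_MASK = 7, NUMERIC_INT = 0x02;

    long infoAndBits = (4L << TYPE_BITS) | NUMERIC_INT;       // field #4, int
    int fieldNumber = (int)((ulong)infoAndBits >> TYPE_BITS); // == 4
    int bits = (int)(infoAndBits & TYPE_MASK);                // == NUMERIC_INT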

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/80561f72/src/core/Codecs/Compressing/CompressingStoredFieldsWriter.cs
----------------------------------------------------------------------
diff --git a/src/core/Codecs/Compressing/CompressingStoredFieldsWriter.cs b/src/core/Codecs/Compressing/CompressingStoredFieldsWriter.cs
index 1543196..15a7484 100644
--- a/src/core/Codecs/Compressing/CompressingStoredFieldsWriter.cs
+++ b/src/core/Codecs/Compressing/CompressingStoredFieldsWriter.cs
@@ -1,3 +1,4 @@
+using Lucene.Net.Codecs.Lucene40;
 using Lucene.Net.Documents;
 using Lucene.Net.Index;
 using Lucene.Net.Store;
@@ -13,80 +14,82 @@ namespace Lucene.Net.Codecs.Compressing
 {
     public sealed class CompressingStoredFieldsWriter : StoredFieldsWriter
     {
-        static readonly int MAX_DOCUMENTS_PER_CHUNK = 128;
-        static readonly int STRING = 0x00;
-        static readonly int BYTE_ARR = 0x01;
-        static readonly int NUMERIC_INT = 0x02;
-        static readonly int NUMERIC_FLOAT = 0x03;
-        static readonly int NUMERIC_LONG = 0x04;
-        static readonly int NUMERIC_DOUBLE = 0x05;
-
-        static readonly int TYPE_BITS = PackedInts.bitsRequired(NUMERIC_DOUBLE);
-        static readonly int TYPE_MASK = (int)PackedInts.maxValue(TYPE_BITS);
-
-        static readonly String CODEC_SFX_IDX = "Index";
-        static readonly String CODEC_SFX_DAT = "Data";
-        static readonly int VERSION_START = 0;
-        static readonly int VERSION_CURRENT = VERSION_START;
-
-        private Directory directory;
-        private string segment;
-        private string segmentSuffix;
+        internal const int MAX_DOCUMENTS_PER_CHUNK = 128;
+
+        internal const int STRING = 0x00;
+        internal const int BYTE_ARR = 0x01;
+        internal const int NUMERIC_INT = 0x02;
+        internal const int NUMERIC_FLOAT = 0x03;
+        internal const int NUMERIC_LONG = 0x04;
+        internal const int NUMERIC_DOUBLE = 0x05;
+
+        internal static readonly int TYPE_BITS = PackedInts.BitsRequired(NUMERIC_DOUBLE);
+        internal static readonly int TYPE_MASK = (int)PackedInts.MaxValue(TYPE_BITS);
+
+        internal const String CODEC_SFX_IDX = "Index";
+        internal const String CODEC_SFX_DAT = "Data";
+        internal const int VERSION_START = 0;
+        internal const int VERSION_CURRENT = VERSION_START;
+
+        private readonly Directory directory;
+        private readonly string segment;
+        private readonly string segmentSuffix;
         private CompressingStoredFieldsIndexWriter indexWriter;
         private IndexOutput fieldsStream;
 
-        private CompressionMode compressionMode;
-        private Compressor compressor;
-        private int chunkSize;
+        private readonly CompressionMode compressionMode;
+        private readonly Compressor compressor;
+        private readonly int chunkSize;
 
-        private GrowableByteArrayDataOutput bufferedDocs;
+        private readonly GrowableByteArrayDataOutput bufferedDocs;
         private int[] numStoredFields; // number of stored fields
         private int[] endOffsets; // end offsets in bufferedDocs
         private int docBase; // doc ID at the beginning of the chunk
         private int numBufferedDocs; // docBase + numBufferedDocs == current doc ID
 
-        public CompressingStoredFieldsWriter(Directory directory, SegmentInfo si, string segmentSuffix, IOContext context, string formatName, CompressionMode compressionMode, int chunkSize) 
+        public CompressingStoredFieldsWriter(Directory directory, SegmentInfo si, string segmentSuffix, IOContext context, string formatName, CompressionMode compressionMode, int chunkSize)
         {
-          this.directory = directory;
-          this.segment = si.name;
-          this.segmentSuffix = segmentSuffix;
-          this.compressionMode = compressionMode;
-          this.compressor = compressionMode.newCompressor();
-          this.chunkSize = chunkSize;
-          this.docBase = 0;
-          this.bufferedDocs = new GrowableByteArrayDataOutput(chunkSize);
-          this.numStoredFields = new int[16];
-          this.endOffsets = new int[16];
-          this.numBufferedDocs = 0;
-
-          bool success = false;
-          IndexOutput indexStream = directory.CreateOutput(IndexFileNames.SegmentFileName(segment, segmentSuffix, FIELDS_INDEX_EXTENSION), context);
-          try 
-          {
-            fieldsStream = directory.CreateOutput(IndexFileNames.SegmentFileName(segment, segmentSuffix, FIELDS_EXTENSION), context);
-
-            string codecNameIdx = formatName + CODEC_SFX_IDX;
-            string codecNameDat = formatName + CODEC_SFX_DAT;
-            CodecUtil.WriteHeader(indexStream, codecNameIdx, VERSION_CURRENT);
-            CodecUtil.WriteHeader(fieldsStream, codecNameDat, VERSION_CURRENT);
-
-            indexWriter = new CompressingStoredFieldsIndexWriter(indexStream);
-            indexStream = null;
-
-            fieldsStream.WriteVInt(PackedInts.VERSION_CURRENT);
-
-            success = true;
-          } 
-          finally 
-          {
-            if (!success) {
-              IOUtils.CloseWhileHandlingException(indexStream);
-              abort();
+            this.directory = directory;
+            this.segment = si.name;
+            this.segmentSuffix = segmentSuffix;
+            this.compressionMode = compressionMode;
+            this.compressor = compressionMode.NewCompressor();
+            this.chunkSize = chunkSize;
+            this.docBase = 0;
+            this.bufferedDocs = new GrowableByteArrayDataOutput(chunkSize);
+            this.numStoredFields = new int[16];
+            this.endOffsets = new int[16];
+            this.numBufferedDocs = 0;
+
+            bool success = false;
+            IndexOutput indexStream = directory.CreateOutput(IndexFileNames.SegmentFileName(segment, segmentSuffix, Lucene40StoredFieldsWriter.FIELDS_INDEX_EXTENSION), context);
+            try
+            {
+                fieldsStream = directory.CreateOutput(IndexFileNames.SegmentFileName(segment, segmentSuffix, Lucene40StoredFieldsWriter.FIELDS_EXTENSION), context);
+
+                string codecNameIdx = formatName + CODEC_SFX_IDX;
+                string codecNameDat = formatName + CODEC_SFX_DAT;
+                CodecUtil.WriteHeader(indexStream, codecNameIdx, VERSION_CURRENT);
+                CodecUtil.WriteHeader(fieldsStream, codecNameDat, VERSION_CURRENT);
+
+                indexWriter = new CompressingStoredFieldsIndexWriter(indexStream);
+                indexStream = null;
+
+                fieldsStream.WriteVInt(PackedInts.VERSION_CURRENT);
+
+                success = true;
+            }
+            finally
+            {
+                if (!success)
+                {
+                    IOUtils.CloseWhileHandlingException((IDisposable)indexStream);
+                    Abort();
+                }
             }
-          }
         }
 
-        public override void Close()
+        protected override void Dispose(bool disposing)
         {
             try
             {
@@ -98,7 +101,7 @@ namespace Lucene.Net.Codecs.Compressing
                 indexWriter = null;
             }
         }
-
+        
         public override void StartDocument(int numStoredFields)
         {
             if (numBufferedDocs == this.numStoredFields.Length)
@@ -120,40 +123,45 @@ namespace Lucene.Net.Codecs.Compressing
             }
         }
 
-        private static void saveInts(int[] values, int length, DataOutput output) 
+        private static void SaveInts(int[] values, int length, DataOutput output)
         {
-          if (length == 1) 
-          {
-            output.WriteVInt(values[0]);
-          } 
-          else 
-          {
-            bool allEqual = true;
-            for (int i = 1; i < length; ++i) {
-              if (values[i] != values[0]) {
-                allEqual = false;
-                //break;
-              }
+            if (length == 1)
+            {
+                output.WriteVInt(values[0]);
             }
-            if (allEqual) {
-              output.WriteVInt(0);
-              output.WriteVInt(values[0]);
-            } 
-            else 
+            else
             {
-              long max = 0;
-              for (int i = 0; i < length; ++i) {
-                max |= values[i];
-              }
-              int bitsRequired = PackedInts.BitsRequired(max);
-              output.WriteVInt(bitsRequired);
-              PackedInts.Writer w = PackedInts.GetWriterNoHeader(output, PackedInts.Format.PACKED, length, bitsRequired, 1);
-              for (int i = 0; i < length; ++i) {
-                w.Add(values[i]);
-              }
-              w.Finish();
+                bool allEqual = true;
+                for (int i = 1; i < length; ++i)
+                {
+                    if (values[i] != values[0])
+                    {
+                        allEqual = false;
+                        break; // restore the upstream early exit; allEqual cannot change back
+                    }
+                }
+                if (allEqual)
+                {
+                    output.WriteVInt(0);
+                    output.WriteVInt(values[0]);
+                }
+                else
+                {
+                    long max = 0;
+                    for (int i = 0; i < length; ++i)
+                    {
+                        max |= values[i];
+                    }
+                    int bitsRequired = PackedInts.BitsRequired(max);
+                    output.WriteVInt(bitsRequired);
+                    PackedInts.Writer w = PackedInts.GetWriterNoHeader(output, PackedInts.Format.PACKED, length, bitsRequired, 1);
+                    for (int i = 0; i < length; ++i)
+                    {
+                        w.Add(values[i]);
+                    }
+                    w.Finish();
+                }
             }
-          }
         }
 
         private void WriteHeader(int docBase, int numBufferedDocs, int[] numStoredFields, int[] lengths)
@@ -163,10 +171,10 @@ namespace Lucene.Net.Codecs.Compressing
             fieldsStream.WriteVInt(numBufferedDocs);
 
             // save numStoredFields
-            saveInts(numStoredFields, numBufferedDocs, fieldsStream);
+            SaveInts(numStoredFields, numBufferedDocs, fieldsStream);
 
             // save lengths
-            saveInts(lengths, numBufferedDocs, fieldsStream);
+            SaveInts(lengths, numBufferedDocs, fieldsStream);
         }
 
         private bool TriggerFlush()
@@ -197,37 +205,37 @@ namespace Lucene.Net.Codecs.Compressing
             bufferedDocs.Length = 0;
         }
 
-        public override void writeField(FieldInfo info, IndexableField field)
+        public override void WriteField(FieldInfo info, IIndexableField field)
         {
           int bits = 0;
           BytesRef bytes;
           string str;
 
-          Number number = field.numericValue();
+          object number = field.NumericValue;
           if (number != null) {
-            if (number instanceof Byte || number instanceof Short || number instanceof Integer) {
+            if (number is byte || number is sbyte || number is short || number is int) {
               bits = NUMERIC_INT;
-            } else if (number instanceof Long) {
+            } else if (number is long) {
               bits = NUMERIC_LONG;
-            } else if (number instanceof Float) {
+            } else if (number is float) {
               bits = NUMERIC_FLOAT;
-            } else if (number instanceof Double) {
+            } else if (number is double) {
               bits = NUMERIC_DOUBLE;
             } else {
-              throw new IllegalArgumentException("cannot store numeric type " + number.getClass());
+              throw new ArgumentException("cannot store numeric type " + number.GetType());
             }
             str = null;
             bytes = null;
           } else {
-            bytes = field.binaryValue();
+            bytes = field.BinaryValue;
             if (bytes != null) {
               bits = BYTE_ARR;
               str = null;
             } else {
               bits = STRING;
-              str = field.stringValue();
+              str = field.StringValue;
               if (str == null) {
-                throw new ArgumentException("field " + field.name() + " is stored but does not have binaryValue, stringValue nor numericValue");
+                throw new ArgumentException("field " + field.Name + " is stored but does not have binaryValue, stringValue nor numericValue");
               }
             }
           }
@@ -239,126 +247,144 @@ namespace Lucene.Net.Codecs.Compressing
             bufferedDocs.WriteVInt(bytes.length);
             bufferedDocs.WriteBytes(bytes.bytes, bytes.offset, bytes.length);
           } else if (str != null) {
-            bufferedDocs.WriteString(field.stringValue());
+            bufferedDocs.WriteString(field.StringValue);
           } else {
-            if (number instanceof Byte || number instanceof Short || number instanceof Integer) {
-              bufferedDocs.writeInt(number.intValue());
-            } else if (number instanceof Long) {
-              bufferedDocs.writeLong(number.longValue());
-            } else if (number instanceof Float) {
-              bufferedDocs.writeInt(Float.floatToIntBits(number.floatValue()));
-            } else if (number instanceof Double) {
-              bufferedDocs.writeLong(Double.doubleToLongBits(number.doubleValue()));
+            if (number is byte || number is sbyte || number is short || number is int) {
+              bufferedDocs.WriteInt(Convert.ToInt32(number)); // (int)number throws for a boxed byte/sbyte/short
+            } else if (number is long) {
+              bufferedDocs.WriteLong((long)number);
+            } else if (number is float) {
+              bufferedDocs.WriteInt(Number.FloatToIntBits((float)number));
+            } else if (number is double) {
+              bufferedDocs.WriteLong(BitConverter.DoubleToInt64Bits((double)number));
             } else {
-              throw new AssertionError("Cannot get here");
+              throw new InvalidOperationException("Cannot get here");
             }
           }
         }
 
-        public override void Abort() {
-          IOUtils.CloseWhileHandlingException(this);
-          IOUtils.DeleteFilesIgnoringExceptions(directory,
-              IndexFileNames.SegmentFileName(segment, segmentSuffix, FIELDS_EXTENSION),
-              IndexFileNames.SegmentFileName(segment, segmentSuffix, FIELDS_INDEX_EXTENSION));
-        }
-
-        public override void finish(FieldInfos fis, int numDocs) 
+        public override void Abort()
         {
-          if (numBufferedDocs > 0) {
-            Flush();
-          } else {
-            //assert bufferedDocs.length == 0;
-          }
-          if (docBase != numDocs) {
-            throw new RuntimeException("Wrote " + docBase + " docs, finish called with numDocs=" + numDocs);
-          }
-          indexWriter.finish(numDocs);
+            IOUtils.CloseWhileHandlingException((IDisposable)this);
+            IOUtils.DeleteFilesIgnoringExceptions(directory,
+                IndexFileNames.SegmentFileName(segment, segmentSuffix, Lucene40StoredFieldsWriter.FIELDS_EXTENSION),
+                IndexFileNames.SegmentFileName(segment, segmentSuffix, Lucene40StoredFieldsWriter.FIELDS_INDEX_EXTENSION));
         }
 
-        public override int Merge(MergeState mergeState) 
+        public override void Finish(FieldInfos fis, int numDocs)
         {
-          int docCount = 0;
-          int idx = 0;
-
-          foreach (AtomicReader reader in mergeState.readers) 
-          {
-            SegmentReader matchingSegmentReader = mergeState.matchingSegmentReaders[idx++];
-            CompressingStoredFieldsReader matchingFieldsReader = null;
-            if (matchingSegmentReader != null) 
+            if (numBufferedDocs > 0)
             {
-              StoredFieldsReader fieldsReader = matchingSegmentReader.FieldsReader;
-              // we can only bulk-copy if the matching reader is also a CompressingStoredFieldsReader
-              if (fieldsReader != null && fieldsReader is CompressingStoredFieldsReader) 
-              {
-                matchingFieldsReader = (CompressingStoredFieldsReader) fieldsReader;
-              }
+                Flush();
             }
+            else
+            {
+                //assert bufferedDocs.length == 0;
+            }
+            if (docBase != numDocs)
+            {
+                throw new SystemException("Wrote " + docBase + " docs, finish called with numDocs=" + numDocs);
+            }
+            indexWriter.Finish(numDocs);
+        }
 
-            int maxDoc = reader.MaxDoc;
-            IBits liveDocs = reader.LiveDocs;
+        public override int Merge(MergeState mergeState)
+        {
+            int docCount = 0;
+            int idx = 0;
 
-            if (matchingFieldsReader == null) {
-              // naive merge...
-              for (int i = NextLiveDoc(0, liveDocs, maxDoc); i < maxDoc; i = NextLiveDoc(i + 1, liveDocs, maxDoc)) {
-                Document doc = reader.Document(i);
-                AddDocument(doc, mergeState.fieldInfos);
-                ++docCount;
-                mergeState.checkAbort.Work(300);
-              }
-            } else {
-              int docID = NextLiveDoc(0, liveDocs, maxDoc);
-              if (docID < maxDoc) {
-                // not all docs were deleted
-                ChunkIterator it = matchingFieldsReader.ChunkIterator(docID);
-                int[] startOffsets = new int[0];
-                do {
-                  // go to the next chunk that contains docID
-                  it.next(docID);
-                  // transform lengths into offsets
-                  if (startOffsets.Length < it.chunkDocs) {
-                    startOffsets = new int[ArrayUtil.Oversize(it.chunkDocs, 4)];
-                  }
-                  for (int i = 1; i < it.chunkDocs; ++i) {
-                    startOffsets[i] = startOffsets[i - 1] + it.lengths[i - 1];
-                  }
-
-                  if (compressionMode == matchingFieldsReader.getCompressionMode() // same compression mode
-                      && numBufferedDocs == 0 // starting a new chunk
-                      && startOffsets[it.chunkDocs - 1] < chunkSize // chunk is small enough
-                      && startOffsets[it.chunkDocs - 1] + it.lengths[it.chunkDocs - 1] >= chunkSize // chunk is large enough
-                      && nextDeletedDoc(it.docBase, liveDocs, it.docBase + it.chunkDocs) == it.docBase + it.chunkDocs) { // no deletion in the chunk
-
-                    // no need to decompress, just copy data
-                    indexWriter.writeIndex(it.chunkDocs, fieldsStream.FilePointer);
-                    WriteHeader(this.docBase, it.chunkDocs, it.numStoredFields, it.lengths);
-                    it.copyCompressedData(fieldsStream);
-                    this.docBase += it.chunkDocs;
-                    docID = NextLiveDoc(it.docBase + it.chunkDocs, liveDocs, maxDoc);
-                    docCount += it.chunkDocs;
-                    mergeState.checkAbort.Work(300 * it.chunkDocs);
-                  } else {
-                    // decompress
-                    it.decompress();
-                    if (startOffsets[it.chunkDocs - 1] + it.lengths[it.chunkDocs - 1] != it.bytes.length) {
-                      throw new CorruptIndexException("Corrupted: expected chunk size=" + startOffsets[it.chunkDocs - 1] + it.lengths[it.chunkDocs - 1] + ", got " + it.bytes.length);
+            foreach (AtomicReader reader in mergeState.readers)
+            {
+                SegmentReader matchingSegmentReader = mergeState.matchingSegmentReaders[idx++];
+                CompressingStoredFieldsReader matchingFieldsReader = null;
+                if (matchingSegmentReader != null)
+                {
+                    StoredFieldsReader fieldsReader = matchingSegmentReader.FieldsReader;
+                    // we can only bulk-copy if the matching reader is also a CompressingStoredFieldsReader
+                    if (fieldsReader != null && fieldsReader is CompressingStoredFieldsReader)
+                    {
+                        matchingFieldsReader = (CompressingStoredFieldsReader)fieldsReader;
                     }
-                    // copy non-deleted docs
-                    for (; docID < it.docBase + it.chunkDocs; docID = NextLiveDoc(docID + 1, liveDocs, maxDoc)) {
-                      int diff = docID - it.docBase;
-                      StartDocument(it.numStoredFields[diff]);
-                      bufferedDocs.WriteBytes(it.bytes.bytes, it.bytes.offset + startOffsets[diff], it.lengths[diff]);
-                      FinishDocument();
-                      ++docCount;
-                      mergeState.checkAbort.Work(300);
+                }
+
+                int maxDoc = reader.MaxDoc;
+                IBits liveDocs = reader.LiveDocs;
+
+                if (matchingFieldsReader == null)
+                {
+                    // naive merge...
+                    for (int i = NextLiveDoc(0, liveDocs, maxDoc); i < maxDoc; i = NextLiveDoc(i + 1, liveDocs, maxDoc))
+                    {
+                        Document doc = reader.Document(i);
+                        AddDocument(doc, mergeState.fieldInfos);
+                        ++docCount;
+                        mergeState.checkAbort.Work(300);
                     }
-                  }
-                } while (docID < maxDoc);
-              }
+                }
+                else
+                {
+                    int docID = NextLiveDoc(0, liveDocs, maxDoc);
+                    if (docID < maxDoc)
+                    {
+                        // not all docs were deleted
+                        CompressingStoredFieldsReader.ChunkIterator it = matchingFieldsReader.GetChunkIterator(docID);
+                        int[] startOffsets = new int[0];
+                        do
+                        {
+                            // go to the next chunk that contains docID
+                            it.Next(docID);
+                            // transform lengths into offsets
+                            if (startOffsets.Length < it.chunkDocs)
+                            {
+                                startOffsets = new int[ArrayUtil.Oversize(it.chunkDocs, 4)];
+                            }
+                            for (int i = 1; i < it.chunkDocs; ++i)
+                            {
+                                startOffsets[i] = startOffsets[i - 1] + it.lengths[i - 1];
+                            }
+
+                            if (compressionMode == matchingFieldsReader.CompressionMode // same compression mode
+                                && numBufferedDocs == 0 // starting a new chunk
+                                && startOffsets[it.chunkDocs - 1] < chunkSize // chunk is small enough
+                                && startOffsets[it.chunkDocs - 1] + it.lengths[it.chunkDocs - 1] >= chunkSize // chunk is large enough
+                                && NextDeletedDoc(it.docBase, liveDocs, it.docBase + it.chunkDocs) == it.docBase + it.chunkDocs)
+                            { // no deletion in the chunk
+
+                                // no need to decompress, just copy data
+                                indexWriter.WriteIndex(it.chunkDocs, fieldsStream.FilePointer);
+                                WriteHeader(this.docBase, it.chunkDocs, it.numStoredFields, it.lengths);
+                                it.CopyCompressedData(fieldsStream);
+                                this.docBase += it.chunkDocs;
+                                docID = NextLiveDoc(it.docBase + it.chunkDocs, liveDocs, maxDoc);
+                                docCount += it.chunkDocs;
+                                mergeState.checkAbort.Work(300 * it.chunkDocs);
+                            }
+                            else
+                            {
+                                // decompress
+                                it.Decompress();
+                                if (startOffsets[it.chunkDocs - 1] + it.lengths[it.chunkDocs - 1] != it.bytes.length)
+                                {
+                                    throw new CorruptIndexException("Corrupted: expected chunk size=" + (startOffsets[it.chunkDocs - 1] + it.lengths[it.chunkDocs - 1]) + ", got " + it.bytes.length);
+                                }
+                                // copy non-deleted docs
+                                for (; docID < it.docBase + it.chunkDocs; docID = NextLiveDoc(docID + 1, liveDocs, maxDoc))
+                                {
+                                    int diff = docID - it.docBase;
+                                    StartDocument(it.numStoredFields[diff]);
+                                    bufferedDocs.WriteBytes(it.bytes.bytes, it.bytes.offset + startOffsets[diff], it.lengths[diff]);
+                                    FinishDocument();
+                                    ++docCount;
+                                    mergeState.checkAbort.Work(300);
+                                }
+                            }
+                        } while (docID < maxDoc);
+                    }
+                }
             }
-          }
 
-          Finish(mergeState.fieldInfos, docCount);
-          return docCount;
+            Finish(mergeState.fieldInfos, docCount);
+            return docCount;
         }
 
         private static int NextLiveDoc(int doc, IBits liveDocs, int maxDoc)
@@ -374,7 +400,7 @@ namespace Lucene.Net.Codecs.Compressing
             return doc;
         }
 
-        private static int nextDeletedDoc(int doc, Bits liveDocs, int maxDoc)
+        private static int NextDeletedDoc(int doc, IBits liveDocs, int maxDoc)
         {
             if (liveDocs == null)
             {

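A note on SaveInts above: it chooses between two encodings per chunk. If every
document in the chunk has the same value it writes a VInt 0 marker followed by
that single value; otherwise it ORs all values together to bound the bit width
and bit-packs the whole array. A minimal standalone sketch of that branching,
with hypothetical writeVInt/writePacked delegates standing in for the
DataOutput and PackedInts.Writer calls above:

    // Sketch only: the branching mirrors SaveInts; the delegates are assumed.
    static void EncodeInts(int[] values, int length,
        Action<int> writeVInt, Action<int[], int, int> writePacked)
    {
        if (length == 1) { writeVInt(values[0]); return; }

        bool allEqual = true;
        for (int i = 1; i < length; ++i)
            if (values[i] != values[0]) { allEqual = false; break; }

        if (allEqual)
        {
            writeVInt(0);               // 0 bits per value marks the all-equal case
            writeVInt(values[0]);
        }
        else
        {
            long max = 0;
            for (int i = 0; i < length; ++i)
                max |= (uint)values[i]; // values are lengths/counts, so non-negative
            int bits = 1;
            while ((max >> bits) != 0) ++bits; // analogue of PackedInts.BitsRequired
            writeVInt(bits);
            writePacked(values, length, bits);
        }
    }

The OR trick works because the inputs are non-negative, so the OR of all
values has the same highest set bit as their maximum.
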
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/80561f72/src/core/Codecs/Compressing/CompressingTermVectorsFormat.cs
----------------------------------------------------------------------
diff --git a/src/core/Codecs/Compressing/CompressingTermVectorsFormat.cs b/src/core/Codecs/Compressing/CompressingTermVectorsFormat.cs
index 0a2afd1..49aac8a 100644
--- a/src/core/Codecs/Compressing/CompressingTermVectorsFormat.cs
+++ b/src/core/Codecs/Compressing/CompressingTermVectorsFormat.cs
@@ -1,18 +1,20 @@
-using System;
+using Lucene.Net.Index;
+using Lucene.Net.Store;
+using System;
 using System.Collections.Generic;
 using System.Linq;
 using System.Text;
 
 namespace Lucene.Net.Codecs.Compressing
 {
-    public class CompressingTermVectorsFormat: TermVectorsFormat
+    public class CompressingTermVectorsFormat : TermVectorsFormat
     {
-        private string formatName;
-        private string segmentSuffix;
-        private CompressionMode compressionMode;
-        private int chunkSize;
+        private readonly string formatName;
+        private readonly string segmentSuffix;
+        private readonly CompressionMode compressionMode;
+        private readonly int chunkSize;
 
-        public CompressingTermVectorsFormat(String formatName, String segmentSuffix, 
+        public CompressingTermVectorsFormat(string formatName, string segmentSuffix,
             CompressionMode compressionMode, int chunkSize)
         {
             this.formatName = formatName;
@@ -24,5 +26,23 @@ namespace Lucene.Net.Codecs.Compressing
             }
             this.chunkSize = chunkSize;
         }
+
+        public override TermVectorsReader VectorsReader(Directory directory, SegmentInfo segmentInfo, FieldInfos fieldInfos, IOContext context)
+        {
+            return new CompressingTermVectorsReader(directory, segmentInfo, segmentSuffix,
+                fieldInfos, context, formatName, compressionMode);
+        }
+
+        public override TermVectorsWriter VectorsWriter(Directory directory, SegmentInfo segmentInfo, IOContext context)
+        {
+            return new CompressingTermVectorsWriter(directory, segmentInfo, segmentSuffix,
+                context, formatName, compressionMode, chunkSize);
+        }
+
+        public override string ToString()
+        {
+            return GetType().Name + "(compressionMode=" + compressionMode
+                + ", chunkSize=" + chunkSize + ")";
+        }
     }
 }


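For orientation, the format above is a thin factory: it is constructed once
with a compression mode and chunk size and hands back the reader and writer
added in this commit. A hedged usage sketch; the format name, the
CompressionMode.FAST field and the 16 KB chunk size are illustrative
assumptions, not values taken from this commit:

    // Assumed wiring, mirroring the overrides above.
    TermVectorsFormat fmt = new CompressingTermVectorsFormat(
        "DemoCompressing",    // formatName: prefixed into the codec headers
        "",                   // segmentSuffix
        CompressionMode.FAST, // assumed static mode, as in upstream Lucene
        1 << 14);             // chunkSize: buffer ~16 KB of vectors per chunk
    // TermVectorsWriter writer = fmt.VectorsWriter(directory, segmentInfo, context);
    // TermVectorsReader reader = fmt.VectorsReader(directory, segmentInfo, fieldInfos, context);
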
[33/50] [abbrv] git commit: Correct issue with VInts

Posted by mh...@apache.org.
Correct issue with VInts


Project: http://git-wip-us.apache.org/repos/asf/lucenenet/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucenenet/commit/4c65df01
Tree: http://git-wip-us.apache.org/repos/asf/lucenenet/tree/4c65df01
Diff: http://git-wip-us.apache.org/repos/asf/lucenenet/diff/4c65df01

Branch: refs/heads/branch_4x
Commit: 4c65df01543ea2da6bcb488362c8cba3db7fff7f
Parents: 37289ca
Author: Paul Irwin <pa...@gmail.com>
Authored: Wed Aug 7 09:37:18 2013 -0400
Committer: Paul Irwin <pa...@gmail.com>
Committed: Wed Aug 7 09:37:18 2013 -0400

----------------------------------------------------------------------
 .../Lucene42/Lucene42DocValuesProducer.cs       |  5 +--
 src/core/Document/Field.cs                      |  7 +++-
 src/core/Index/IIndexableField.cs               |  2 +-
 src/core/Store/BufferedIndexInput.cs            | 40 +++++++++++-------
 src/core/Store/ByteArrayDataInput.cs            | 42 ++++++++++++-------
 src/core/Store/DataInput.cs                     | 44 ++++++++++++--------
 6 files changed, 86 insertions(+), 54 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucenenet/blob/4c65df01/src/core/Codecs/Lucene42/Lucene42DocValuesProducer.cs
----------------------------------------------------------------------
diff --git a/src/core/Codecs/Lucene42/Lucene42DocValuesProducer.cs b/src/core/Codecs/Lucene42/Lucene42DocValuesProducer.cs
index 8718010..383a6a5 100644
--- a/src/core/Codecs/Lucene42/Lucene42DocValuesProducer.cs
+++ b/src/core/Codecs/Lucene42/Lucene42DocValuesProducer.cs
@@ -69,9 +69,8 @@ namespace Lucene.Net.Codecs.Lucene42
         private void ReadFields(IndexInput meta, FieldInfos infos)
         {
             int fieldNumber = meta.ReadVInt();
-            // TODO: .NET Port: I had to add the != 255 case here for it to work in testing, but that means 
-            // you can't have more than 255 fields, which seems wrong to me.
-            while (fieldNumber != -1 && fieldNumber != 255)
+            
+            while (fieldNumber != -1)
             {
                 int fieldType = meta.ReadByte();
                 if (fieldType == Lucene42DocValuesConsumer.NUMBER)

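The removed "!= 255" guard was a symptom of the VInt bug fixed below: the
fields list ends with a sentinel of -1 written as a VInt, which is five bytes
(0xFF 0xFF 0xFF 0xFF 0x0F) on the wire. The old unrolled decoder started with
"if (b >= 0) return b;", which is always true for an unsigned C# byte, so it
returned the first byte, 255, instead of -1. A sketch of the corrected loop
run against those sentinel bytes:

    // -1 as a VInt is five 7-bit groups; the loop reassembles them to -1,
    // so the while (fieldNumber != -1) check above works without special cases.
    byte[] sentinel = { 0xFF, 0xFF, 0xFF, 0xFF, 0x0F };
    int pos = 0;
    byte b = sentinel[pos++];
    int value = b & 0x7F;
    for (int shift = 7; (b & 0x80) != 0; shift += 7)
    {
        b = sentinel[pos++];
        value |= (b & 0x7F) << shift;
    }
    // value == -1 here; the broken unrolled decoder returned 255 (the first
    // byte) because a C# byte can never be negative.
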
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/4c65df01/src/core/Document/Field.cs
----------------------------------------------------------------------
diff --git a/src/core/Document/Field.cs b/src/core/Document/Field.cs
index 5ed5585..578680b 100644
--- a/src/core/Document/Field.cs
+++ b/src/core/Document/Field.cs
@@ -346,13 +346,16 @@ namespace Lucene.Net.Documents
             }
         }
 
-        public long NumericValue
+        public object NumericValue
         {
             get
             {
                 // .NET Port: No base type for all numeric types, so unless we want to rewrite this
                 // to be LongValue, IntValue, FloatValue, etc, this will have to do.
-                return Convert.ToInt64(fieldsData);
+                if (fieldsData is int || fieldsData is byte || fieldsData is short || fieldsData is long)
+                    return Convert.ToInt64(fieldsData);
+
+                return null;
             }
         }
 
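One consequence of the property above: every integral field value comes back
boxed as a long (via Convert.ToInt64), and float/double values come back as
null, so callers should test for long rather than for the field's declared
width. A small consumer sketch under those assumptions (field stands in for
any IIndexableField):

    object number = field.NumericValue;
    if (number is long)
    {
        long v = (long)number;   // int, byte, short and long all surface here
        Console.WriteLine("numeric: " + v);
    }
    else if (number == null)
    {
        // non-numeric field, or a float/double value, which this port
        // does not surface yet
    }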

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/4c65df01/src/core/Index/IIndexableField.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/IIndexableField.cs b/src/core/Index/IIndexableField.cs
index f7f12f7..29ff853 100644
--- a/src/core/Index/IIndexableField.cs
+++ b/src/core/Index/IIndexableField.cs
@@ -22,7 +22,7 @@ namespace Lucene.Net.Index
 
         TextReader ReaderValue { get; }
 
-        long NumericValue { get; }
+        object NumericValue { get; }
 
         TokenStream TokenStream(Analyzer analyzer);
     }

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/4c65df01/src/core/Store/BufferedIndexInput.cs
----------------------------------------------------------------------
diff --git a/src/core/Store/BufferedIndexInput.cs b/src/core/Store/BufferedIndexInput.cs
index 6243a5f..804ccdd 100644
--- a/src/core/Store/BufferedIndexInput.cs
+++ b/src/core/Store/BufferedIndexInput.cs
@@ -221,23 +221,33 @@ namespace Lucene.Net.Store
         {
             if (5 <= (bufferLength - bufferPosition))
             {
+                // .NET Port: going back to original code to avoid sbyte/byte diff
                 byte b = buffer[bufferPosition++];
-                if (b >= 0) return b;
                 int i = b & 0x7F;
-                b = buffer[bufferPosition++];
-                i |= (b & 0x7F) << 7;
-                if (b >= 0) return i;
-                b = buffer[bufferPosition++];
-                i |= (b & 0x7F) << 14;
-                if (b >= 0) return i;
-                b = buffer[bufferPosition++];
-                i |= (b & 0x7F) << 21;
-                if (b >= 0) return i;
-                b = buffer[bufferPosition++];
-                // Warning: the next ands use 0x0F / 0xF0 - beware copy/paste errors:
-                i |= (b & 0x0F) << 28;
-                if ((b & 0xF0) == 0) return i;
-                throw new System.IO.IOException("Invalid vInt detected (too many bits)");
+                for (int shift = 7; (b & 0x80) != 0; shift += 7)
+                {
+                    b = buffer[bufferPosition++];
+                    i |= (b & 0x7F) << shift;
+                }
+                return i;
+
+                //byte b = buffer[bufferPosition++];
+                //if (b >= 0) return b;
+                //int i = b & 0x7F;
+                //b = buffer[bufferPosition++];
+                //i |= (b & 0x7F) << 7;
+                //if (b >= 0) return i;
+                //b = buffer[bufferPosition++];
+                //i |= (b & 0x7F) << 14;
+                //if (b >= 0) return i;
+                //b = buffer[bufferPosition++];
+                //i |= (b & 0x7F) << 21;
+                //if (b >= 0) return i;
+                //b = buffer[bufferPosition++];
+                //// Warning: the next ands use 0x0F / 0xF0 - beware copy/paste errors:
+                //i |= (b & 0x0F) << 28;
+                //if ((b & 0xF0) == 0) return i;
+                //throw new System.IO.IOException("Invalid vInt detected (too many bits)");
             }
             else
             {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/4c65df01/src/core/Store/ByteArrayDataInput.cs
----------------------------------------------------------------------
diff --git a/src/core/Store/ByteArrayDataInput.cs b/src/core/Store/ByteArrayDataInput.cs
index e865613..ea36c6b 100644
--- a/src/core/Store/ByteArrayDataInput.cs
+++ b/src/core/Store/ByteArrayDataInput.cs
@@ -90,23 +90,33 @@ namespace Lucene.Net.Store
 
         public override int ReadVInt()
         {
+            // .NET Port: going back to original style code instead of Java code below due to sbyte/byte diff
             byte b = bytes[pos++];
-            if (b >= 0) return b;
             int i = b & 0x7F;
-            b = bytes[pos++];
-            i |= (b & 0x7F) << 7;
-            if (b >= 0) return i;
-            b = bytes[pos++];
-            i |= (b & 0x7F) << 14;
-            if (b >= 0) return i;
-            b = bytes[pos++];
-            i |= (b & 0x7F) << 21;
-            if (b >= 0) return i;
-            b = bytes[pos++];
-            // Warning: the next ands use 0x0F / 0xF0 - beware copy/paste errors:
-            i |= (b & 0x0F) << 28;
-            if ((b & 0xF0) == 0) return i;
-            throw new InvalidOperationException("Invalid vInt detected (too many bits)");
+            for (int shift = 7; (b & 0x80) != 0; shift += 7)
+            {
+                b = bytes[pos++];
+                i |= (b & 0x7F) << shift;
+            }
+            return i;
+
+            //byte b = bytes[pos++];
+            //if (b >= 0) return b;
+            //int i = b & 0x7F;
+            //b = bytes[pos++];
+            //i |= (b & 0x7F) << 7;
+            //if (b >= 0) return i;
+            //b = bytes[pos++];
+            //i |= (b & 0x7F) << 14;
+            //if (b >= 0) return i;
+            //b = bytes[pos++];
+            //i |= (b & 0x7F) << 21;
+            //if (b >= 0) return i;
+            //b = bytes[pos++];
+            //// Warning: the next ands use 0x0F / 0xF0 - beware copy/paste errors:
+            //i |= (b & 0x0F) << 28;
+            //if ((b & 0xF0) == 0) return i;
+            //throw new InvalidOperationException("Invalid vInt detected (too many bits)");
         }
 
         public override long ReadVLong()
@@ -148,7 +158,7 @@ namespace Lucene.Net.Store
 
         public override void ReadBytes(byte[] b, int offset, int len)
         {
-            Array.Copy(bytes, pos, b, offset, len);
+            Buffer.BlockCopy(bytes, pos, b, offset, len);
             pos += len;
         }
     }

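The Array.Copy to Buffer.BlockCopy swap in ReadBytes is a byte-buffer
micro-optimization: for byte arrays the two calls move the same data, but
BlockCopy counts raw bytes and skips per-element type checks, which matters in
a decoder hot path. A quick equivalence sketch:

    // BlockCopy's count is in bytes, which coincides with the element count
    // here only because sizeof(byte) == 1.
    byte[] src = { 1, 2, 3, 4, 5 };
    byte[] a = new byte[5], b = new byte[5];
    Array.Copy(src, 1, a, 0, 3);       // a = { 2, 3, 4, 0, 0 }
    Buffer.BlockCopy(src, 1, b, 0, 3); // b = { 2, 3, 4, 0, 0 }
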
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/4c65df01/src/core/Store/DataInput.cs
----------------------------------------------------------------------
diff --git a/src/core/Store/DataInput.cs b/src/core/Store/DataInput.cs
index 115bd44..07310a1 100644
--- a/src/core/Store/DataInput.cs
+++ b/src/core/Store/DataInput.cs
@@ -50,6 +50,16 @@ namespace Lucene.Net.Store
 
         public virtual int ReadVInt()
         {
+            // .NET Port: Going back to original code instead of Java code below due to sbyte/byte diff
+            byte b = ReadByte();
+            int i = b & 0x7F;
+            for (int shift = 7; (b & 0x80) != 0; shift += 7)
+            {
+                b = ReadByte();
+                i |= (b & 0x7F) << shift;
+            }
+            return i;
+
             /* This is the original code of this method,
              * but a Hotspot bug (see LUCENE-2975) corrupts the for-loop if
              * ReadByte() is inlined. So the loop was unwinded!
@@ -61,23 +71,23 @@ namespace Lucene.Net.Store
             }
             return i;
             */
-            byte b = ReadByte();
-            if (b >= 0) return b;
-            int i = b & 0x7F;
-            b = ReadByte();
-            i |= (b & 0x7F) << 7;
-            if (b >= 0) return i;
-            b = ReadByte();
-            i |= (b & 0x7F) << 14;
-            if (b >= 0) return i;
-            b = ReadByte();
-            i |= (b & 0x7F) << 21;
-            if (b >= 0) return i;
-            b = ReadByte();
-            // Warning: the next ands use 0x0F / 0xF0 - beware copy/paste errors:
-            i |= (b & 0x0F) << 28;
-            if ((b & 0xF0) == 0) return i;
-            throw new System.IO.IOException("Invalid vInt detected (too many bits)");
+            //byte b = ReadByte();
+            //if (b >= 0) return b;
+            //int i = b & 0x7F;
+            //b = ReadByte();
+            //i |= (b & 0x7F) << 7;
+            //if (b >= 0) return i;
+            //b = ReadByte();
+            //i |= (b & 0x7F) << 14;
+            //if (b >= 0) return i;
+            //b = ReadByte();
+            //i |= (b & 0x7F) << 21;
+            //if (b >= 0) return i;
+            //b = ReadByte();
+            //// Warning: the next ands use 0x0F / 0xF0 - beware copy/paste errors:
+            //i |= (b & 0x0F) << 28;
+            //if ((b & 0xF0) == 0) return i;
+            //throw new System.IO.IOException("Invalid vInt detected (too many bits)");
         }
 
         public virtual long ReadLong()

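Taken together, the three decoders in this commit drop the unrolled port in
favor of the shift loop, because the unrolled form relied on Java's signed
byte for its early exits and mis-decoded any multi-byte VInt in C#. A
self-contained round-trip sketch of the same wire format (hypothetical helper
names, semantics matching the methods above):

    using System.Collections.Generic;

    static class VIntSketch
    {
        // 7 data bits per byte, high bit set on every byte except the last.
        public static void WriteVInt(List<byte> output, int value)
        {
            uint u = (uint)value;              // negative ints emit five bytes
            while (u > 0x7F)
            {
                output.Add((byte)((u & 0x7F) | 0x80));
                u >>= 7;
            }
            output.Add((byte)u);
        }

        public static int ReadVInt(byte[] input, ref int pos)
        {
            byte b = input[pos++];
            int i = b & 0x7F;
            for (int shift = 7; (b & 0x80) != 0; shift += 7)
            {
                b = input[pos++];
                i |= (b & 0x7F) << shift;
            }
            return i;
        }
    }

    // Round trip: WriteVInt(buf, -1) emits 0xFF 0xFF 0xFF 0xFF 0x0F, and
    // ReadVInt(buf.ToArray(), ref p) returns -1.
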

[11/50] [abbrv] git commit: Port: unit tests. more from util namespace

Posted by mh...@apache.org.
Port: unit tests. more from util namespace


Project: http://git-wip-us.apache.org/repos/asf/lucenenet/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucenenet/commit/14f5ae0c
Tree: http://git-wip-us.apache.org/repos/asf/lucenenet/tree/14f5ae0c
Diff: http://git-wip-us.apache.org/repos/asf/lucenenet/diff/14f5ae0c

Branch: refs/heads/branch_4x
Commit: 14f5ae0cf2d381cca82e7fb8d47a47524f9b16d9
Parents: d9635bf
Author: James Blair <jm...@gmail.com>
Authored: Tue Jul 16 17:32:28 2013 -0400
Committer: James Blair <jm...@gmail.com>
Committed: Tue Jul 16 17:32:28 2013 -0400

----------------------------------------------------------------------
 test/core/Util/Automaton/TestBasicOperations.cs | 160 ++++++++
 .../Util/Automaton/TestCompiledAutomaton.cs     | 127 ++++++
 test/core/Util/Automaton/TestDeterminism.cs     |  69 ++++
 .../Util/Automaton/TestDeterminizeLexicon.cs    |  51 +++
 .../Util/Automaton/TestLevenshteinAutomata.cs   | 394 +++++++++++++++++++
 test/core/Util/Automaton/TestMinimize.cs        |  51 +++
 .../Util/Automaton/TestSpecialOperations.cs     |  38 ++
 test/core/Util/Automaton/TestUTF32ToUTF8.cs     | 271 +++++++++++++
 test/core/Util/TestWeakIdentityMap.cs           | 276 +++++++++++++
 9 files changed, 1437 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucenenet/blob/14f5ae0c/test/core/Util/Automaton/TestBasicOperations.cs
----------------------------------------------------------------------
diff --git a/test/core/Util/Automaton/TestBasicOperations.cs b/test/core/Util/Automaton/TestBasicOperations.cs
new file mode 100644
index 0000000..8db8744
--- /dev/null
+++ b/test/core/Util/Automaton/TestBasicOperations.cs
@@ -0,0 +1,160 @@
+using System;
+using System.Collections.Generic;
+using System.Linq;
+using Lucene.Net.Support;
+using Lucene.Net.Util;
+using Lucene.Net.Util.Automaton;
+using NUnit.Framework;
+
+namespace Lucene.Net.Test.Util.Automaton
+{
+    [TestFixture]
+    public class TestBasicOperations : LuceneTestCase
+    {
+        [Test]
+        public void TestStringUnion()
+        {
+            var strings = new List<BytesRef>();
+            for (int i = RandomInts.RandomIntBetween(new Random(), 0, 1000); --i >= 0; )
+            {
+                strings.Add(new BytesRef(_TestUtil.RandomUnicodeString(new Random())));
+            }
+
+            strings.Sort();
+            //Collections.Sort(strings);
+            var union = BasicAutomata.MakeStringUnion(strings);
+            Assert.IsTrue(union.Deterministic);
+            Assert.IsTrue(BasicOperations.SameLanguage(union, NaiveUnion(strings)));
+        }
+
+        private static Lucene.Net.Util.Automaton.Automaton NaiveUnion(List<BytesRef> strings)
+        {
+            var eachIndividual = new Lucene.Net.Util.Automaton.Automaton[strings.Count];
+            int i = 0;
+            foreach (var bref in strings)
+            {
+                eachIndividual[i++] = BasicAutomata.MakeString(bref.Utf8ToString());
+            }
+            return BasicOperations.Union(eachIndividual.ToList());
+        }
+
+        /** Test optimization to Concatenate() */
+        [Test]
+        public void TestSingletonConcatenate()
+        {
+            var singleton = BasicAutomata.MakeString("prefix");
+            var expandedSingleton = singleton.CloneExpanded();
+            var other = BasicAutomata.MakeCharRange('5', '7');
+            var concat = BasicOperations.Concatenate(singleton, other);
+            Assert.IsTrue(concat.Deterministic);
+            Assert.IsTrue(BasicOperations.SameLanguage(BasicOperations.Concatenate(expandedSingleton, other), concat));
+        }
+
+        /** Test optimization to Concatenate() to an NFA */
+        [Test]
+        public void TestSingletonNFAConcatenate()
+        {
+            var singleton = BasicAutomata.MakeString("prefix");
+            var expandedSingleton = singleton.CloneExpanded();
+            // an NFA (two transitions for 't' from initial state)
+            var nfa = BasicOperations.Union(BasicAutomata.MakeString("this"),
+                BasicAutomata.MakeString("three"));
+            var concat = BasicOperations.Concatenate(singleton, nfa);
+            Assert.IsFalse(concat.Deterministic);
+            Assert.IsTrue(BasicOperations.SameLanguage(BasicOperations.Concatenate(expandedSingleton, nfa), concat));
+        }
+
+        /** Test optimization to Concatenate() with empty String */
+        [Test]
+        public void TestEmptySingletonConcatenate()
+        {
+            var singleton = BasicAutomata.MakeString("");
+            var expandedSingleton = singleton.CloneExpanded();
+            var other = BasicAutomata.MakeCharRange('5', '7');
+            var concat1 = BasicOperations.Concatenate(expandedSingleton, other);
+            var concat2 = BasicOperations.Concatenate(singleton, other);
+            Assert.IsTrue(concat2.Deterministic);
+            Assert.IsTrue(BasicOperations.SameLanguage(concat1, concat2));
+            Assert.IsTrue(BasicOperations.SameLanguage(other, concat1));
+            Assert.IsTrue(BasicOperations.SameLanguage(other, concat2));
+        }
+
+        /** Test concatenation with empty language returns empty */
+        [Test]
+        public void TestEmptyLanguageConcatenate()
+        {
+            var a = BasicAutomata.MakeString("a");
+            var concat = BasicOperations.Concatenate(a, BasicAutomata.MakeEmpty());
+            Assert.IsTrue(BasicOperations.IsEmpty(concat));
+        }
+
+        /** Test optimization to Concatenate() with empty String to an NFA */
+        [Test]
+        public void TestEmptySingletonNFAConcatenate()
+        {
+            var singleton = BasicAutomata.MakeString("");
+            var expandedSingleton = singleton.CloneExpanded();
+            // an NFA (two transitions for 't' from initial state)
+            var nfa = BasicOperations.Union(BasicAutomata.MakeString("this"),
+                BasicAutomata.MakeString("three"));
+            var concat1 = BasicOperations.Concatenate(expandedSingleton, nfa);
+            var concat2 = BasicOperations.Concatenate(singleton, nfa);
+            Assert.IsFalse(concat2.Deterministic);
+            Assert.IsTrue(BasicOperations.SameLanguage(concat1, concat2));
+            Assert.IsTrue(BasicOperations.SameLanguage(nfa, concat1));
+            Assert.IsTrue(BasicOperations.SameLanguage(nfa, concat2));
+        }
+
+        /** Test singletons work correctly */
+        [Test]
+        public void TestSingleton()
+        {
+            var singleton = BasicAutomata.MakeString("foobar");
+            var expandedSingleton = singleton.CloneExpanded();
+            Assert.IsTrue(BasicOperations.SameLanguage(singleton, expandedSingleton));
+
+            singleton = BasicAutomata.MakeString("\ud801\udc1c");
+            expandedSingleton = singleton.CloneExpanded();
+            Assert.IsTrue(BasicOperations.SameLanguage(singleton, expandedSingleton));
+        }
+
+        [Test]
+        public void TestGetRandomAcceptedString()
+        {
+            int ITER1 = AtLeast(100);
+            int ITER2 = AtLeast(100);
+            for (var i = 0; i < ITER1; i++)
+            {
+
+                var re = new RegExp(AutomatonTestUtil.RandomRegexp(new Random()), RegExp.NONE);
+                var a = re.ToAutomaton();
+                Assert.IsFalse(BasicOperations.IsEmpty(a));
+
+                AutomatonTestUtil.RandomAcceptedStrings rx = new AutomatonTestUtil.RandomAcceptedStrings(a);
+                for (var j = 0; j < ITER2; j++)
+                {
+                    int[] acc = null;
+                    try
+                    {
+                        acc = rx.GetRandomAcceptedString(new Random());
+                        String s = UnicodeUtil.NewString(acc, 0, acc.Length);
+                        Assert.IsTrue(BasicOperations.Run(a, s));
+                    }
+                    catch (Exception e)
+                    {
+                        Console.WriteLine("regexp: " + re);
+                        if (acc != null)
+                        {
+                            Console.WriteLine("fail acc re=" + re + " count=" + acc.Length);
+                            for (int k = 0; k < acc.Length; k++)
+                            {
+                                Console.WriteLine("  " + Integer.ToHexString(acc[k]));
+                            }
+                        }
+                        throw;
+                    }
+                }
+            }
+        }
+    }
+}

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/14f5ae0c/test/core/Util/Automaton/TestCompiledAutomaton.cs
----------------------------------------------------------------------
diff --git a/test/core/Util/Automaton/TestCompiledAutomaton.cs b/test/core/Util/Automaton/TestCompiledAutomaton.cs
new file mode 100644
index 0000000..b2ea288
--- /dev/null
+++ b/test/core/Util/Automaton/TestCompiledAutomaton.cs
@@ -0,0 +1,127 @@
+using System;
+using System.Collections.Generic;
+using System.Linq;
+using Lucene.Net.Support;
+using Lucene.Net.Util;
+using Lucene.Net.Util.Automaton;
+using NUnit.Framework;
+
+namespace Lucene.Net.Test.Util.Automaton
+{
+    [TestFixture]
+    public class TestCompiledAutomaton : LuceneTestCase
+    {
+        private CompiledAutomaton Build(params string[] strings)
+        {
+            var terms = strings.Select(s => new BytesRef(s)).ToList();
+            terms.Sort();
+            //Collections.sort(terms);
+            var a = DaciukMihovAutomatonBuilder.Build(terms);
+            return new CompiledAutomaton(a, true, false);
+        }
+
+        private void TestFloor(CompiledAutomaton c, string input, string expected)
+        {
+            var b = new BytesRef(input);
+            var result = c.Floor(b, b);
+            if (expected == null)
+            {
+                assertNull(result);
+            }
+            else
+            {
+                assertNotNull(result);
+                assertEquals("actual=" + result.Utf8ToString() + " vs expected=" + expected + " (input=" + input + ")",
+                             result, new BytesRef(expected));
+            }
+        }
+
+        private void TestTerms(string[] terms)
+        {
+            var c = Build(terms);
+            var termBytes = new BytesRef[terms.Length];
+            for (var idx = 0; idx < terms.Length; idx++)
+            {
+                termBytes[idx] = new BytesRef(terms[idx]);
+            }
+            Array.Sort(termBytes);
+            //Arrays.sort(termBytes);
+
+            if (VERBOSE)
+            {
+                Console.WriteLine("\nTEST: terms in unicode order");
+                foreach (var t in termBytes)
+                {
+                    Console.WriteLine("  " + t.Utf8ToString());
+                }
+                //System.out.println(c.utf8.toDot());
+            }
+
+            for (var iter = 0; iter < 100 * RANDOM_MULTIPLIER; iter++)
+            {
+                var s = new Random().Next(10) == 1 ? terms[new Random().Next(terms.Length)] : RandomString();
+                if (VERBOSE)
+                {
+                    Console.WriteLine("\nTEST: floor(" + s + ")");
+                }
+                int loc = Arrays.BinarySearch(termBytes, new BytesRef(s));
+                string expected;
+                if (loc >= 0)
+                {
+                    expected = s;
+                }
+                else
+                {
+                    // term doesn't exist
+                    loc = -(loc + 1);
+                    if (loc == 0)
+                    {
+                        expected = null;
+                    }
+                    else
+                    {
+                        expected = termBytes[loc - 1].Utf8ToString();
+                    }
+                }
+                if (VERBOSE)
+                {
+                    Console.WriteLine("  expected=" + expected);
+                }
+                TestFloor(c, s, expected);
+            }
+        }
+
+        [Test]
+        public void TestRandom()
+        {
+            int numTerms = AtLeast(400);
+            var terms = new HashSet<string>();
+            while (terms.Count != numTerms)
+            {
+                terms.Add(RandomString());
+            }
+            TestTerms(terms.ToArray());
+        }
+
+        private string RandomString()
+        {
+            // return _TestUtil.randomSimpleString(random);
+            return _TestUtil.RandomRealisticUnicodeString(new Random());
+        }
+
+        [Test]
+        public void TestBasic()
+        {
+            var c = Build("fob", "foo", "goo");
+            TestFloor(c, "goo", "goo");
+            TestFloor(c, "ga", "foo");
+            TestFloor(c, "g", "foo");
+            TestFloor(c, "foc", "fob");
+            TestFloor(c, "foz", "foo");
+            TestFloor(c, "f", null);
+            TestFloor(c, "", null);
+            TestFloor(c, "aa", null);
+            TestFloor(c, "zzz", "goo");
+        }
+    }
+}
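The floor lookup in TestTerms above leans on the binary-search miss convention
shared by Java and .NET: a miss returns the bitwise complement of the
insertion point, so -(loc + 1) recovers where the probe would be inserted, and
the floor term, if any, sits one slot earlier. A tiny sketch:

    int[] sorted = { 10, 20, 30 };
    int loc = Array.BinarySearch(sorted, 25); // miss: returns ~2, i.e. -3
    if (loc < 0)
    {
        int insertionPoint = -(loc + 1);      // == ~loc == 2
        int floor = insertionPoint == 0       // 0 means "no smaller element"
            ? -1
            : sorted[insertionPoint - 1];     // 20, the floor of 25
    }
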

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/14f5ae0c/test/core/Util/Automaton/TestDeterminism.cs
----------------------------------------------------------------------
diff --git a/test/core/Util/Automaton/TestDeterminism.cs b/test/core/Util/Automaton/TestDeterminism.cs
new file mode 100644
index 0000000..9106b6c
--- /dev/null
+++ b/test/core/Util/Automaton/TestDeterminism.cs
@@ -0,0 +1,69 @@
+using System;
+using Lucene.Net.Util;
+using Lucene.Net.Util.Automaton;
+using NUnit.Framework;
+
+namespace Lucene.Net.Test.Util.Automaton
+{
+    [TestFixture]
+    public class TestDeterminism : LuceneTestCase
+    {
+        /** Test a bunch of random regular expressions */
+        [Test]
+        public void TestRegexps()
+        {
+            int num = AtLeast(500);
+            for (var i = 0; i < num; i++)
+                AssertAutomaton(new RegExp(AutomatonTestUtil.RandomRegexp(new Random()), RegExp.NONE).ToAutomaton());
+        }
+
+        /** Test against a simple, unoptimized det */
+        [Test]
+        public void TestAgainstSimple()
+        {
+            int num = AtLeast(200);
+            for (var i = 0; i < num; i++)
+            {
+                var a = AutomatonTestUtil.RandomAutomaton(new Random());
+                var b = a.Clone();
+                AutomatonTestUtil.DeterminizeSimple(a);
+                b.Deterministic = false; // force det
+                b.Determinize();
+                // TODO: more verifications possible?
+                Assert.IsTrue(BasicOperations.SameLanguage(a, b));
+            }
+        }
+
+        private static void AssertAutomaton(Lucene.Net.Util.Automaton.Automaton a)
+        {
+            var clone = a.Clone();
+            // complement(complement(a)) = a
+            var equivalent = BasicOperations.Complement(BasicOperations.Complement(a));
+            Assert.IsTrue(BasicOperations.SameLanguage(a, equivalent));
+
+            // a union a = a
+            equivalent = BasicOperations.Union(a, clone);
+            Assert.IsTrue(BasicOperations.SameLanguage(a, equivalent));
+
+            // a intersect a = a
+            equivalent = BasicOperations.Intersection(a, clone);
+            Assert.IsTrue(BasicOperations.SameLanguage(a, equivalent));
+
+            // a minus a = empty
+            var empty = BasicOperations.Minus(a, clone);
+            Assert.IsTrue(BasicOperations.IsEmpty(empty));
+
+            // as long as don't accept the empty string
+            // then optional(a) - empty = a
+            if (!BasicOperations.Run(a, ""))
+            {
+                //System.out.println("Test " + a);
+                var optional = BasicOperations.Optional(a);
+                //System.out.println("optional " + optional);
+                equivalent = BasicOperations.Minus(optional, BasicAutomata.MakeEmptyString());
+                //System.out.println("equiv " + equivalent);
+                Assert.IsTrue(BasicOperations.SameLanguage(a, equivalent));
+            }
+        }
+    }
+}

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/14f5ae0c/test/core/Util/Automaton/TestDeterminizeLexicon.cs
----------------------------------------------------------------------
diff --git a/test/core/Util/Automaton/TestDeterminizeLexicon.cs b/test/core/Util/Automaton/TestDeterminizeLexicon.cs
new file mode 100644
index 0000000..a8c11af
--- /dev/null
+++ b/test/core/Util/Automaton/TestDeterminizeLexicon.cs
@@ -0,0 +1,51 @@
+using System;
+using System.Collections.Generic;
+using Lucene.Net.Support;
+using Lucene.Net.Util;
+using Lucene.Net.Util.Automaton;
+using NUnit.Framework;
+
+namespace Lucene.Net.Test.Util.Automaton
+{
+    [TestFixture]
+    public class TestDeterminizeLexicon : LuceneTestCase
+    {
+        private List<Lucene.Net.Util.Automaton.Automaton> automata = new List<Lucene.Net.Util.Automaton.Automaton>();
+        private List<string> terms = new List<string>();
+
+        [Test]
+        public void TestLexicon()
+        {
+            int num = AtLeast(1);
+            for (var i = 0; i < num; i++)
+            {
+                automata.Clear();
+                terms.Clear();
+                for (var j = 0; j < 5000; j++)
+                {
+                    string randomString = _TestUtil.RandomUnicodeString(new Random());
+                    terms.Add(randomString);
+                    automata.Add(BasicAutomata.MakeString(randomString));
+                }
+                AssertLexicon();
+            }
+        }
+
+        public void AssertLexicon()
+        {
+            Collections.Shuffle(automata, new Random());
+            var lex = BasicOperations.Union(automata);
+            lex.Determinize();
+            assertTrue(SpecialOperations.IsFinite(lex));
+            foreach (var s in terms)
+            {
+                assertTrue(BasicOperations.Run(lex, s));
+            }
+            var lexByte = new ByteRunAutomaton(lex);
+            foreach (var s in terms)
+            {
+                var bytes = System.Text.Encoding.UTF8.GetBytes(s);
+                assertTrue(lexByte.Run(bytes, 0, bytes.Length));
+            }
+        }
+    }
+}

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/14f5ae0c/test/core/Util/Automaton/TestLevenshteinAutomata.cs
----------------------------------------------------------------------
diff --git a/test/core/Util/Automaton/TestLevenshteinAutomata.cs b/test/core/Util/Automaton/TestLevenshteinAutomata.cs
new file mode 100644
index 0000000..b325325
--- /dev/null
+++ b/test/core/Util/Automaton/TestLevenshteinAutomata.cs
@@ -0,0 +1,394 @@
+using System;
+using System.Collections.Generic;
+using System.Text;
+using Lucene.Net.Util;
+using Lucene.Net.Util.Automaton;
+using NUnit.Framework;
+
+namespace Lucene.Net.Test.Util.Automaton
+{
+    [TestFixture]
+    public class TestLevenshteinAutomata : LuceneTestCase
+    {
+        [Test]
+        public void TestLev0()
+        {
+            AssertLev("", 0);
+            AssertCharVectors(0);
+        }
+
+        [Test]
+        public void TestLev1()
+        {
+            AssertLev("", 1);
+            AssertCharVectors(1);
+        }
+
+        [Test]
+        public void TestLev2()
+        {
+            AssertLev("", 2);
+            AssertCharVectors(2);
+        }
+
+        // LUCENE-3094
+        [Test]
+        public void TestNoWastedStates()
+        {
+            AutomatonTestUtil.assertNoDetachedStates(new LevenshteinAutomata("abc", false).ToAutomaton(1));
+        }
+
+        /** 
+         * Tests all possible characteristic vectors for some n
+         * This exhaustively tests the parametric transitions tables.
+         */
+        private void AssertCharVectors(int n)
+        {
+            var k = 2 * n + 1;
+            // use k + 2 as the exponent: the formula generates different transitions
+            // for w, w-1, w-2
+            var limit = (int)Math.Pow(2, k + 2);
+            for (var i = 0; i < limit; i++)
+            {
+                string encoded = Convert.ToString(i, 2);
+                AssertLev(encoded, n);
+            }
+        }
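+        // e.g. for n = 1: k = 3, so the loop enumerates all 2^(3+2) = 32
+        // binary encodings, covering the transition variants for word
+        // lengths w, w-1 and w-2.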
+
+        /**
+         * Builds a DFA for some string, and checks all Lev automata
+         * up to some maximum distance.
+         */
+        private void AssertLev(String s, int maxDistance)
+        {
+            var builder = new LevenshteinAutomata(s, false);
+            var tbuilder = new LevenshteinAutomata(s, true);
+            var automata = new Lucene.Net.Util.Automaton.Automaton[maxDistance + 1];
+            var tautomata = new Lucene.Net.Util.Automaton.Automaton[maxDistance + 1];
+            for (var n = 0; n < automata.Length; n++)
+            {
+                automata[n] = builder.ToAutomaton(n);
+                tautomata[n] = tbuilder.ToAutomaton(n);
+                Assert.IsNotNull(automata[n]);
+                Assert.IsNotNull(tautomata[n]);
+                Assert.IsTrue(automata[n].Deterministic);
+                Assert.IsTrue(tautomata[n].Deterministic);
+                Assert.IsTrue(SpecialOperations.IsFinite(automata[n]));
+                Assert.IsTrue(SpecialOperations.IsFinite(tautomata[n]));
+                AutomatonTestUtil.assertNoDetachedStates(automata[n]);
+                AutomatonTestUtil.assertNoDetachedStates(tautomata[n]);
+                // check that the dfa for n-1 accepts a subset of the dfa for n
+                if (n > 0)
+                {
+                    Assert.IsTrue(automata[n - 1].SubsetOf(automata[n]));
+                    Assert.IsTrue(automata[n - 1].SubsetOf(tautomata[n]));
+                    Assert.IsTrue(tautomata[n - 1].SubsetOf(automata[n]));
+                    Assert.IsTrue(tautomata[n - 1].SubsetOf(tautomata[n]));
+                    Assert.AreNotSame(automata[n - 1], automata[n]);
+                }
+                // check that Lev(N) is a subset of LevT(N)
+                Assert.IsTrue(automata[n].SubsetOf(tautomata[n]));
+                // special checks for specific n
+                switch (n)
+                {
+                    case 0:
+                        // easy, matches the string itself
+                        Assert.IsTrue(BasicOperations.SameLanguage(BasicAutomata.MakeString(s), automata[0]));
+                        Assert.IsTrue(BasicOperations.SameLanguage(BasicAutomata.MakeString(s), tautomata[0]));
+                        break;
+                    case 1:
+                        // generate a lev1 naively, and check the accepted lang is the same.
+                        Assert.IsTrue(BasicOperations.SameLanguage(NaiveLev1(s), automata[1]));
+                        Assert.IsTrue(BasicOperations.SameLanguage(NaiveLev1T(s), tautomata[1]));
+                        break;
+                    default:
+                        AssertBruteForce(s, automata[n], n);
+                        AssertBruteForceT(s, tautomata[n], n);
+                        break;
+                }
+            }
+        }
+
+        private Lucene.Net.Util.Automaton.Automaton NaiveLev1(String s)
+        {
+            var a = BasicAutomata.MakeString(s);
+            a = BasicOperations.Union(a, InsertionsOf(s));
+            MinimizationOperations.Minimize(a);
+            a = BasicOperations.Union(a, DeletionsOf(s));
+            MinimizationOperations.Minimize(a);
+            a = BasicOperations.Union(a, SubstitutionsOf(s));
+            MinimizationOperations.Minimize(a);
+
+            return a;
+        }
+
+        private Lucene.Net.Util.Automaton.Automaton NaiveLev1T(String s)
+        {
+            var a = NaiveLev1(s);
+            a = BasicOperations.Union(a, TranspositionsOf(s));
+            MinimizationOperations.Minimize(a);
+            return a;
+        }
+
+        private Lucene.Net.Util.Automaton.Automaton InsertionsOf(String s)
+        {
+            var list = new List<Lucene.Net.Util.Automaton.Automaton>();
+
+            for (var i = 0; i <= s.Length; i++)
+            {
+                var a = BasicAutomata.MakeString(s.Substring(0, i));
+                a = BasicOperations.Concatenate(a, BasicAutomata.MakeAnyChar());
+                a = BasicOperations.Concatenate(a, BasicAutomata.MakeString(s
+                    .Substring(i)));
+                list.Add(a);
+            }
+
+            var automaton = BasicOperations.Union(list);
+            MinimizationOperations.Minimize(automaton);
+            return automaton;
+        }
+
+        private Lucene.Net.Util.Automaton.Automaton DeletionsOf(String s)
+        {
+            var list = new List<Lucene.Net.Util.Automaton.Automaton>();
+
+            for (var i = 0; i < s.Length; i++)
+            {
+                var a = BasicAutomata.MakeString(s.Substring(0, i));
+                a = BasicOperations.Concatenate(a, BasicAutomata.MakeString(s
+                    .Substring(i + 1)));
+                a.ExpandSingleton();
+                list.Add(a);
+            }
+
+            var automaton = BasicOperations.Union(list);
+            MinimizationOperations.Minimize(automaton);
+            return automaton;
+        }
+
+        private Lucene.Net.Util.Automaton.Automaton SubstitutionsOf(String s)
+        {
+            var list = new List<Lucene.Net.Util.Automaton.Automaton>();
+
+            for (var i = 0; i < s.Length; i++)
+            {
+                var a = BasicAutomata.MakeString(s.Substring(0, i));
+                a = BasicOperations.Concatenate(a, BasicAutomata.MakeAnyChar());
+                a = BasicOperations.Concatenate(a, BasicAutomata.MakeString(s
+                    .Substring(i + 1)));
+                list.Add(a);
+            }
+
+            var automaton = BasicOperations.Union(list);
+            MinimizationOperations.Minimize(automaton);
+            return automaton;
+        }
+
+        private Lucene.Net.Util.Automaton.Automaton TranspositionsOf(String s)
+        {
+            if (s.Length < 2)
+                return BasicAutomata.MakeEmpty();
+            var list = new List<Lucene.Net.Util.Automaton.Automaton>();
+            for (var i = 0; i < s.Length - 1; i++)
+            {
+                var sb = new StringBuilder();
+                sb.Append(s.Substring(0, i));
+                sb.Append(s[i + 1]);
+                sb.Append(s[i]);
+                sb.Append(s.Substring(i + 2));
+                var st = sb.ToString();
+                if (!st.Equals(s))
+                    list.Add(BasicAutomata.MakeString(st));
+            }
+            var a = BasicOperations.Union(list);
+            MinimizationOperations.Minimize(a);
+            return a;
+        }
+
+        private void AssertBruteForce(String input, Lucene.Net.Util.Automaton.Automaton dfa, int distance)
+        {
+            var ra = new CharacterRunAutomaton(dfa);
+            var maxLen = input.Length + distance + 1;
+            var maxNum = (int)Math.Pow(2, maxLen);
+            for (var i = 0; i < maxNum; i++)
+            {
+                string encoded = Convert.ToString(i, 2);
+                var accepts = ra.Run(encoded);
+                if (accepts)
+                {
+                    Assert.IsTrue(GetDistance(input, encoded) <= distance);
+                }
+                else
+                {
+                    Assert.IsTrue(GetDistance(input, encoded) > distance);
+                }
+            }
+        }
+
+        private void AssertBruteForceT(String input, Lucene.Net.Util.Automaton.Automaton dfa, int distance)
+        {
+            var ra = new CharacterRunAutomaton(dfa);
+            var maxLen = input.Length + distance + 1;
+            var maxNum = (int)Math.Pow(2, maxLen);
+            for (var i = 0; i < maxNum; i++)
+            {
+                string encoded = Convert.ToString(i, 2);
+                var accepts = ra.Run(encoded);
+                if (accepts)
+                {
+                    Assert.IsTrue(GetTDistance(input, encoded) <= distance);
+                }
+                else
+                {
+                    Assert.IsTrue(GetTDistance(input, encoded) > distance);
+                }
+            }
+        }
+
+        //*****************************
+        // Compute Levenshtein distance: see org.apache.commons.lang.StringUtils#getLevenshteinDistance(String, String)
+        //*****************************
+        private int GetDistance(String target, String other)
+        {
+            char[] sa;
+            int n;
+            int[] p; //'previous' cost array, horizontally
+            int[] d; // cost array, horizontally
+            int[] _d; //placeholder to assist in swapping p and d
+
+            /*
+               The difference between this impl. and the previous is that, rather
+               than creating and retaining a matrix of size s.length()+1 by t.length()+1,
+               we maintain two single-dimensional arrays of length s.length()+1.  The first, d,
+               is the 'current working' distance array that maintains the newest distance cost
+               counts as we iterate through the characters of String s.  Each time we increment
+               the index of String t we are comparing, d is copied to p, the second int[].  Doing so
+               allows us to retain the previous cost counts as required by the algorithm (taking
+               the minimum of the cost count to the left, up one, and diagonally up and to the left
+               of the current cost count being calculated).  (Note that the arrays aren't really
+               copied anymore, just switched...this is clearly much better than cloning an array
+               or doing a System.arraycopy() each time  through the outer loop.)
+
+               Effectively, the difference between the two implementations is this one does not
+               cause an out of memory condition when calculating the LD over two very large strings.
+             */
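+            // Illustrative trace (not part of the original comment): for
+            // target = "ab", other = "b": p starts as [0,1,2]; after the
+            // single column for 'b', d = [1,1,1]; the rows swap, and
+            // p[n] = 1 -- one deletion turns "ab" into "b".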
+
+            sa = target.ToCharArray();
+            n = sa.Length;
+            p = new int[n + 1];
+            d = new int[n + 1];
+
+            var m = other.Length;
+            if (n == 0 || m == 0)
+            {
+                if (n == m)
+                {
+                    return 0;
+                }
+                else
+                {
+                    return Math.Max(n, m);
+                }
+            }
+
+
+            // indexes into strings s and t
+            int i; // iterates through s
+            int j; // iterates through t
+
+            char t_j; // jth character of t
+
+            int cost; // cost
+
+            for (i = 0; i <= n; i++)
+            {
+                p[i] = i;
+            }
+
+            for (j = 1; j <= m; j++)
+            {
+                t_j = other[j - 1];
+                d[0] = j;
+
+                for (i = 1; i <= n; i++)
+                {
+                    cost = sa[i - 1] == t_j ? 0 : 1;
+                    // minimum of cell to the left+1, to the top+1, diagonally left and up +cost
+                    d[i] = Math.Min(Math.Min(d[i - 1] + 1, p[i] + 1), p[i - 1] + cost);
+                }
+
+                // copy current distance counts to 'previous row' distance counts
+                _d = p;
+                p = d;
+                d = _d;
+            }
+
+            // our last action in the above loop was to switch d and p, so p now
+            // actually has the most recent cost counts
+            return Math.Abs(p[n]);
+        }
+
+        private int GetTDistance(String target, String other)
+        {
+            char[] sa;
+            int n;
+            int[,] d; // cost array
+
+            sa = target.ToCharArray();
+            n = sa.Length;
+            int m = other.Length;
+            d = new int[n + 1, m + 1];
+
+            if (n == 0 || m == 0)
+            {
+                if (n == m)
+                {
+                    return 0;
+                }
+                else
+                {
+                    return Math.Max(n, m);
+                }
+            }
+
+            // indexes into strings s and t
+            int i; // iterates through s
+            int j; // iterates through t
+
+            char t_j; // jth character of t
+
+            int cost; // cost
+
+            for (i = 0; i <= n; i++)
+            {
+                d[i,0] = i;
+            }
+
+            for (j = 0; j <= m; j++)
+            {
+                d[0,j] = j;
+            }
+
+            for (j = 1; j <= m; j++)
+            {
+                t_j = other[j - 1];
+
+                for (i = 1; i <= n; i++)
+                {
+                    cost = sa[i - 1] == t_j ? 0 : 1;
+                    // minimum of cell to the left+1, to the top+1, diagonally left and up +cost
+                    d[i,j] = Math.Min(Math.Min(d[i - 1,j] + 1, d[i,j - 1] + 1), d[i - 1,j - 1] + cost);
+                    // transposition
+                    if (i > 1 && j > 1 && target[i - 1] == other[j - 2] && target[i - 2] == other[j - 1])
+                    {
+                        d[i,j] = Math.Min(d[i,j], d[i - 2,j - 2] + cost);
+                    }
+                }
+            }
+
+            // unlike GetDistance, the full cost matrix is retained here, so
+            // the answer sits in the bottom-right cell
+            return Math.Abs(d[n,m]);
+        }
+    }
+}

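For reference, a self-contained two-row implementation of the same scheme as
GetDistance above (a sketch, not part of the ported test):

    using System;

    static class Lev
    {
        internal static int Distance(string s, string t)
        {
            int n = s.Length, m = t.Length;
            if (n == 0 || m == 0) return Math.Max(n, m);
            var p = new int[n + 1]; // previous row of costs
            var d = new int[n + 1]; // current row of costs
            for (int i = 0; i <= n; i++) p[i] = i;
            for (int j = 1; j <= m; j++)
            {
                d[0] = j;
                for (int i = 1; i <= n; i++)
                {
                    int cost = s[i - 1] == t[j - 1] ? 0 : 1;
                    d[i] = Math.Min(Math.Min(d[i - 1] + 1, p[i] + 1), p[i - 1] + cost);
                }
                var tmp = p; p = d; d = tmp; // swap rows instead of copying
            }
            return p[n];
        }

        static void Main()
        {
            Console.WriteLine(Distance("kitten", "sitting")); // prints 3
        }
    }
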
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/14f5ae0c/test/core/Util/Automaton/TestMinimize.cs
----------------------------------------------------------------------
diff --git a/test/core/Util/Automaton/TestMinimize.cs b/test/core/Util/Automaton/TestMinimize.cs
new file mode 100644
index 0000000..c3aff35
--- /dev/null
+++ b/test/core/Util/Automaton/TestMinimize.cs
@@ -0,0 +1,51 @@
+using System;
+using Lucene.Net.Util;
+using Lucene.Net.Util.Automaton;
+using NUnit.Framework;
+
+namespace Lucene.Net.Test.Util.Automaton
+{
+    [TestFixture]
+    public class TestMinimize : LuceneTestCase
+    {
+        /** The minimized and non-minimized automata are compared to ensure they accept the same language. */
+        [Test]
+        public void Test()
+        {
+            int num = AtLeast(200);
+            for (var i = 0; i < num; i++)
+            {
+                var a = AutomatonTestUtil.RandomAutomaton(new Random());
+                var b = a.Clone();
+                MinimizationOperations.Minimize(b);
+                Assert.IsTrue(BasicOperations.SameLanguage(a, b));
+            }
+        }
+
+        /** Compares Hopcroft minimization against a slower, simple implementation.
+         * We check not only that the languages are the same, but also that the
+         * #states/#transitions are the same. */
+        [Test]
+        public void TestAgainstBrzozowski()
+        {
+            int num = AtLeast(200);
+            for (var i = 0; i < num; i++)
+            {
+                var a = AutomatonTestUtil.RandomAutomaton(new Random());
+                AutomatonTestUtil.MinimizeSimple(a);
+                var b = a.Clone();
+                MinimizationOperations.Minimize(b);
+                Assert.IsTrue(BasicOperations.SameLanguage(a, b));
+                Assert.AreEqual(a.GetNumberOfStates(), b.GetNumberOfStates());
+                Assert.AreEqual(a.GetNumberOfTransitions(), b.GetNumberOfTransitions());
+            }
+        }
+
+        /** n^2 space usage in Hopcroft minimization? */
+        [Test]
+        public void TestMinimizeHuge()
+        {
+            new RegExp("+-*(A|.....|BC)*]", RegExp.NONE).ToAutomaton();
+        }
+    }
+}

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/14f5ae0c/test/core/Util/Automaton/TestSpecialOperations.cs
----------------------------------------------------------------------
diff --git a/test/core/Util/Automaton/TestSpecialOperations.cs b/test/core/Util/Automaton/TestSpecialOperations.cs
new file mode 100644
index 0000000..29f8efc
--- /dev/null
+++ b/test/core/Util/Automaton/TestSpecialOperations.cs
@@ -0,0 +1,38 @@
+using System;
+using Lucene.Net.Util;
+using Lucene.Net.Util.Automaton;
+using NUnit.Framework;
+
+namespace Lucene.Net.Test.Util.Automaton
+{
+    [TestFixture]
+    public class TestSpecialOperations : LuceneTestCase
+    {
+        [Test]
+        public void TestIsFinite()
+        {
+            int num = AtLeast(200);
+            for (var i = 0; i < num; i++)
+            {
+                var a = AutomatonTestUtil.RandomAutomaton(new Random());
+                var b = a.Clone();
+                Assert.AreEqual(AutomatonTestUtil.IsFiniteSlow(a), SpecialOperations.IsFinite(b));
+            }
+        }
+
+        [Test]
+        public void TestFiniteStrings()
+        {
+            var a = BasicOperations.Union(BasicAutomata.MakeString("dog"), BasicAutomata.MakeString("duck"));
+            MinimizationOperations.Minimize(a);
+            var strings = SpecialOperations.GetFiniteStrings(a, -1);
+            assertEquals(2, strings.Count);
+            var dog = new IntsRef();
+            Util.ToIntsRef(new BytesRef("dog"), dog);
+            assertTrue(strings.Contains(dog));
+            var duck = new IntsRef();
+            Util.ToIntsRef(new BytesRef("duck"), duck);
+            assertTrue(strings.Contains(duck));
+        }
+    }
+}

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/14f5ae0c/test/core/Util/Automaton/TestUTF32ToUTF8.cs
----------------------------------------------------------------------
diff --git a/test/core/Util/Automaton/TestUTF32ToUTF8.cs b/test/core/Util/Automaton/TestUTF32ToUTF8.cs
new file mode 100644
index 0000000..b2bdaca
--- /dev/null
+++ b/test/core/Util/Automaton/TestUTF32ToUTF8.cs
@@ -0,0 +1,271 @@
+using System;
+using Lucene.Net.Support;
+using Lucene.Net.Test.Support;
+using Lucene.Net.Util;
+using Lucene.Net.Util.Automaton;
+using NUnit.Framework;
+
+namespace Lucene.Net.Test.Util.Automaton
+{
+    [TestFixture]
+    public class TestUTF32ToUTF8 : LuceneTestCase
+    {
+        [SetUp]
+        public override void SetUp()
+        {
+            base.SetUp();
+        }
+
+        private static readonly int MAX_UNICODE = 0x10FFFF;
+
+        internal readonly BytesRef b = new BytesRef(4);
+
+        private bool Matches(ByteRunAutomaton a, int code)
+        {
+            char[] chars = Character.ToChars(code);
+            UnicodeUtil.UTF16toUTF8(chars, 0, chars.Length, b);
+            return a.Run(b.bytes, 0, b.length);
+        }
+
+        private void TestOne(Random r, ByteRunAutomaton a, int startCode, int endCode, int iters)
+        {
+
+            // Verify correct ints are accepted
+            int nonSurrogateCount;
+            bool ovSurStart;
+            if (endCode < UnicodeUtil.UNI_SUR_HIGH_START ||
+                startCode > UnicodeUtil.UNI_SUR_LOW_END)
+            {
+                // no overlap w/ surrogates
+                nonSurrogateCount = endCode - startCode + 1;
+                ovSurStart = false;
+            }
+            else if (IsSurrogate(startCode))
+            {
+                // start of range overlaps surrogates
+                nonSurrogateCount = endCode - startCode + 1 - (UnicodeUtil.UNI_SUR_LOW_END - startCode + 1);
+                ovSurStart = false;
+            }
+            else if (IsSurrogate(endCode))
+            {
+                // end of range overlaps surrogates
+                ovSurStart = true;
+                nonSurrogateCount = endCode - startCode + 1 - (endCode - UnicodeUtil.UNI_SUR_HIGH_START + 1);
+            }
+            else
+            {
+                // range completely subsumes surrogates
+                ovSurStart = true;
+                nonSurrogateCount = endCode - startCode + 1 - (UnicodeUtil.UNI_SUR_LOW_END - UnicodeUtil.UNI_SUR_HIGH_START + 1);
+            }
+
+            //assert nonSurrogateCount > 0;
+
+            for (var iter = 0; iter < iters; iter++)
+            {
+                // pick random code point in-range
+
+                int code = startCode + r.Next(nonSurrogateCount);
+                if (IsSurrogate(code))
+                {
+                    if (ovSurStart)
+                    {
+                        code = UnicodeUtil.UNI_SUR_LOW_END + 1 + (code - UnicodeUtil.UNI_SUR_HIGH_START);
+                    }
+                    else
+                    {
+                        code = UnicodeUtil.UNI_SUR_LOW_END + 1 + (code - startCode);
+                    }
+                }
+
+                //assert code >= startCode && code <= endCode: "code=" + code + " start=" + startCode + " end=" + endCode;
+                //assert !IsSurrogate(code);
+
+                assertTrue("DFA for range " + startCode + "-" + endCode + " failed to match code=" + code,
+                           Matches(a, code));
+            }
+
+            // Verify invalid ints are not accepted
+            var invalidRange = MAX_UNICODE - (endCode - startCode + 1);
+            if (invalidRange > 0)
+            {
+                for (var iter = 0; iter < iters; iter++)
+                {
+                    int x = _TestUtil.NextInt(r, 0, invalidRange - 1);
+                    int code;
+                    if (x >= startCode)
+                    {
+                        code = endCode + 1 + x - startCode;
+                    }
+                    else
+                    {
+                        code = x;
+                    }
+                    if ((code >= UnicodeUtil.UNI_SUR_HIGH_START && code <= UnicodeUtil.UNI_SUR_HIGH_END) ||
+                        (code >= UnicodeUtil.UNI_SUR_LOW_START && code <= UnicodeUtil.UNI_SUR_LOW_END))
+                    {
+                        iter--;
+                        continue;
+                    }
+                    Assert.IsFalse(Matches(a, code), "DFA for range " + startCode + "-" + endCode + " matched invalid code=" + code);
+
+                }
+            }
+        }
+
+        // Evenly picks random code point from the 4 "buckets"
+        // (bucket = same #bytes when encoded to utf8)
+        private int GetCodeStart(Random r)
+        {
+            switch (r.Next(4))
+            {
+                case 0:
+                    return _TestUtil.NextInt(r, 0, 128);
+                case 1:              
+                    return _TestUtil.NextInt(r, 128, 2048);
+                case 2:              
+                    return _TestUtil.NextInt(r, 2048, 65536);
+                default:             
+                    return _TestUtil.NextInt(r, 65536, 1 + MAX_UNICODE);
+            }
+        }
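+        // UTF-8 sizes behind these buckets: U+0000..U+007F -> 1 byte,
+        // U+0080..U+07FF -> 2 bytes, U+0800..U+FFFF -> 3 bytes,
+        // U+10000..U+10FFFF -> 4 bytes.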
+
+        private static bool IsSurrogate(int code)
+        {
+            return code >= UnicodeUtil.UNI_SUR_HIGH_START && code <= UnicodeUtil.UNI_SUR_LOW_END;
+        }
+
+        [Test]
+        public void TestRandomRanges()
+        {
+            var r = new Random();
+            int ITERS = AtLeast(10);
+            int ITERS_PER_DFA = AtLeast(100);
+            for (int iter = 0; iter < ITERS; iter++)
+            {
+                int x1 = GetCodeStart(r);
+                int x2 = GetCodeStart(r);
+                int startCode, endCode;
+
+                if (x1 < x2)
+                {
+                    startCode = x1;
+                    endCode = x2;
+                }
+                else
+                {
+                    startCode = x2;
+                    endCode = x1;
+                }
+
+                if (IsSurrogate(startCode) && IsSurrogate(endCode))
+                {
+                    iter--;
+                    continue;
+                }
+
+                var a = new Lucene.Net.Util.Automaton.Automaton();
+                var end = new State {Accept = true};
+                a.InitialState.AddTransition(new Transition(startCode, endCode, end));
+                a.Deterministic = true;
+
+                TestOne(r, new ByteRunAutomaton(a), startCode, endCode, ITERS_PER_DFA);
+            }
+        }
+
+        [Test]
+        public void TestSpecialCase()
+        {
+            var re = new RegExp(".?");
+            var automaton = re.ToAutomaton();
+            var cra = new CharacterRunAutomaton(automaton);
+            var bra = new ByteRunAutomaton(automaton);
+            // make sure character dfa accepts empty string
+            assertTrue(cra.IsAccept(cra.InitialState));
+            assertTrue(cra.Run(""));
+            assertTrue(cra.Run(new char[0], 0, 0));
+
+            // make sure byte dfa accepts empty string
+            assertTrue(bra.IsAccept(bra.InitialState));
+            assertTrue(bra.Run(new sbyte[0], 0, 0));
+        }
+
+        [Test]
+        public void TestSpecialCase2()
+        {
+            var re = new RegExp(".+\u0775");
+            var input = "\ufadc\ufffd\ub80b\uda5a\udc68\uf234\u0056\uda5b\udcc1\ufffd\ufffd\u0775";
+            var automaton = re.ToAutomaton();
+            var cra = new CharacterRunAutomaton(automaton);
+            var bra = new ByteRunAutomaton(automaton);
+
+            assertTrue(cra.Run(input));
+
+            byte[] bytes = input.GetBytes("UTF-8");
+            assertTrue(bra.Run(bytes, 0, bytes.Length)); // this one fails!
+        }
+
+        [Test]
+        public void TestSpecialCase3()
+        {
+            var re = new RegExp("(\\鯺)*(.)*\\Ӕ");
+            var input = "\u5cfd\ufffd\ub2f7\u0033\ue304\u51d7\u3692\udb50\udfb3\u0576\udae2\udc62\u0053\u0449\u04d4";
+            var automaton = re.ToAutomaton();
+            var cra = new CharacterRunAutomaton(automaton);
+            var bra = new ByteRunAutomaton(automaton);
+
+            assertTrue(cra.Run(input));
+
+            byte[] bytes = input.GetBytes("UTF-8");
+            assertTrue(bra.Run(bytes, 0, bytes.Length));
+        }
+
+        [Test]
+        public void TestRandomRegexes()
+        {
+            int num = AtLeast(250);
+            for (var i = 0; i < num; i++)
+            {
+                AssertAutomaton(new RegExp(AutomatonTestUtil.RandomRegexp(new Random()), RegExp.NONE).ToAutomaton());
+            }
+        }
+
+        private void AssertAutomaton(Lucene.Net.Util.Automaton.Automaton automaton)
+        {
+            var cra = new CharacterRunAutomaton(automaton);
+            var bra = new ByteRunAutomaton(automaton);
+            AutomatonTestUtil.RandomAcceptedStrings ras = new AutomatonTestUtil.RandomAcceptedStrings(automaton);
+
+            int num = AtLeast(1000);
+            for (int i = 0; i < num; i++)
+            {
+                string str;
+                if (new Random().NextBool())
+                {
+                    // likely not accepted
+                    str = _TestUtil.RandomUnicodeString(new Random());
+                }
+                else
+                {
+                    // will be accepted
+                    int[] codepoints = ras.GetRandomAcceptedString(new Random());
+                    try
+                    {
+                        str = UnicodeUtil.NewString(codepoints, 0, codepoints.Length);
+                    }
+                    catch (Exception e)
+                    {
+                        Console.WriteLine(codepoints.Length + " codepoints:");
+                        for (int j = 0; j < codepoints.Length; j++)
+                        {
+                            Console.WriteLine("  " + Integer.toHexString(codepoints[j]));
+                        }
+                        throw; // rethrow without resetting the stack trace
+                    }
+                }
+                var bytes = str.GetBytes("UTF-8");
+                assertEquals(cra.Run(str), bra.Run(bytes, 0, bytes.Length));
+            }
+        }
+    }
+}

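As a standalone illustration of the surrogate handling above, plain .NET draws
the same line (framework APIs only, not the Lucene.Net types under test):

    using System;
    using System.Text;

    class SurrogateDemo
    {
        static void Main()
        {
            // U+10FFFF is the last valid code point; it encodes to 4 UTF-8 bytes
            Console.WriteLine(Encoding.UTF8.GetByteCount(char.ConvertFromUtf32(0x10FFFF))); // 4
            // code points in U+D800..U+DFFF are surrogates, not valid scalar values
            try { char.ConvertFromUtf32(0xD800); }
            catch (ArgumentOutOfRangeException) { Console.WriteLine("surrogates rejected"); }
        }
    }
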
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/14f5ae0c/test/core/Util/TestWeakIdentityMap.cs
----------------------------------------------------------------------
diff --git a/test/core/Util/TestWeakIdentityMap.cs b/test/core/Util/TestWeakIdentityMap.cs
new file mode 100644
index 0000000..2aed402
--- /dev/null
+++ b/test/core/Util/TestWeakIdentityMap.cs
@@ -0,0 +1,276 @@
+using System;
+using System.Threading;
+using Lucene.Net.Support;
+using Lucene.Net.Test.Support;
+using Lucene.Net.Util;
+using NUnit.Framework;
+
+namespace Lucene.Net.Test.Util
+{
+    [TestFixture]
+    public class TestWeakIdentityMap : LuceneTestCase
+    {
+        [Test]
+        public void TestSimpleHashMap()
+        {
+            WeakIdentityMap<string, string> map =
+              WeakIdentityMap<string, string>.NewHashMap(new Random().NextBool());
+            // we keep strong references to the keys,
+            // so WeakIdentityMap will not forget about them:
+            string key1 = "foo";
+            string key2 = "foo";
+            string key3 = "foo";
+
+            Assert.AreNotSame(key1, key2);
+            Assert.AreEqual(key1, key2);
+            Assert.AreNotSame(key1, key3);
+            Assert.AreEqual(key1, key3);
+            Assert.AreNotSame(key2, key3);
+            Assert.AreEqual(key2, key3);
+
+            // try null key & check its iterator also return null:
+            map[null] = "null";
+            {
+                var it = map.Keys.GetEnumerator();
+                Assert.IsTrue(it.MoveNext());
+                Assert.IsNull(it.Current);
+                Assert.IsFalse(it.MoveNext());
+                Assert.IsFalse(it.MoveNext());
+            }
+            // 2 more keys:
+            map[key1] = "bar1";
+            map[key2] = "bar2";
+
+            Assert.AreEqual(3, map.Size);
+
+            Assert.AreEqual("bar1", map[key1]);
+            Assert.AreEqual("bar2", map[key2]);
+            Assert.AreEqual(null, map[key3]);
+            Assert.AreEqual("null", map[null]);
+
+            Assert.IsTrue(map.ContainsKey(key1));
+            Assert.IsTrue(map.ContainsKey(key2));
+            Assert.IsFalse(map.ContainsKey(key3));
+            Assert.IsTrue(map.ContainsKey(null));
+
+            // repeat and check that we have no double entries
+            map[key1] = "bar1";
+            map[key2] = "bar2";
+            map[null] = null;
+
+            Assert.AreEqual(3, map.Size);
+
+            Assert.AreEqual("bar1", map[key1]);
+            Assert.AreEqual("bar2", map[key2]);
+            Assert.AreEqual(null, map[key3]);
+            Assert.AreEqual("null", map[null]);
+
+            Assert.IsTrue(map.ContainsKey(key1));
+            Assert.IsTrue(map.ContainsKey(key2));
+            Assert.IsFalse(map.ContainsKey(key3));
+            Assert.IsTrue(map.ContainsKey(null));
+
+            map.Remove(null);
+            Assert.AreEqual(2, map.Size);
+            map.Remove(key1);
+            Assert.AreEqual(1, map.Size);
+            map[key1] = "bar1";
+            map[key2] = "bar2";
+            map[key3] = "bar3";
+            Assert.AreEqual(3, map.Size);
+
+            int c = 0, keysAssigned = 0;
+            for (var enumerator = map.Keys.GetEnumerator(); enumerator.MoveNext(); )
+            {
+                // (MoveNext() advances the enumerator, so it must be called exactly once per element)
+                var k = enumerator.Current;
+                Assert.IsTrue(k == key1 || k == key2 || k == key3);
+                keysAssigned += (k == key1) ? 1 : ((k == key2) ? 2 : 4);
+                c++;
+            }
+            Assert.AreEqual(3, c);
+            Assert.AreEqual(1 + 2 + 4, keysAssigned, "all keys must have been seen");
+
+            c = 0;
+            for (var enumerator = map.Values.GetEnumerator(); enumerator.MoveNext(); )
+            {
+                var v = enumerator.Current;
+                Assert.IsTrue(v.StartsWith("bar"));
+                c++;
+            }
+            Assert.AreEqual(3, c);
+
+            // clear strong refs
+            key1 = key2 = key3 = null;
+
+            // check that GC does not cause problems in reap() method, wait 1 second and let GC work:
+            int size = map.Size;
+            for (int i = 0; size > 0 && i < 10; i++) try
+                {
+                    GC.WaitForPendingFinalizers();
+                    GC.Collect();
+                    int newSize = map.Size;
+                    Assert.IsTrue(size >= newSize, "previousSize(" + size + ")>=newSize(" + newSize + ")");
+                    size = newSize;
+                    Thread.Sleep(TimeSpan.FromMilliseconds(100L));
+                    c = 0;
+                    for (var enumerator = map.Keys.GetEnumerator(); enumerator.MoveNext(); )
+                    {
+                        assertNotNull(enumerator.Current);
+                        c++;
+                    }
+                    newSize = map.Size;
+                    Assert.IsTrue(size >= c, "previousSize(" + size + ")>=iteratorSize(" + c + ")");
+                    Assert.IsTrue(c >= newSize, "iteratorSize(" + c + ")>=newSize(" + newSize + ")");
+                    size = newSize;
+                }
+                catch (ThreadInterruptedException ie) { }
+
+            map.Clear();
+            Assert.AreEqual(0, map.Size);
+            Assert.IsTrue(map.IsEmpty);
+
+            var enumerator2 = map.Keys.GetEnumerator();
+            Assert.IsFalse(enumerator2.MoveNext());
+            try
+            {
+                var current = enumerator2.Current;
+                Fail("Should throw InvalidOperationException");
+            }
+            catch (InvalidOperationException nse)
+            {
+            }
+
+            key1 = "foo";
+            key2 = "foo";
+            map[key1] = "bar1";
+            map[key2] = "bar2";
+            Assert.AreEqual(2, map.Size);
+
+            map.Clear();
+            Assert.AreEqual(0, map.Size);
+            Assert.IsTrue(map.IsEmpty);
+        }
+
+        private sealed class AnonymousThreadClass : ThreadClass
+        {
+            private readonly Random rnd;
+            private readonly WeakIdentityMap<object, int> map;
+            private readonly int keyCount;
+            private readonly AtomicReferenceArray<object> keys;
+
+            public AnonymousThreadClass(Random rnd, WeakIdentityMap<object, int> map, AtomicReferenceArray<object> keys, int keyCount)
+            {
+                this.rnd = rnd;
+                this.map = map;
+                this.keyCount = keyCount;
+                this.keys = keys;
+            }
+
+            public override void Run()
+            {
+                int count = AtLeast(rnd, 10000);
+                for (int i = 0; i < count; i++)
+                {
+                    int j = rnd.Next(keyCount);
+                    switch (rnd.Next(5))
+                    {
+                        case 0:
+                            map[keys[j]] = j;
+                            break;
+                        case 1:
+                            // the map's value type is int, so probe for the key
+                            // rather than comparing the lookup result to null
+                            if (map.ContainsKey(keys[j]))
+                            {
+                                Assert.AreEqual(j, map[keys[j]]);
+                            }
+                            break;
+                        case 2:
+                            map.Remove(keys[j]);
+                            break;
+                        case 3:
+                            // renew key, the old one will be GCed at some time:
+                            keys.Set(j, new object());
+                            break;
+                        case 4:
+                            // check iterator still working
+                            for (var it = map.Keys.GetEnumerator(); it.MoveNext(); )
+                            {
+                                Assert.IsNotNull(it.Current);
+                            }
+                            break;
+                        default:
+                            Fail("Should not get here.");
+                            break;
+                    }
+                }
+            }
+        }
+
+        [Test]
+        public void TestConcurrentHashMap()
+        {
+            // don't make threadCount and keyCount random, otherwise easily OOMs or fails otherwise:
+            int threadCount = 8, keyCount = 1024;
+            ExecutorService exec = Executors.newFixedThreadPool(threadCount, new NamedThreadFactory("TestConcurrentHashMap"));
+            WeakIdentityMap<object, int> map =
+              WeakIdentityMap<object, int>.NewConcurrentHashMap(new Random().NextBool());
+            // we keep strong references to the keys,
+            // so WeakIdentityMap will not forget about them:
+            AtomicReferenceArray<object> keys = new AtomicReferenceArray<object>(keyCount);
+            for (int j = 0; j < keyCount; j++)
+            {
+                keys.Set(j, new object());
+            }
+
+            try
+            {
+                for (int t = 0; t < threadCount; t++)
+                {
+                    Random rnd = new Random(new Random().Next());
+                    exec.execute(new AnonymousThreadClass(rnd, map, keys, keyCount));
+                }
+            }
+            finally
+            {
+                exec.Shutdown();
+                while (!exec.AwaitTermination(1000L, TimeUnit.Milliseconds)) ;
+            }
+
+            // clear strong refs
+            for (int j = 0; j < keyCount; j++)
+            {
+                keys.Set(j, null);
+            }
+
+            // check that GC does not cause problems in reap() method:
+            int size = map.Size;
+            for (int i = 0; size > 0 && i < 10; i++)
+            {
+                try
+                {
+                    GC.WaitForPendingFinalizers();
+                    GC.Collect();
+                    int newSize = map.Size;
+                    Assert.IsTrue(size >= newSize, "previousSize(" + size + ")>=newSize(" + newSize + ")");
+                    size = newSize;
+                    Thread.Sleep(TimeSpan.FromMilliseconds(100L));
+                    int c = 0;
+                    for (var it = map.Keys.GetEnumerator(); it.MoveNext();)
+                    {
+                        assertNotNull(it.Current);
+                        c++;
+                    }
+                    newSize = map.Size;
+                    Assert.IsTrue(size >= c, "previousSize(" + size + ")>=iteratorSize(" + c + ")");
+                    Assert.IsTrue(c >= newSize, "iteratorSize(" + c + ")>=newSize(" + newSize + ")");
+                    size = newSize;
+                }
+                catch (ThreadInterruptedException ie)
+                {
+
+                }
+            }
+        }
+    }
+}

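For comparison, weak-key identity semantics can be sketched with the
framework's ConditionalWeakTable (reference identity, entries reclaimed once
the key is unreachable); an illustration only, not the WeakIdentityMap API
under test:

    using System;
    using System.Runtime.CompilerServices;

    class WeakDemo
    {
        static void Main()
        {
            var table = new ConditionalWeakTable<object, string>();
            var key = new object();
            table.Add(key, "value");
            string v;
            Console.WriteLine(table.TryGetValue(key, out v)); // True
            key = null;   // drop the only strong reference
            GC.Collect(); // the entry is now eligible for collection
        }
    }
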

[14/50] [abbrv] git commit: final file. all that remains is clean up and formatting.

Posted by mh...@apache.org.
final file. all that remains is clean up and formatting.


Project: http://git-wip-us.apache.org/repos/asf/lucenenet/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucenenet/commit/3fb1334e
Tree: http://git-wip-us.apache.org/repos/asf/lucenenet/tree/3fb1334e
Diff: http://git-wip-us.apache.org/repos/asf/lucenenet/diff/3fb1334e

Branch: refs/heads/branch_4x
Commit: 3fb1334e067f24ee8ce1bd328f3d1169d6286bf7
Parents: d9ad1fe
Author: Mike Potts <mi...@feature23.com>
Authored: Sun Jul 21 22:33:23 2013 -0400
Committer: Mike Potts <mi...@feature23.com>
Committed: Sun Jul 21 22:33:23 2013 -0400

----------------------------------------------------------------------
 .../Compressing/CompressingTermVectorsWriter.cs | 803 +++++++++++++++++++
 1 file changed, 803 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucenenet/blob/3fb1334e/src/core/Codecs/Compressing/CompressingTermVectorsWriter.cs
----------------------------------------------------------------------
diff --git a/src/core/Codecs/Compressing/CompressingTermVectorsWriter.cs b/src/core/Codecs/Compressing/CompressingTermVectorsWriter.cs
new file mode 100644
index 0000000..d91cc23
--- /dev/null
+++ b/src/core/Codecs/Compressing/CompressingTermVectorsWriter.cs
@@ -0,0 +1,803 @@
+using Lucene.Net.Index;
+using Lucene.Net.Store;
+using Lucene.Net.Support;
+using Lucene.Net.Util;
+using Lucene.Net.Util.Packed;
+using System;
+using System.Collections.Generic;
+using System.Linq;
+using System.Text;
+
+namespace Lucene.Net.Codecs.Compressing
+{
+    public sealed class CompressingTermVectorsWriter: TermVectorsWriter
+    {
+        public static int MAX_DOCUMENTS_PER_CHUNK = 128;
+
+        static string VECTORS_EXTENSION = "tvd";
+        static string VECTORS_INDEX_EXTENSION = "tvx";
+
+        static string CODEC_SFX_IDX = "Index";
+        static string CODEC_SFX_DAT = "Data";
+
+        static int VERSION_START = 0;
+        static int VERSION_CURRENT = VERSION_START;
+
+        static int BLOCK_SIZE = 64;
+
+        static int POSITIONS = 0x01;
+        static int   OFFSETS = 0x02;
+        static int  PAYLOADS = 0x04;
+        static int FLAGS_BITS = PackedInts.BitsRequired(POSITIONS | OFFSETS | PAYLOADS);
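+        // POSITIONS | OFFSETS | PAYLOADS == 0x07, so FLAGS_BITS evaluates to 3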
+
+        private Directory directory;
+        private string segment;
+        private string segmentSuffix;
+        private CompressingStoredFieldsIndexWriter indexWriter;
+        private IndexOutput vectorsStream;
+
+        private CompressionMode compressionMode;
+        private Compressor compressor;
+        private int chunkSize;
+
+        private int numDocs; // total number of docs seen
+        private Deque<DocData> pendingDocs; // pending docs
+        private DocData curDoc; // current document
+        private FieldData curField; // current field
+        private BytesRef lastTerm;
+        private int[] positionsBuf, startOffsetsBuf, lengthsBuf, payloadLengthsBuf;
+        private GrowableByteArrayDataOutput termSuffixes; // buffered term suffixes
+        private GrowableByteArrayDataOutput payloadBytes; // buffered term payloads
+        private BlockPackedWriter writer;
+        
+        /** a pending doc */
+        private class DocData 
+        {
+            int numFields;
+            Deque<FieldData> fields;
+            int posStart, offStart, payStart;
+            readonly CompressingTermVectorsWriter outer; // C# nested types cannot see the enclosing instance implicitly, so pass it through
+            DocData(CompressingTermVectorsWriter outer, int numFields, int posStart, int offStart, int payStart) {
+                this.outer = outer;
+                this.numFields = numFields;
+                this.fields = new ArrayDeque<FieldData>(numFields);
+                this.posStart = posStart;
+                this.offStart = offStart;
+                this.payStart = payStart;
+            }
+
+            FieldData addField(int fieldNum, int numTerms, bool positions, bool offsets, bool payloads) 
+            {
+                FieldData field;
+                if (fields.isEmpty()) 
+                {
+                    field = new FieldData(outer, fieldNum, numTerms, positions, offsets, payloads, posStart, offStart, payStart);
+                } 
+                else 
+                {
+                    FieldData last = fields.getLast();
+                    int posStart = last.posStart + (last.hasPositions ? last.totalPositions : 0);
+                    int offStart = last.offStart + (last.hasOffsets ? last.totalPositions : 0);
+                    int payStart = last.payStart + (last.hasPayloads ? last.totalPositions : 0);
+                    field = new FieldData(outer, fieldNum, numTerms, positions, offsets, payloads, posStart, offStart, payStart);
+                }
+                fields.add(field);
+                return field;
+            }
+        }
+
+        private DocData addDocData(int numVectorFields) 
+        {
+            FieldData last = null;
+            for (Iterator<DocData> it = pendingDocs.descendingIterator(); it.hasNext(); ) 
+            {
+                DocData doc = it.next();
+                if (!doc.fields.isEmpty()) 
+                {
+                    last = doc.fields.getLast();
+                    break;
+                }
+            }
+
+            DocData doc;
+            if (last == null) 
+            {
+                doc = new DocData(this, numVectorFields, 0, 0, 0);
+            } 
+            else 
+            {
+                int posStart = last.posStart + (last.hasPositions ? last.totalPositions : 0);
+                int offStart = last.offStart + (last.hasOffsets ? last.totalPositions : 0);
+                int payStart = last.payStart + (last.hasPayloads ? last.totalPositions : 0);
+                doc = new DocData(this, numVectorFields, posStart, offStart, payStart);
+            }
+            pendingDocs.add(doc);
+            return doc;
+        }
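+        // posStart/offStart/payStart continue from the last non-empty pending
+        // doc, so all pending docs index into the same flat position/offset/
+        // payload buffers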
+
+        /** a pending field */
+        private class FieldData 
+        {
+            bool hasPositions, hasOffsets, hasPayloads;
+            int fieldNum, flags, numTerms;
+            int[] freqs, prefixLengths, suffixLengths;
+            int posStart, offStart, payStart;
+            int totalPositions;
+            int ord;
+            readonly CompressingTermVectorsWriter outer; // explicit reference to the enclosing writer's shared buffers
+
+            public FieldData(CompressingTermVectorsWriter outer, int fieldNum, int numTerms, bool positions, bool offsets, bool payloads, int posStart, int offStart, int payStart) 
+            {
+                this.outer = outer;
+                this.fieldNum = fieldNum;
+                this.numTerms = numTerms;
+                this.hasPositions = positions;
+                this.hasOffsets = offsets;
+                this.hasPayloads = payloads;
+                this.flags = (positions ? POSITIONS : 0) | (offsets ? OFFSETS : 0) | (payloads ? PAYLOADS : 0);
+                this.freqs = new int[numTerms];
+                this.prefixLengths = new int[numTerms];
+                this.suffixLengths = new int[numTerms];
+                this.posStart = posStart;
+                this.offStart = offStart;
+                this.payStart = payStart;
+                totalPositions = 0;
+                ord = 0;
+            }
+
+            public void addTerm(int freq, int prefixLength, int suffixLength) 
+            {
+              freqs[ord] = freq;
+              prefixLengths[ord] = prefixLength;
+              suffixLengths[ord] = suffixLength;
+              ++ord;
+            }
+            
+            public void addPosition(int position, int startOffset, int length, int payloadLength) 
+            {
+              if (hasPositions) 
+              {
+                if (posStart + totalPositions == outer.positionsBuf.Length) 
+                {
+                  outer.positionsBuf = ArrayUtil.Grow(outer.positionsBuf);
+                }
+
+                outer.positionsBuf[posStart + totalPositions] = position;
+              }
+              if (hasOffsets) {
+                if (offStart + totalPositions == outer.startOffsetsBuf.Length) 
+                {
+                  int newLength = ArrayUtil.Oversize(offStart + totalPositions, 4);
+                  outer.startOffsetsBuf = Arrays.CopyOf(outer.startOffsetsBuf, newLength);
+                  outer.lengthsBuf = Arrays.CopyOf(outer.lengthsBuf, newLength);
+                }
+                outer.startOffsetsBuf[offStart + totalPositions] = startOffset;
+                outer.lengthsBuf[offStart + totalPositions] = length;
+              }
+              if (hasPayloads) {
+                if (payStart + totalPositions == outer.payloadLengthsBuf.Length) {
+                  outer.payloadLengthsBuf = ArrayUtil.Grow(outer.payloadLengthsBuf);
+                }
+                outer.payloadLengthsBuf[payStart + totalPositions] = payloadLength;
+              }
+              ++totalPositions;
+            }
+        }
+
+        /** Sole constructor. */
+        public CompressingTermVectorsWriter(Directory directory, SegmentInfo si, string segmentSuffix, IOContext context,
+            String formatName, CompressionMode compressionMode, int chunkSize) 
+        {
+            this.directory = directory;
+            this.segment = si.name;
+            this.segmentSuffix = segmentSuffix;
+            this.compressionMode = compressionMode;
+            this.compressor = compressionMode.newCompressor();
+            this.chunkSize = chunkSize;
+
+            numDocs = 0;
+            pendingDocs = new ArrayDeque<DocData>();
+            termSuffixes = new GrowableByteArrayDataOutput(ArrayUtil.Oversize(chunkSize, 1));
+            payloadBytes = new GrowableByteArrayDataOutput(ArrayUtil.Oversize(1, 1));
+            lastTerm = new BytesRef(ArrayUtil.Oversize(30, 1));
+
+            bool success = false;
+            IndexOutput indexStream = directory.CreateOutput(IndexFileNames.SegmentFileName(segment, segmentSuffix, VECTORS_INDEX_EXTENSION), context);
+            try {
+                vectorsStream = directory.CreateOutput(IndexFileNames.SegmentFileName(segment, segmentSuffix, VECTORS_EXTENSION), context);
+
+                string codecNameIdx = formatName + CODEC_SFX_IDX;
+                string codecNameDat = formatName + CODEC_SFX_DAT;
+                CodecUtil.writeHeader(indexStream, codecNameIdx, VERSION_CURRENT);
+                CodecUtil.writeHeader(vectorsStream, codecNameDat, VERSION_CURRENT);
+
+                indexWriter = new CompressingStoredFieldsIndexWriter(indexStream);
+                indexStream = null;
+
+                vectorsStream.WriteVInt(PackedInts.VERSION_CURRENT);
+                vectorsStream.WriteVInt(chunkSize);
+                writer = new BlockPackedWriter(vectorsStream, BLOCK_SIZE);
+
+                positionsBuf = new int[1024];
+                startOffsetsBuf = new int[1024];
+                lengthsBuf = new int[1024];
+                payloadLengthsBuf = new int[1024];
+
+                success = true;
+            } finally {
+                if (!success) {
+                    IOUtils.CloseWhileHandlingException(indexStream);
+                    Abort();
+                }
+            }
+        }
+
+        public override void StartDocument(int numVectorFields)
+        {
+            curDoc = addDocData(numVectorFields);
+        }
+
+        public override void FinishDocument() 
+        {
+            // append the payload bytes of the doc after its terms
+            termSuffixes.WriteBytes(payloadBytes.Bytes, payloadBytes.Length);
+            payloadBytes.Length = 0;
+            ++numDocs;
+            if (triggerFlush()) {
+              flush();
+            }
+            curDoc = null;
+        }
+
+        public override void FinishField()
+        {
+            curField = null;
+        }
+
+        public override void StartField(Index.FieldInfo info, int numTerms, bool positions, bool offsets, bool payloads)
+        {
+            curField = curDoc.addField(info.number, numTerms, positions, offsets, payloads);
+            lastTerm.length = 0;
+        }
+
+        public override void StartTerm(Util.BytesRef term, int freq)
+        {
+            int prefix = StringHelper.BytesDifference(lastTerm, term);
+            curField.addTerm(freq, prefix, term.length - prefix);
+            termSuffixes.WriteBytes(term.bytes, term.offset + prefix, term.length - prefix);
+            // copy last term
+            if (lastTerm.bytes.Length < term.length) {
+              lastTerm.bytes = new sbyte[ArrayUtil.Oversize(term.length, 1)];
+            }
+            lastTerm.offset = 0;
+            lastTerm.length = term.length;
+            Array.Copy(term.bytes, term.offset, lastTerm.bytes, 0, term.length);
+        }
+
+        public override void AddPosition(int position, int startOffset, int endOffset, Util.BytesRef payload)
+        {
+            curField.addPosition(position, startOffset, endOffset - startOffset, payload == null ? 0 : payload.length);
+            if (curField.HasPayloads && payload != null)
+            {
+                payloadBytes.WriteBytes(payload.bytes, payload.offset, payload.length);
+            }
+        }
+
+        private bool triggerFlush()
+        {
+            return termSuffixes.Length >= chunkSize
+                || pendingDocs.size() >= MAX_DOCUMENTS_PER_CHUNK;
+        }
+
+        private void flush() 
+        {
+            int chunkDocs = pendingDocs.size();
+
+            // write the index file
+            indexWriter.WriteIndex(chunkDocs, vectorsStream.GetFilePointer());
+
+            int docBase = numDocs - chunkDocs;
+            vectorsStream.WriteVInt(docBase);
+            vectorsStream.WriteVInt(chunkDocs);
+
+            // total number of fields of the chunk
+            int totalFields = flushNumFields(chunkDocs);
+
+            if (totalFields > 0) {
+              // unique field numbers (sorted)
+              int[] fieldNums = flushFieldNums();
+              // offsets in the array of unique field numbers
+              flushFields(totalFields, fieldNums);
+              // flags (does the field have positions, offsets, payloads?)
+              flushFlags(totalFields, fieldNums);
+              // number of terms of each field
+              flushNumTerms(totalFields);
+              // prefix and suffix lengths for each field
+              flushTermLengths();
+              // term freqs - 1 (because termFreq is always >=1) for each term
+              flushTermFreqs();
+              // positions for all terms, when enabled
+              flushPositions();
+              // offsets for all terms, when enabled
+              flushOffsets(fieldNums);
+              // payload lengths for all terms, when enabled
+              flushPayloadLengths();
+
+              // compress terms and payloads and write them to the output
+              compressor.Compress(termSuffixes.Bytes, 0, termSuffixes.Length, vectorsStream);
+            }
+
+            // reset
+            pendingDocs.clear();
+            curDoc = null;
+            curField = null;
+            termSuffixes.Length = 0;
+        }
+
+        private int flushNumFields(int chunkDocs) 
+        {
+            if (chunkDocs == 1) {
+              int numFields = pendingDocs.getFirst().numFields;
+              vectorsStream.WriteVInt(numFields);
+              return numFields;
+            } else {
+              writer.Reset(vectorsStream);
+              int totalFields = 0;
+              foreach (DocData dd in pendingDocs) {
+                writer.Add(dd.numFields);
+                totalFields += dd.numFields;
+              }
+              writer.Finish();
+              return totalFields;
+            }
+        }
+
+          /** Returns a sorted array containing unique field numbers */
+        private int[] flushFieldNums()
+        {
+            SortedSet<int> fieldNums = new TreeSet<int>();
+            for (DocData dd : pendingDocs) {
+                for (FieldData fd : dd.fields) {
+                fieldNums.Add(fd.fieldNum);
+                }
+            }
+
+            int numDistinctFields = fieldNums.size();
+            int bitsRequired = PackedInts.bitsRequired(fieldNums.Last());
+            int token = (Math.Min(numDistinctFields - 1, 0x07) << 5) | bitsRequired;
+            vectorsStream.WriteByte((byte) token);
+            if (numDistinctFields - 1 >= 0x07) {
+                vectorsStream.WriteVInt(numDistinctFields - 1 - 0x07);
+            }
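+            // the token just written packs bitsRequired in its low 5 bits and
+            // min(numDistinctFields - 1, 7) in its high 3 bits; any overflow
+            // went out as the VInt above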
+            PackedInts.Writer writer = PackedInts.GetWriterNoHeader(vectorsStream, PackedInts.Format.PACKED, fieldNums.Count, bitsRequired, 1);
+            foreach (int fieldNum in fieldNums) {
+                writer.Add(fieldNum);
+            }
+            writer.Finish();
+
+            int[] fns = new int[fieldNums.Count];
+            int i = 0;
+            foreach (int key in fieldNums) {
+                fns[i++] = key;
+            }
+            return fns;
+        }
+
+        private void flushFields(int totalFields, int[] fieldNums)
+        {
+            PackedInts.Writer writer = PackedInts.GetWriterNoHeader(vectorsStream, PackedInts.Format.PACKED, totalFields, PackedInts.BitsRequired(fieldNums.Length - 1), 1);
+            foreach (DocData dd in pendingDocs) {
+                foreach (FieldData fd in dd.fields) {
+                    int fieldNumIndex = Arrays.BinarySearch(fieldNums, fd.fieldNum);
+                    //assert fieldNumIndex >= 0;
+                    writer.Add(fieldNumIndex);
+                }
+            }
+            writer.Finish();
+        }
+
+        private void flushFlags(int totalFields, int[] fieldNums) 
+        {
+            // check if fields always have the same flags
+            bool nonChangingFlags = true;
+            int[] fieldFlags = new int[fieldNums.Length];
+            Arrays.Fill(fieldFlags, -1);
+            foreach (DocData dd in pendingDocs) {
+                foreach (FieldData fd in dd.fields) {
+                    int fieldNumOff = Arrays.BinarySearch(fieldNums, fd.fieldNum);
+                    if (fieldFlags[fieldNumOff] == -1) {
+                        fieldFlags[fieldNumOff] = fd.flags;
+                    } else if (fieldFlags[fieldNumOff] != fd.flags) {
+                        nonChangingFlags = false;
+                        goto scanned; // C# has no labeled break, so jump past both loops
+                    }
+                }
+            }
+            scanned: ;
+
+            if (nonChangingFlags) {
+                // write one flag per field num
+                vectorsStream.WriteVInt(0);
+                PackedInts.Writer writer = PackedInts.GetWriterNoHeader(vectorsStream, PackedInts.Format.PACKED, fieldFlags.length, FLAGS_BITS, 1);
+                for (int flags : fieldFlags) {
+                writer.Add(flags);
+                }
+                writer.Finish();
+            } else {
+                // write one flag for every field instance
+                vectorsStream.WriteVInt(1);
+                PackedInts.Writer writer = PackedInts.GetWriterNoHeader(vectorsStream, PackedInts.Format.PACKED, totalFields, FLAGS_BITS, 1);
+                for (DocData dd : pendingDocs) {
+                for (FieldData fd : dd.fields) {
+                    writer.add(fd.flags);
+                }
+                }
+                writer.Finish();
+            }
+        }
+
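+        // OR-ing all term counts together yields a value whose bit width is the
+        // maximum width needed by any single count, which is all that
+        // PackedInts.BitsRequired needs below.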
+        private void flushNumTerms(int totalFields) 
+        {
+            int maxNumTerms = 0;
+            foreach (DocData dd in pendingDocs) {
+                foreach (FieldData fd in dd.fields) {
+                    maxNumTerms |= fd.numTerms;
+                }
+            }
+
+            int bitsRequired = PackedInts.BitsRequired(maxNumTerms);
+            vectorsStream.WriteVInt(bitsRequired);
+            PackedInts.Writer writer = PackedInts.GetWriterNoHeader(
+                vectorsStream, PackedInts.Format.PACKED, totalFields, bitsRequired, 1);
+            foreach (DocData dd in pendingDocs) {
+                foreach (FieldData fd in dd.fields) {
+                    writer.Add(fd.numTerms);
+                }
+            }
+            writer.Finish();
+        }
+
+        private void flushTermLengths() 
+        {
+            writer.Reset(vectorsStream);
+            foreach (DocData dd in pendingDocs) {
+                foreach (FieldData fd in dd.fields) {
+                    for (int i = 0; i < fd.numTerms; ++i) {
+                        writer.Add(fd.prefixLengths[i]);
+                    }
+                }
+            }
+            writer.Finish();
+            writer.Reset(vectorsStream);
+            foreach (DocData dd in pendingDocs) {
+                foreach (FieldData fd in dd.fields) {
+                    for (int i = 0; i < fd.numTerms; ++i) {
+                        writer.Add(fd.suffixLengths[i]);
+                    }
+                }
+            }
+            writer.Finish();
+        }
+
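+        // Every indexed term occurs at least once, so frequencies are stored
+        // minus one to keep the packed values small.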
+        private void flushTermFreqs() 
+        {
+            writer.Reset(vectorsStream);
+            foreach (DocData dd in pendingDocs) {
+                foreach (FieldData fd in dd.fields) {
+                    for (int i = 0; i < fd.numTerms; ++i) {
+                        writer.Add(fd.freqs[i] - 1);
+                    }
+                }
+            }
+            writer.Finish();
+        }
+
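+        // Positions are delta-encoded per term: each value is the gap from the
+        // previous position of the same term, which compresses well.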
+        private void flushPositions()
+        {
+            writer.Reset(vectorsStream);
+            foreach (DocData dd in pendingDocs) {
+                foreach (FieldData fd in dd.fields) {
+                    if (fd.hasPositions) {
+                        int pos = 0;
+                        for (int i = 0; i < fd.numTerms; ++i) {
+                            int previousPosition = 0;
+                            for (int j = 0; j < fd.freqs[i]; ++j) {
+                                int position = positionsBuf[fd.posStart + pos++];
+                                writer.Add(position - previousPosition);
+                                previousPosition = position;
+                            }
+                        }
+                    }
+                }
+            }
+            writer.Finish();
+        }
+
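+        // Start offsets are not stored raw: an average chars-per-position ratio
+        // (sumOffsets / sumPos) is computed per field, and only the difference
+        // between each actual start offset and the ratio-based prediction is
+        // packed.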
+        private void flushOffsets(int[] fieldNums) 
+        {
+            bool hasOffsets = false;
+            long[] sumPos = new long[fieldNums.Length];
+            long[] sumOffsets = new long[fieldNums.Length];
+            foreach (DocData dd in pendingDocs) {
+                foreach (FieldData fd in dd.fields) {
+                    hasOffsets |= fd.hasOffsets;
+                    if (fd.hasOffsets && fd.hasPositions) {
+                        int fieldNumOff = Array.BinarySearch(fieldNums, fd.fieldNum);
+                        int pos = 0;
+                        for (int i = 0; i < fd.numTerms; ++i) {
+                            int previousPos = 0;
+                            int previousOff = 0;
+                            for (int j = 0; j < fd.freqs[i]; ++j) {
+                                int position = positionsBuf[fd.posStart + pos];
+                                int startOffset = startOffsetsBuf[fd.offStart + pos];
+                                sumPos[fieldNumOff] += position - previousPos;
+                                sumOffsets[fieldNumOff] += startOffset - previousOff;
+                                previousPos = position;
+                                previousOff = startOffset;
+                                ++pos;
+                            }
+                        }
+                    }
+                }
+            }
+
+            if (!hasOffsets) {
+                // nothing to do
+                return;
+            }
+
+            float[] charsPerTerm = new float[fieldNums.Length];
+            for (int i = 0; i < fieldNums.Length; ++i) {
+                charsPerTerm[i] = (sumPos[i] <= 0 || sumOffsets[i] <= 0) ? 0 : (float) ((double) sumOffsets[i] / sumPos[i]);
+            }
+
+            // start offsets
+            for (int i = 0; i < fieldNums.Length; ++i) {
+                // raw float bits, the equivalent of Java's Float.floatToRawIntBits
+                vectorsStream.WriteInt(BitConverter.ToInt32(BitConverter.GetBytes(charsPerTerm[i]), 0));
+            }
+
+            writer.Reset(vectorsStream);
+            foreach (DocData dd in pendingDocs) {
+                foreach (FieldData fd in dd.fields) {
+                    if ((fd.flags & OFFSETS) != 0) {
+                        int fieldNumOff = Array.BinarySearch(fieldNums, fd.fieldNum);
+                        float cpt = charsPerTerm[fieldNumOff];
+                        int pos = 0;
+                        for (int i = 0; i < fd.numTerms; ++i) {
+                            int previousPos = 0;
+                            int previousOff = 0;
+                            for (int j = 0; j < fd.freqs[i]; ++j) {
+                                int position = fd.hasPositions ? positionsBuf[fd.posStart + pos] : 0;
+                                int startOffset = startOffsetsBuf[fd.offStart + pos];
+                                writer.Add(startOffset - previousOff - (int) (cpt * (position - previousPos)));
+                                previousPos = position;
+                                previousOff = startOffset;
+                                ++pos;
+                            }
+                        }
+                    }
+                }
+            }
+            writer.Finish();
+
+            // lengths
+            writer.Reset(vectorsStream);
+            foreach (DocData dd in pendingDocs) {
+                foreach (FieldData fd in dd.fields) {
+                    if ((fd.flags & OFFSETS) != 0) {
+                        int pos = 0;
+                        for (int i = 0; i < fd.numTerms; ++i) {
+                            for (int j = 0; j < fd.freqs[i]; ++j) {
+                                writer.Add(lengthsBuf[fd.offStart + pos++] - fd.prefixLengths[i] - fd.suffixLengths[i]);
+                            }
+                        }
+                    }
+                }
+            }
+            writer.Finish();
+        }
+
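+        // One payload length per position; positions without a payload were
+        // recorded as length 0 in AddProx.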
+        private void flushPayloadLengths() 
+        {
+            writer.Reset(vectorsStream);
+            foreach (DocData dd in pendingDocs) {
+                foreach (FieldData fd in dd.fields) {
+                    if (fd.hasPayloads) {
+                        for (int i = 0; i < fd.totalPositions; ++i) {
+                            writer.Add(payloadLengthsBuf[fd.payStart + i]);
+                        }
+                    }
+                }
+            }
+            writer.Finish();
+        }
+
+
+
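+        // Abort closes the writer and deletes any partially written vector
+        // files so a failed flush does not leave corrupt output behind.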
+        public override void Abort()
+        {
+            IOUtils.CloseWhileHandlingException(this);
+            IOUtils.DeleteFilesIgnoringExceptions(directory,
+            IndexFileNames.SegmentFileName(segment, segmentSuffix, VECTORS_EXTENSION),
+            IndexFileNames.SegmentFileName(segment, segmentSuffix, VECTORS_INDEX_EXTENSION));
+        }
+
+        public override void Finish(Index.FieldInfos fis, int numDocs)
+        {
+            if (pendingDocs.Count > 0) {
+                flush();
+            }
+            if (numDocs != this.numDocs) {
+                throw new InvalidOperationException("Wrote " + this.numDocs + " docs, finish called with numDocs=" + numDocs);
+            }
+            indexWriter.Finish(numDocs);
+        }
+
+        public override IComparer<Util.BytesRef> Comparator
+        {
+            get 
+            { 
+                // property form, mirroring the CharsRef comparator port
+                return BytesRef.UTF8SortedAsUnicodeComparator; 
+            }
+        }
+
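+        // Positions arrive as VInt codes: the low bit of each code flags the
+        // presence of a payload for that position and the remaining high bits
+        // hold the position delta.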
+        public override void AddProx(int numProx, DataInput positions, DataInput offsets)
+        {
+            if (curField.hasPositions) {
+                int posStart = curField.posStart + curField.totalPositions;
+                if (posStart + numProx > positionsBuf.Length) {
+                    positionsBuf = ArrayUtil.Grow(positionsBuf, posStart + numProx);
+                }
+                int position = 0;
+                if (curField.hasPayloads) {
+                    int payStart = curField.payStart + curField.totalPositions;
+                    if (payStart + numProx > payloadLengthsBuf.Length) {
+                        payloadLengthsBuf = ArrayUtil.Grow(payloadLengthsBuf, payStart + numProx);
+                    }
+                    for (int i = 0; i < numProx; ++i) {
+                        int code = positions.ReadVInt();
+                        if ((code & 1) != 0) {
+                            // This position has a payload
+                            int payloadLength = positions.ReadVInt();
+                            payloadLengthsBuf[payStart + i] = payloadLength;
+                            payloadBytes.CopyBytes(positions, payloadLength);
+                        } else {
+                            payloadLengthsBuf[payStart + i] = 0;
+                        }
+                        position += (int)((uint)code >> 1); // Java's code >>> 1
+                        positionsBuf[posStart + i] = position;
+                    }
+                } else {
+                    for (int i = 0; i < numProx; ++i) {
+                        position += (int)((uint)positions.ReadVInt() >> 1); // Java's >>> 1
+                        positionsBuf[posStart + i] = position;
+                    }
+                }
+            }
+
+            if (curField.hasOffsets) {
+                int offStart = curField.offStart + curField.totalPositions;
+                if (offStart + numProx > startOffsetsBuf.Length) {
+                    int newLength = ArrayUtil.Oversize(offStart + numProx, 4);
+                    Array.Resize(ref startOffsetsBuf, newLength);
+                    Array.Resize(ref lengthsBuf, newLength);
+                }
+
+                int lastOffset = 0, startOffset, endOffset;
+                for (int i = 0; i < numProx; ++i) {
+                    startOffset = lastOffset + offsets.ReadVInt();
+                    endOffset = startOffset + offsets.ReadVInt();
+                    lastOffset = endOffset;
+                    startOffsetsBuf[offStart + i] = startOffset;
+                    lengthsBuf[offStart + i] = endOffset - startOffset;
+                }
+            }
+
+            curField.totalPositions += numProx;
+        }
+
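+        // Merging can copy whole compressed chunks verbatim when the source
+        // segment uses the same compression settings and the chunk contains no
+        // deleted docs; otherwise it falls back to re-adding vectors doc by doc.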
+        public override int Merge(MergeState mergeState) 
+        {
+            int docCount = 0;
+            int idx = 0;
+
+            foreach (AtomicReader reader in mergeState.readers) 
+            {
+                SegmentReader matchingSegmentReader = mergeState.matchingSegmentReaders[idx++];
+                CompressingTermVectorsReader matchingVectorsReader = null;
+                if (matchingSegmentReader != null) {
+                    TermVectorsReader vectorsReader = matchingSegmentReader.TermVectorsReader;
+                    // we can only bulk-copy if the matching reader is also a CompressingTermVectorsReader
+                    if (vectorsReader != null && vectorsReader is CompressingTermVectorsReader) {
+                        matchingVectorsReader = (CompressingTermVectorsReader) vectorsReader;
+                    }
+                }
+
+                int maxDoc = reader.MaxDoc;
+                Bits liveDocs = reader.LiveDocs;
+
+                if (matchingVectorsReader == null
+                    || matchingVectorsReader.CompressionMode != compressionMode
+                    || matchingVectorsReader.ChunkSize != chunkSize
+                    || matchingVectorsReader.PackedIntsVersion != PackedInts.VERSION_CURRENT) {
+                    // naive merge...
+                    for (int i = nextLiveDoc(0, liveDocs, maxDoc); i < maxDoc; i = nextLiveDoc(i + 1, liveDocs, maxDoc)) {
+                        Fields vectors = reader.GetTermVectors(i);
+                        AddAllDocVectors(vectors, mergeState);
+                        ++docCount;
+                        mergeState.checkAbort.Work(300);
+                    }
+                } else {
+                    CompressingStoredFieldsIndexReader index = matchingVectorsReader.Index;
+                    IndexInput vectorsStream = matchingVectorsReader.VectorsStream;
+                    for (int i = nextLiveDoc(0, liveDocs, maxDoc); i < maxDoc; ) {
+                        if (pendingDocs.Count == 0
+                            && (i == 0 || index.GetStartPointer(i - 1) < index.GetStartPointer(i))) { // start of a chunk
+                            long startPointer = index.GetStartPointer(i);
+                            vectorsStream.Seek(startPointer);
+                            int docBase = vectorsStream.ReadVInt();
+                            int chunkDocs = vectorsStream.ReadVInt();
+                            if (docBase + chunkDocs < matchingSegmentReader.MaxDoc
+                                && nextDeletedDoc(docBase, liveDocs, docBase + chunkDocs) == docBase + chunkDocs) {
+                                long chunkEnd = index.GetStartPointer(docBase + chunkDocs);
+                                long chunkLength = chunkEnd - vectorsStream.FilePointer;
+                                indexWriter.WriteIndex(chunkDocs, this.vectorsStream.FilePointer);
+                                this.vectorsStream.WriteVInt(docCount);
+                                this.vectorsStream.WriteVInt(chunkDocs);
+                                this.vectorsStream.CopyBytes(vectorsStream, chunkLength);
+                                docCount += chunkDocs;
+                                this.numDocs += chunkDocs;
+                                mergeState.checkAbort.Work(300 * chunkDocs);
+                                i = nextLiveDoc(docBase + chunkDocs, liveDocs, maxDoc);
+                            } else {
+                                for (; i < docBase + chunkDocs; i = nextLiveDoc(i + 1, liveDocs, maxDoc)) {
+                                    Fields vectors = reader.GetTermVectors(i);
+                                    AddAllDocVectors(vectors, mergeState);
+                                    ++docCount;
+                                    mergeState.checkAbort.Work(300);
+                                }
+                            }
+                        } else {
+                            Fields vectors = reader.GetTermVectors(i);
+                            AddAllDocVectors(vectors, mergeState);
+                            ++docCount;
+                            mergeState.checkAbort.Work(300);
+                            i = nextLiveDoc(i + 1, liveDocs, maxDoc);
+                        }
+                    }
+                }
+            }
+            Finish(mergeState.fieldInfos, docCount);
+            return docCount;
+        }
+
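+        // Helpers for walking the live-docs bitset during merge.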
+        private static int nextLiveDoc(int doc, Bits liveDocs, int maxDoc) 
+        {
+            if (liveDocs == null) {
+                return doc;
+            }
+            while (doc < maxDoc && !liveDocs.Get(doc)) {
+                ++doc;
+            }
+            return doc;
+        }
+
+        private static int nextDeletedDoc(int doc, Bits liveDocs, int maxDoc) 
+        {
+            if (liveDocs == null) {
+                return maxDoc;
+            }
+            while (doc < maxDoc && liveDocs.Get(doc)) {
+                ++doc;
+            }
+            return doc;
+        }
+
+        protected override void Dispose(bool disposing)
+        {
+            try 
+            {
+                IOUtils.Close(vectorsStream, indexWriter);
+            } finally {
+                vectorsStream = null;
+                indexWriter = null;
+            }
+        }
+    }
+}


[24/50] [abbrv] Error Cleanup

Posted by mh...@apache.org.
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/25ec42a2/src/core/Util/CharsRef.cs
----------------------------------------------------------------------
diff --git a/src/core/Util/CharsRef.cs b/src/core/Util/CharsRef.cs
index c1fddeb..ebdc541 100644
--- a/src/core/Util/CharsRef.cs
+++ b/src/core/Util/CharsRef.cs
@@ -210,6 +210,71 @@ namespace Lucene.Net.Util
             return new CharsRef(chars, offset + start, end - start);
         }
 
+        private static readonly IComparer<CharsRef> utf16SortedAsUTF8SortOrder = new UTF16SortedAsUTF8ComparatorImpl();
+
+        public static IComparer<CharsRef> UTF16SortedAsUTF8Comparator
+        {
+            get { return utf16SortedAsUTF8SortOrder; }
+        }
+
+        private class UTF16SortedAsUTF8ComparatorImpl : Comparer<CharsRef>
+        {
+            public UTF16SortedAsUTF8ComparatorImpl()
+            {
+            }
+
+            public override int Compare(CharsRef a, CharsRef b)
+            {
+                if (a == b)
+                    return 0;
+
+                char[] aChars = a.chars;
+                int aUpto = a.offset;
+                char[] bChars = b.chars;
+                int bUpto = b.offset;
+
+                int aStop = aUpto + Math.Min(a.length, b.length);
+
+                while (aUpto < aStop)
+                {
+                    char aChar = aChars[aUpto++];
+                    char bChar = bChars[bUpto++];
+                    if (aChar != bChar)
+                    {
+                        // http://icu-project.org/docs/papers/utf16_code_point_order.html
+
+                        /* aChar != bChar, fix up each one if they're both in or above the surrogate range, then compare them */
+                        if (aChar >= 0xd800 && bChar >= 0xd800)
+                        {
+                            if (aChar >= 0xe000)
+                            {
+                                aChar -= (char)0x800;
+                            }
+                            else
+                            {
+                                aChar += (char)0x2000;
+                            }
+
+                            if (bChar >= 0xe000)
+                            {
+                                bChar -= (char)0x800;
+                            }
+                            else
+                            {
+                                bChar += (char)0x2000;
+                            }
+                        }
+
+                        /* now aChar and bChar are in code point order */
+                        return (int)aChar - (int)bChar; /* int must be 32 bits wide */
+                    }
+                }
+
+                // One is a prefix of the other, or, they are equal:
+                return a.length - b.length;
+            }
+        }
+
         [Obsolete("This comparer is only a transition mechanism")]
         private static readonly Comparer<CharsRef> utf8SortedAsUTF16SortOrder = new UTF8SortedAsUTF16ComparerImpl();
 
@@ -288,7 +353,7 @@ namespace Lucene.Net.Util
             return clone;
         }
 
-        public bool isValid()
+        public bool IsValid()
         {
             if (chars == null)
             {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/25ec42a2/src/core/Util/FixedBitSet.cs
----------------------------------------------------------------------
diff --git a/src/core/Util/FixedBitSet.cs b/src/core/Util/FixedBitSet.cs
index 66da470..605bb8c 100644
--- a/src/core/Util/FixedBitSet.cs
+++ b/src/core/Util/FixedBitSet.cs
@@ -102,7 +102,7 @@ namespace Lucene.Net.Util
             }
         }
 
-        public bool Set(int index)
+        public void Set(int index)
         {
             //assert index >= 0 && index < numBits: "index=" + index + " numBits=" + numBits;
             int wordNum = index >> 6;      // div 64
@@ -192,7 +192,7 @@ namespace Lucene.Net.Util
 
         public void Or(DocIdSetIterator iter)
         {
-            if (iter is OpenBitSetIterator && iter.DocID() == -1)
+            if (iter is OpenBitSetIterator && iter.DocID == -1)
             {
                 OpenBitSetIterator obs = (OpenBitSetIterator)iter;
                 Or(obs.arr, obs.words);
@@ -227,7 +227,7 @@ namespace Lucene.Net.Util
 
         public void And(DocIdSetIterator iter)
         {
-            if (iter is OpenBitSetIterator && iter.DocID() == -1)
+            if (iter is OpenBitSetIterator && iter.DocID == -1)
             {
                 OpenBitSetIterator obs = (OpenBitSetIterator)iter;
                 And(obs.arr, obs.words);
@@ -273,7 +273,7 @@ namespace Lucene.Net.Util
 
         public void AndNot(DocIdSetIterator iter)
         {
-            if (iter is OpenBitSetIterator && iter.DocID() == -1)
+            if (iter is OpenBitSetIterator && iter.DocID == -1)
             {
                 OpenBitSetIterator obs = (OpenBitSetIterator)iter;
                 AndNot(obs.arr, obs.words);

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/25ec42a2/src/core/Util/Fst/FST.cs
----------------------------------------------------------------------
diff --git a/src/core/Util/Fst/FST.cs b/src/core/Util/Fst/FST.cs
index 09a3849..96f0cf7 100644
--- a/src/core/Util/Fst/FST.cs
+++ b/src/core/Util/Fst/FST.cs
@@ -1009,7 +1009,7 @@ namespace Lucene.Net.Util.Fst
                     if (bottom == null)
                     {
                         q.Add(new NodeAndInCount(node, (int)InCounts.Get(node)));
-                        if (q.Size() == topN)
+                        if (q.Size == topN)
                             bottom = q.Top();
                     }
                     else if (InCounts.Get(node) > bottom.Count)
@@ -1022,7 +1022,7 @@ namespace Lucene.Net.Util.Fst
             // Free up RAM
             InCounts = null;
             var topNodeMap = new HashMap<long, long>();
-            for (var downTo = q.Size() - 1; downTo >= 0; downTo--)
+            for (var downTo = q.Size - 1; downTo >= 0; downTo--)
             {
                 var n = q.Pop();
                 topNodeMap.Add(n.Node, downTo);

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/25ec42a2/src/core/Util/Fst/FSTEnum.cs
----------------------------------------------------------------------
diff --git a/src/core/Util/Fst/FSTEnum.cs b/src/core/Util/Fst/FSTEnum.cs
index 5c4839a..2475108 100644
--- a/src/core/Util/Fst/FSTEnum.cs
+++ b/src/core/Util/Fst/FSTEnum.cs
@@ -9,7 +9,7 @@ namespace Lucene.Net.Util.Fst
 
         protected FST<T>.Arc<T>[] arcs = new FST<T>.Arc<T>[10];
 
-        protected T[] output = (T[])new Object[10];
+        protected T[] output = new T[10];
 
         protected readonly T NO_OUTPUT;
         protected readonly FST.BytesReader fstReader;

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/25ec42a2/src/core/Util/Fst/ForwardBytesReader.cs
----------------------------------------------------------------------
diff --git a/src/core/Util/Fst/ForwardBytesReader.cs b/src/core/Util/Fst/ForwardBytesReader.cs
index bcbb357..5d6e672 100644
--- a/src/core/Util/Fst/ForwardBytesReader.cs
+++ b/src/core/Util/Fst/ForwardBytesReader.cs
@@ -25,7 +25,7 @@ namespace Lucene.Net.Util.Fst
 
         public override byte ReadByte()
         {
-            return bytes[Position++];
+            return (byte)bytes[Position++];
         }
 
         public override void ReadBytes(byte[] bytes, int offset, int len)

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/25ec42a2/src/core/Util/OpenBitSetIterator.cs
----------------------------------------------------------------------
diff --git a/src/core/Util/OpenBitSetIterator.cs b/src/core/Util/OpenBitSetIterator.cs
index febdd83..3e76978 100644
--- a/src/core/Util/OpenBitSetIterator.cs
+++ b/src/core/Util/OpenBitSetIterator.cs
@@ -86,8 +86,8 @@ namespace Lucene.Net.Util
         // for efficiency, or have a common root interface?  (or
         // maybe both?  could ask for a SetBitsIterator, etc...
 
-        private readonly long[] arr;
-        private readonly int words;
+        internal readonly long[] arr;
+        internal readonly int words;
         private int i = -1;
         private long word;
         private int wordShift;

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/25ec42a2/src/core/Util/Packed/BulkOperationPacked.cs
----------------------------------------------------------------------
diff --git a/src/core/Util/Packed/BulkOperationPacked.cs b/src/core/Util/Packed/BulkOperationPacked.cs
index 5ea2438..67eda8d 100644
--- a/src/core/Util/Packed/BulkOperationPacked.cs
+++ b/src/core/Util/Packed/BulkOperationPacked.cs
@@ -232,7 +232,7 @@ namespace Lucene.Net.Util.Packed
 
         public override void Encode(long[] values, int valuesOffset, sbyte[] blocks, int blocksOffset, int iterations)
         {
-            int nextBlock = 0;
+            uint nextBlock = 0;
             int bitsLeft = 8;
             for (int i = 0; i < byteValueCount * iterations; ++i)
             {
@@ -256,7 +256,7 @@ namespace Lucene.Net.Util.Packed
                     }
                     // then buffer
                     bitsLeft = 8 - bits;
-                    nextBlock = (int)((v & ((1L << bits) - 1)) << bitsLeft);
+                    nextBlock = (uint)((v & ((1L << bits) - 1)) << bitsLeft);
                 }
             }
             //assert bitsLeft == 8;


[43/50] [abbrv] git commit: Initial port of classic QueryParser. Broken.

Posted by mh...@apache.org.
Initial port of classic QueryParser. Broken.

Gets stuck in an infinite loop if you search with the default field. If
searching with an explicit field or the *:* query, it errors out.
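
A minimal repro sketch of the reported breakage (hypothetical usage; the exact
Version constant, analyzer, and Parse signature are assumptions, not part of
this commit):

    var parser = new QueryParser(Version.LUCENE_40, "body", new WhitespaceAnalyzer(Version.LUCENE_40));
    parser.Parse("hello");      // default-field search: hangs in an infinite loop
    parser.Parse("body:hello"); // explicit field: errors out
    parser.Parse("*:*");        // match-all: errors out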


Project: http://git-wip-us.apache.org/repos/asf/lucenenet/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucenenet/commit/0e6eb14a
Tree: http://git-wip-us.apache.org/repos/asf/lucenenet/tree/0e6eb14a
Diff: http://git-wip-us.apache.org/repos/asf/lucenenet/diff/0e6eb14a

Branch: refs/heads/branch_4x
Commit: 0e6eb14ac34d87812d5f49a59278a67eaafae531
Parents: 7a4b442
Author: Paul Irwin <pa...@gmail.com>
Authored: Thu Aug 8 17:46:19 2013 -0400
Committer: Paul Irwin <pa...@gmail.com>
Committed: Thu Aug 8 17:46:19 2013 -0400

----------------------------------------------------------------------
 build/vs2012/Lucene.Net.All/Lucene.Net.All.sln  |   11 +
 src/contrib/Core/Analysis/Ext/Analysis.Ext.cs   |    1 +
 src/contrib/Core/Contrib.Core.csproj            |    7 +-
 .../QueryParsers/Classic/FastCharStream.cs      |  134 ++
 src/contrib/QueryParsers/Classic/ICharStream.cs |   37 +
 .../QueryParsers/Classic/ParseException.cs      |  153 +++
 src/contrib/QueryParsers/Classic/QueryParser.cs |  785 ++++++++++++
 .../QueryParsers/Classic/QueryParserBase.cs     | 1033 +++++++++++++++
 .../Classic/QueryParserConstants.cs             |  126 ++
 .../Classic/QueryParserTokenManager.cs          | 1188 ++++++++++++++++++
 src/contrib/QueryParsers/Classic/Token.cs       |  104 ++
 .../QueryParsers/Classic/TokenMgrError.cs       |  105 ++
 .../QueryParsers/Contrib.QueryParsers.csproj    |   69 +
 .../Standard/ICommonQueryParserConfiguration.cs |   37 +
 .../QueryParsers/Properties/AssemblyInfo.cs     |   36 +
 15 files changed, 3825 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucenenet/blob/0e6eb14a/build/vs2012/Lucene.Net.All/Lucene.Net.All.sln
----------------------------------------------------------------------
diff --git a/build/vs2012/Lucene.Net.All/Lucene.Net.All.sln b/build/vs2012/Lucene.Net.All/Lucene.Net.All.sln
index d8c2cc2..0b9121d 100644
--- a/build/vs2012/Lucene.Net.All/Lucene.Net.All.sln
+++ b/build/vs2012/Lucene.Net.All/Lucene.Net.All.sln
@@ -37,6 +37,8 @@ Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "Contrib.WordNet.SynLookup",
 EndProject
 Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "Contrib.WordNet.Syns2Index", "..\..\..\src\contrib\WordNet\Syns2Index\Contrib.WordNet.Syns2Index.csproj", "{7563D4D9-AE91-42BA-A270-1D264660F6DF}"
 EndProject
+Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "Contrib.QueryParsers", "..\..\..\src\contrib\QueryParsers\Contrib.QueryParsers.csproj", "{56438272-B00E-40DE-9C9A-0785E705E7D9}"
+EndProject
 Global
 	GlobalSection(SolutionConfigurationPlatforms) = preSolution
 		Debug|Any CPU = Debug|Any CPU
@@ -173,6 +175,14 @@ Global
 		{7563D4D9-AE91-42BA-A270-1D264660F6DF}.Release|Any CPU.Build.0 = Release|Any CPU
 		{7563D4D9-AE91-42BA-A270-1D264660F6DF}.Release35|Any CPU.ActiveCfg = Release35|Any CPU
 		{7563D4D9-AE91-42BA-A270-1D264660F6DF}.Release35|Any CPU.Build.0 = Release35|Any CPU
+		{56438272-B00E-40DE-9C9A-0785E705E7D9}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
+		{56438272-B00E-40DE-9C9A-0785E705E7D9}.Debug|Any CPU.Build.0 = Debug|Any CPU
+		{56438272-B00E-40DE-9C9A-0785E705E7D9}.Debug35|Any CPU.ActiveCfg = Debug|Any CPU
+		{56438272-B00E-40DE-9C9A-0785E705E7D9}.Debug35|Any CPU.Build.0 = Debug|Any CPU
+		{56438272-B00E-40DE-9C9A-0785E705E7D9}.Release|Any CPU.ActiveCfg = Release|Any CPU
+		{56438272-B00E-40DE-9C9A-0785E705E7D9}.Release|Any CPU.Build.0 = Release|Any CPU
+		{56438272-B00E-40DE-9C9A-0785E705E7D9}.Release35|Any CPU.ActiveCfg = Release|Any CPU
+		{56438272-B00E-40DE-9C9A-0785E705E7D9}.Release35|Any CPU.Build.0 = Release|Any CPU
 	EndGlobalSection
 	GlobalSection(SolutionProperties) = preSolution
 		HideSolutionNode = FALSE
@@ -194,5 +204,6 @@ Global
 		{1407C9BA-337C-4C6C-B065-68328D3871B3} = {7E19085A-545B-4DE8-BBF5-B1DBC370FD37}
 		{2CA12E3F-76E1-4FA6-9E87-37079A7B7C69} = {7E19085A-545B-4DE8-BBF5-B1DBC370FD37}
 		{7563D4D9-AE91-42BA-A270-1D264660F6DF} = {7E19085A-545B-4DE8-BBF5-B1DBC370FD37}
+		{56438272-B00E-40DE-9C9A-0785E705E7D9} = {7E19085A-545B-4DE8-BBF5-B1DBC370FD37}
 	EndGlobalSection
 EndGlobal

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/0e6eb14a/src/contrib/Core/Analysis/Ext/Analysis.Ext.cs
----------------------------------------------------------------------
diff --git a/src/contrib/Core/Analysis/Ext/Analysis.Ext.cs b/src/contrib/Core/Analysis/Ext/Analysis.Ext.cs
index beec3fd..0903cfb 100644
--- a/src/contrib/Core/Analysis/Ext/Analysis.Ext.cs
+++ b/src/contrib/Core/Analysis/Ext/Analysis.Ext.cs
@@ -24,6 +24,7 @@ using System.IO;
 using Lucene.Net.Analysis;
 using Lucene.Net.Analysis.Tokenattributes;
 using Lucene.Net.Util;
+using Lucene.Net.Analysis.Core;
 
 
 namespace Lucene.Net.Analysis.Ext

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/0e6eb14a/src/contrib/Core/Contrib.Core.csproj
----------------------------------------------------------------------
diff --git a/src/contrib/Core/Contrib.Core.csproj b/src/contrib/Core/Contrib.Core.csproj
index bced134..6e1bdb9 100644
--- a/src/contrib/Core/Contrib.Core.csproj
+++ b/src/contrib/Core/Contrib.Core.csproj
@@ -30,7 +30,8 @@
     <RootNamespace>Lucene.Net</RootNamespace>
     <AssemblyName>Lucene.Net.Contrib.Core</AssemblyName>
     <FileAlignment>512</FileAlignment>
-    <FileUpgradeFlags></FileUpgradeFlags>
+    <FileUpgradeFlags>
+    </FileUpgradeFlags>
     <OldToolsVersion>3.5</OldToolsVersion>
     <UpgradeBackupLocation />
     <PublishUrl>publish\</PublishUrl>
@@ -126,6 +127,10 @@
       <Project>{5D4AD9BE-1FFB-41AB-9943-25737971BF57}</Project>
       <Name>Lucene.Net</Name>
     </ProjectReference>
+    <ProjectReference Include="..\Analyzers\Contrib.Analyzers.csproj">
+      <Project>{4286e961-9143-4821-b46d-3d39d3736386}</Project>
+      <Name>Contrib.Analyzers</Name>
+    </ProjectReference>
   </ItemGroup>
   <ItemGroup>
     <BootstrapperPackage Include=".NETFramework,Version=v4.0">

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/0e6eb14a/src/contrib/QueryParsers/Classic/FastCharStream.cs
----------------------------------------------------------------------
diff --git a/src/contrib/QueryParsers/Classic/FastCharStream.cs b/src/contrib/QueryParsers/Classic/FastCharStream.cs
new file mode 100644
index 0000000..6e3a39e
--- /dev/null
+++ b/src/contrib/QueryParsers/Classic/FastCharStream.cs
@@ -0,0 +1,134 @@
+using System;
+using System.Collections.Generic;
+using System.IO;
+using System.Linq;
+using System.Text;
+using System.Threading.Tasks;
+
+namespace Lucene.Net.QueryParsers.Classic
+{
+    public sealed class FastCharStream : ICharStream
+    {
+        internal char[] buffer = null;
+
+        internal int bufferLength = 0;          // end of valid chars
+        internal int bufferPosition = 0;        // next char to read
+
+        internal int tokenStart = 0;          // offset in buffer
+        internal int bufferStart = 0;          // position in file of buffer
+
+        internal TextReader input;            // source of chars
+
+        public FastCharStream(TextReader r)
+        {
+            input = r;
+        }
+
+        public char ReadChar()
+        {
+            if (bufferPosition >= bufferLength)
+                Refill();
+            return buffer[bufferPosition++];
+        }
+
+        private void Refill()
+        {
+            int newPosition = bufferLength - tokenStart;
+
+            if (tokenStart == 0)
+            {        // token won't fit in buffer
+                if (buffer == null)
+                {        // first time: alloc buffer
+                    buffer = new char[2048];
+                }
+                else if (bufferLength == buffer.Length)
+                { // grow buffer
+                    char[] newBuffer = new char[buffer.Length * 2];
+                    Array.Copy(buffer, 0, newBuffer, 0, bufferLength);
+                    buffer = newBuffer;
+                }
+            }
+            else
+            {            // shift token to front
+                Array.Copy(buffer, tokenStart, buffer, 0, newPosition);
+            }
+
+            bufferLength = newPosition;        // update state
+            bufferPosition = newPosition;
+            bufferStart += tokenStart;
+            tokenStart = 0;
+
+            int charsRead =          // fill space in buffer
+              input.Read(buffer, newPosition, buffer.Length - newPosition);
+            if (charsRead <= 0) // TextReader.Read returns 0 at end of stream, not -1 as in Java
+                throw new IOException("read past eof");
+            else
+                bufferLength += charsRead;
+        }
+        
+        public char BeginToken()
+        {
+            tokenStart = bufferPosition;
+            return ReadChar();
+        }
+        
+        public void Backup(int amount)
+        {
+            bufferPosition -= amount;
+        }
+
+        public string GetImage()
+        {
+            return new String(buffer, tokenStart, bufferPosition - tokenStart);
+        }
+
+        public char[] GetSuffix(int len)
+        {
+            char[] value = new char[len];
+            Array.Copy(buffer, bufferPosition - len, value, 0, len);
+            return value;
+        }
+
+        public void Done()
+        {
+            try
+            {
+                input.Close();
+            }
+            catch (IOException)
+            {
+            }
+        }
+
+        public int Column
+        {
+            get { return bufferStart + bufferPosition; }
+        }
+
+        public int Line
+        {
+            get { return 1; }
+        }
+
+        public int EndColumn
+        {
+            get { return bufferStart + bufferPosition; }
+        }
+
+        public int EndLine
+        {
+            get { return 1; }
+        }
+
+        public int BeginColumn
+        {
+            get { return bufferStart + tokenStart; }
+        }
+
+        public int BeginLine
+        {
+            get { return 1; }
+        }
+        
+    }
+}

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/0e6eb14a/src/contrib/QueryParsers/Classic/ICharStream.cs
----------------------------------------------------------------------
diff --git a/src/contrib/QueryParsers/Classic/ICharStream.cs b/src/contrib/QueryParsers/Classic/ICharStream.cs
new file mode 100644
index 0000000..d68cfba
--- /dev/null
+++ b/src/contrib/QueryParsers/Classic/ICharStream.cs
@@ -0,0 +1,37 @@
+using System;
+using System.Collections.Generic;
+using System.Linq;
+using System.Text;
+using System.Threading.Tasks;
+
+namespace Lucene.Net.QueryParsers.Classic
+{
+    public interface ICharStream
+    {
+        char ReadChar();
+
+        [Obsolete]
+        int Column { get; }
+
+        [Obsolete]
+        int Line { get; }
+
+        int EndColumn { get; }
+
+        int EndLine { get; }
+
+        int BeginColumn { get; }
+
+        int BeginLine { get; }
+
+        void Backup(int amount);
+
+        char BeginToken();
+
+        string GetImage();
+
+        char[] GetSuffix(int len);
+
+        void Done();
+    }
+}

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/0e6eb14a/src/contrib/QueryParsers/Classic/ParseException.cs
----------------------------------------------------------------------
diff --git a/src/contrib/QueryParsers/Classic/ParseException.cs b/src/contrib/QueryParsers/Classic/ParseException.cs
new file mode 100644
index 0000000..253c0cb
--- /dev/null
+++ b/src/contrib/QueryParsers/Classic/ParseException.cs
@@ -0,0 +1,153 @@
+using System;
+using System.Collections.Generic;
+using System.Configuration;
+using System.Linq;
+using System.Text;
+using System.Threading.Tasks;
+
+namespace Lucene.Net.QueryParsers.Classic
+{
+    public class ParseException : Exception
+    {
+        private const long serialVersionUID = 1L;
+
+        public ParseException(Token currentTokenVal,
+                        int[][] expectedTokenSequencesVal,
+                        String[] tokenImageVal
+                       )
+            : base(Initialise(currentTokenVal, expectedTokenSequencesVal, tokenImageVal))
+        {
+            currentToken = currentTokenVal;
+            expectedTokenSequences = expectedTokenSequencesVal;
+            tokenImage = tokenImageVal;
+        }
+
+        public ParseException()
+            : base()
+        {
+        }
+
+        public ParseException(String message)
+            : base(message)
+        {
+        }
+
+        // .NET Port: not present in Java version but needed for inner exception
+        public ParseException(String message, Exception innerException)
+            : base(message, innerException)
+        {
+        }
+
+
+        public Token currentToken;
+
+        public int[][] expectedTokenSequences;
+
+        public String[] tokenImage;
+
+        private static String Initialise(Token currentToken,
+                           int[][] expectedTokenSequences,
+                           String[] tokenImage)
+        {
+            String eol = ConfigurationManager.AppSettings["line.separator"] ?? "\n";
+            StringBuilder expected = new StringBuilder();
+            int maxSize = 0;
+            for (int i = 0; i < expectedTokenSequences.Length; i++)
+            {
+                if (maxSize < expectedTokenSequences[i].Length)
+                {
+                    maxSize = expectedTokenSequences[i].Length;
+                }
+                for (int j = 0; j < expectedTokenSequences[i].Length; j++)
+                {
+                    expected.Append(tokenImage[expectedTokenSequences[i][j]]).Append(' ');
+                }
+                if (expectedTokenSequences[i][expectedTokenSequences[i].Length - 1] != 0)
+                {
+                    expected.Append("...");
+                }
+                expected.Append(eol).Append("    ");
+            }
+            String retval = "Encountered \"";
+            Token tok = currentToken.next;
+            for (int i = 0; i < maxSize; i++)
+            {
+                if (i != 0) retval += " ";
+                if (tok.kind == 0)
+                {
+                    retval += tokenImage[0];
+                    break;
+                }
+                retval += " " + tokenImage[tok.kind];
+                retval += " \"";
+                retval += Add_escapes(tok.image);
+                retval += " \"";
+                tok = tok.next;
+            }
+            retval += "\" at line " + currentToken.next.beginLine + ", column " + currentToken.next.beginColumn;
+            retval += "." + eol;
+            if (expectedTokenSequences.Length == 1)
+            {
+                retval += "Was expecting:" + eol + "    ";
+            }
+            else
+            {
+                retval += "Was expecting one of:" + eol + "    ";
+            }
+            retval += expected.ToString();
+            return retval;
+        }
+
+        protected String eol = ConfigurationManager.AppSettings["line.separator"] ?? "\n";
+
+        static String Add_escapes(String str)
+        {
+            StringBuilder retval = new StringBuilder();
+            char ch;
+            for (int i = 0; i < str.Length; i++)
+            {
+                switch (str[i])
+                {
+                    case (char)0:
+                        continue;
+                    case '\b':
+                        retval.Append("\\b");
+                        continue;
+                    case '\t':
+                        retval.Append("\\t");
+                        continue;
+                    case '\n':
+                        retval.Append("\\n");
+                        continue;
+                    case '\f':
+                        retval.Append("\\f");
+                        continue;
+                    case '\r':
+                        retval.Append("\\r");
+                        continue;
+                    case '\"':
+                        retval.Append("\\\"");
+                        continue;
+                    case '\'':
+                        retval.Append("\\\'");
+                        continue;
+                    case '\\':
+                        retval.Append("\\\\");
+                        continue;
+                    default:
+                        if ((ch = str[i]) < 0x20 || ch > 0x7e)
+                        {
+                            String s = "0000" + Convert.ToString(ch, 16);
+                            retval.Append("\\u" + s.Substring(s.Length - 4)); // Java substring(begin, end) -> C# Substring(start)
+                        }
+                        else
+                        {
+                            retval.Append(ch);
+                        }
+                        continue;
+                }
+            }
+            return retval.ToString();
+        }
+    }
+}

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/0e6eb14a/src/contrib/QueryParsers/Classic/QueryParser.cs
----------------------------------------------------------------------
diff --git a/src/contrib/QueryParsers/Classic/QueryParser.cs b/src/contrib/QueryParsers/Classic/QueryParser.cs
new file mode 100644
index 0000000..ca76ac5
--- /dev/null
+++ b/src/contrib/QueryParsers/Classic/QueryParser.cs
@@ -0,0 +1,785 @@
+using Lucene.Net.Analysis;
+using Lucene.Net.Search;
+using System;
+using System.Collections.Generic;
+using System.IO;
+using System.Linq;
+using System.Text;
+using System.Threading.Tasks;
+using Version = Lucene.Net.Util.Version;
+
+namespace Lucene.Net.QueryParsers.Classic
+{
+    public class QueryParser : QueryParserBase, IQueryParserConstants
+    {
+        public enum Operator
+        {
+            OR,
+            AND
+        }
+
+        public QueryParser(Version matchVersion, String f, Analyzer a)
+            : this(new FastCharStream(new StringReader("")))
+        {
+            Init(matchVersion, f, a);
+        }
+
+        public int Conjunction()
+        {
+            int ret = CONJ_NONE;
+            switch ((_jj_ntk == -1) ? jj_ntk() : _jj_ntk)
+            {
+                case QueryParserConstants.AND:
+                case QueryParserConstants.OR:
+                    switch ((_jj_ntk == -1) ? jj_ntk() : _jj_ntk)
+                    {
+                        case QueryParserConstants.AND:
+                            jj_consume_token(QueryParserConstants.AND);
+                            ret = CONJ_AND;
+                            break;
+                        case QueryParserConstants.OR:
+                            jj_consume_token(QueryParserConstants.OR);
+                            ret = CONJ_OR;
+                            break;
+                        default:
+                            jj_la1[0] = jj_gen;
+                            jj_consume_token(-1);
+                            throw new ParseException();
+                    }
+                    break;
+                default:
+                    jj_la1[1] = jj_gen;
+                    break;
+            }
+            { if (true) return ret; }
+            throw new Exception("Missing return statement in function");
+        }
+
+        public int Modifiers()
+        {
+            int ret = MOD_NONE;
+            switch ((_jj_ntk == -1) ? jj_ntk() : _jj_ntk)
+            {
+                case QueryParserConstants.NOT:
+                case QueryParserConstants.PLUS:
+                case QueryParserConstants.MINUS:
+                    switch ((_jj_ntk == -1) ? jj_ntk() : _jj_ntk)
+                    {
+                        case QueryParserConstants.PLUS:
+                            jj_consume_token(QueryParserConstants.PLUS);
+                            ret = MOD_REQ;
+                            break;
+                        case QueryParserConstants.MINUS:
+                            jj_consume_token(QueryParserConstants.MINUS);
+                            ret = MOD_NOT;
+                            break;
+                        case QueryParserConstants.NOT:
+                            jj_consume_token(QueryParserConstants.NOT);
+                            ret = MOD_NOT;
+                            break;
+                        default:
+                            jj_la1[2] = jj_gen;
+                            jj_consume_token(-1);
+                            throw new ParseException();
+                    }
+                    break;
+                default:
+                    jj_la1[3] = jj_gen;
+                    break;
+            }
+            { if (true) return ret; }
+            throw new Exception("Missing return statement in function");
+        }
+
+        public override Query TopLevelQuery(String field)
+        {
+            Query q;
+            q = Query(field);
+            jj_consume_token(0);
+            { if (true) return q; }
+            throw new Exception("Missing return statement in function");
+        }
+
+        public Query Query(String field)
+        {
+            IList<BooleanClause> clauses = new List<BooleanClause>();
+            Query q, firstQuery = null;
+            int conj, mods;
+            mods = Modifiers();
+            q = Clause(field);
+            AddClause(clauses, CONJ_NONE, mods, q);
+            if (mods == MOD_NONE)
+                firstQuery = q;
+
+            while (true)
+            {
+                bool shouldBreakOuter = false;
+                switch ((_jj_ntk == -1) ? jj_ntk() : _jj_ntk)
+                {
+                    case QueryParserConstants.AND:
+                    case QueryParserConstants.OR:
+                    case QueryParserConstants.NOT:
+                    case QueryParserConstants.PLUS:
+                    case QueryParserConstants.MINUS:
+                    case QueryParserConstants.BAREOPER:
+                    case QueryParserConstants.LPAREN:
+                    case QueryParserConstants.STAR:
+                    case QueryParserConstants.QUOTED:
+                    case QueryParserConstants.TERM:
+                    case QueryParserConstants.PREFIXTERM:
+                    case QueryParserConstants.WILDTERM:
+                    case QueryParserConstants.REGEXPTERM:
+                    case QueryParserConstants.RANGEIN_START:
+                    case QueryParserConstants.RANGEEX_START:
+                    case QueryParserConstants.NUMBER:
+                        ;
+                        break;
+                    default:
+                        jj_la1[4] = jj_gen;
+                        shouldBreakOuter = true;
+                        break;
+                }
+
+                if (shouldBreakOuter) break;
+                conj = Conjunction();
+                mods = Modifiers();
+                q = Clause(field);
+                AddClause(clauses, conj, mods, q);
+            }
+            if (clauses.Count == 1 && firstQuery != null)
+            { if (true) return firstQuery; }
+            else
+            {
+                { if (true) return GetBooleanQuery(clauses); }
+            }
+            throw new Exception("Missing return statement in function");
+        }
+
+        public Query Clause(String field)
+        {
+            Query q;
+            Token fieldToken = null, boost = null;
+            if (jj_2_1(2))
+            {
+                switch ((_jj_ntk == -1) ? jj_ntk() : _jj_ntk)
+                {
+                    case QueryParserConstants.TERM:
+                        fieldToken = jj_consume_token(QueryParserConstants.TERM);
+                        jj_consume_token(QueryParserConstants.COLON);
+                        field = DiscardEscapeChar(fieldToken.image);
+                        break;
+                    case QueryParserConstants.STAR:
+                        jj_consume_token(QueryParserConstants.STAR);
+                        jj_consume_token(QueryParserConstants.COLON);
+                        field = "*";
+                        break;
+                    default:
+                        jj_la1[5] = jj_gen;
+                        jj_consume_token(-1);
+                        throw new ParseException();
+                }
+            }
+            else
+            {
+                ;
+            }
+            switch ((_jj_ntk == -1) ? jj_ntk() : _jj_ntk)
+            {
+                case QueryParserConstants.BAREOPER:
+                case QueryParserConstants.STAR:
+                case QueryParserConstants.QUOTED:
+                case QueryParserConstants.TERM:
+                case QueryParserConstants.PREFIXTERM:
+                case QueryParserConstants.WILDTERM:
+                case QueryParserConstants.REGEXPTERM:
+                case QueryParserConstants.RANGEIN_START:
+                case QueryParserConstants.RANGEEX_START:
+                case QueryParserConstants.NUMBER:
+                    q = Term(field);
+                    break;
+                case QueryParserConstants.LPAREN:
+                    jj_consume_token(QueryParserConstants.LPAREN);
+                    q = Query(field);
+                    jj_consume_token(QueryParserConstants.RPAREN);
+                    switch ((_jj_ntk == -1) ? jj_ntk() : _jj_ntk)
+                    {
+                        case QueryParserConstants.CARAT:
+                            jj_consume_token(QueryParserConstants.CARAT);
+                            boost = jj_consume_token(QueryParserConstants.NUMBER);
+                            break;
+                        default:
+                            jj_la1[6] = jj_gen;
+                            break;
+                    }
+                    break;
+                default:
+                    jj_la1[7] = jj_gen;
+                    jj_consume_token(-1);
+                    throw new ParseException();
+            }
+            { if (true) return HandleBoost(q, boost); }
+            throw new Exception("Missing return statement in function");
+        }
+
+        public Query Term(String field)
+        {
+            Token term, boost = null, fuzzySlop = null, goop1, goop2;
+            bool prefix = false;
+            bool wildcard = false;
+            bool fuzzy = false;
+            bool regexp = false;
+            bool startInc = false;
+            bool endInc = false;
+            Query q;
+            switch ((_jj_ntk == -1) ? jj_ntk() : _jj_ntk)
+            {
+                case QueryParserConstants.BAREOPER:
+                case QueryParserConstants.STAR:
+                case QueryParserConstants.TERM:
+                case QueryParserConstants.PREFIXTERM:
+                case QueryParserConstants.WILDTERM:
+                case QueryParserConstants.REGEXPTERM:
+                case QueryParserConstants.NUMBER:
+                    switch ((_jj_ntk == -1) ? jj_ntk() : _jj_ntk)
+                    {
+                        case QueryParserConstants.TERM:
+                            term = jj_consume_token(QueryParserConstants.TERM);
+                            break;
+                        case QueryParserConstants.STAR:
+                            term = jj_consume_token(QueryParserConstants.STAR);
+                            wildcard = true;
+                            break;
+                        case QueryParserConstants.PREFIXTERM:
+                            term = jj_consume_token(QueryParserConstants.PREFIXTERM);
+                            prefix = true;
+                            break;
+                        case QueryParserConstants.WILDTERM:
+                            term = jj_consume_token(QueryParserConstants.WILDTERM);
+                            wildcard = true;
+                            break;
+                        case QueryParserConstants.REGEXPTERM:
+                            term = jj_consume_token(QueryParserConstants.REGEXPTERM);
+                            regexp = true;
+                            break;
+                        case QueryParserConstants.NUMBER:
+                            term = jj_consume_token(QueryParserConstants.NUMBER);
+                            break;
+                        case QueryParserConstants.BAREOPER:
+                            term = jj_consume_token(QueryParserConstants.BAREOPER);
+                            term.image = term.image.Substring(0, 1);
+                            break;
+                        default:
+                            jj_la1[8] = jj_gen;
+                            jj_consume_token(-1);
+                            throw new ParseException();
+                    }
+                    switch ((_jj_ntk == -1) ? jj_ntk() : _jj_ntk)
+                    {
+                        case QueryParserConstants.FUZZY_SLOP:
+                            fuzzySlop = jj_consume_token(QueryParserConstants.FUZZY_SLOP);
+                            fuzzy = true;
+                            break;
+                        default:
+                            jj_la1[9] = jj_gen;
+                            break;
+                    }
+                    switch ((_jj_ntk == -1) ? jj_ntk() : _jj_ntk)
+                    {
+                        case QueryParserConstants.CARAT:
+                            jj_consume_token(QueryParserConstants.CARAT);
+                            boost = jj_consume_token(QueryParserConstants.NUMBER);
+                            switch ((_jj_ntk == -1) ? jj_ntk() : _jj_ntk)
+                            {
+                                case QueryParserConstants.FUZZY_SLOP:
+                                    fuzzySlop = jj_consume_token(QueryParserConstants.FUZZY_SLOP);
+                                    fuzzy = true;
+                                    break;
+                                default:
+                                    jj_la1[10] = jj_gen;
+                                    break;
+                            }
+                            break;
+                        default:
+                            jj_la1[11] = jj_gen;
+                            break;
+                    }
+                    q = HandleBareTokenQuery(field, term, fuzzySlop, prefix, wildcard, fuzzy, regexp);
+                    break;
+                case QueryParserConstants.RANGEIN_START:
+                case QueryParserConstants.RANGEEX_START:
+                    switch ((_jj_ntk == -1) ? jj_ntk() : _jj_ntk)
+                    {
+                        case QueryParserConstants.RANGEIN_START:
+                            jj_consume_token(QueryParserConstants.RANGEIN_START);
+                            startInc = true;
+                            break;
+                        case QueryParserConstants.RANGEEX_START:
+                            jj_consume_token(QueryParserConstants.RANGEEX_START);
+                            break;
+                        default:
+                            jj_la1[12] = jj_gen;
+                            jj_consume_token(-1);
+                            throw new ParseException();
+                    }
+                    switch ((_jj_ntk == -1) ? jj_ntk() : _jj_ntk)
+                    {
+                        case QueryParserConstants.RANGE_GOOP:
+                            goop1 = jj_consume_token(QueryParserConstants.RANGE_GOOP);
+                            break;
+                        case QueryParserConstants.RANGE_QUOTED:
+                            goop1 = jj_consume_token(QueryParserConstants.RANGE_QUOTED);
+                            break;
+                        default:
+                            jj_la1[13] = jj_gen;
+                            jj_consume_token(-1);
+                            throw new ParseException();
+                    }
+                    switch ((_jj_ntk == -1) ? jj_ntk() : _jj_ntk)
+                    {
+                        case QueryParserConstants.RANGE_TO:
+                            jj_consume_token(QueryParserConstants.RANGE_TO);
+                            break;
+                        default:
+                            jj_la1[14] = jj_gen;
+                            break;
+                    }
+                    switch ((_jj_ntk == -1) ? jj_ntk() : _jj_ntk)
+                    {
+                        case QueryParserConstants.RANGE_GOOP:
+                            goop2 = jj_consume_token(QueryParserConstants.RANGE_GOOP);
+                            break;
+                        case QueryParserConstants.RANGE_QUOTED:
+                            goop2 = jj_consume_token(QueryParserConstants.RANGE_QUOTED);
+                            break;
+                        default:
+                            jj_la1[15] = jj_gen;
+                            jj_consume_token(-1);
+                            throw new ParseException();
+                    }
+                    switch ((_jj_ntk == -1) ? jj_ntk() : _jj_ntk)
+                    {
+                        case QueryParserConstants.RANGEIN_END:
+                            jj_consume_token(QueryParserConstants.RANGEIN_END);
+                            endInc = true;
+                            break;
+                        case QueryParserConstants.RANGEEX_END:
+                            jj_consume_token(QueryParserConstants.RANGEEX_END);
+                            break;
+                        default:
+                            jj_la1[16] = jj_gen;
+                            jj_consume_token(-1);
+                            throw new ParseException();
+                    }
+                    switch ((_jj_ntk == -1) ? jj_ntk() : _jj_ntk)
+                    {
+                        case QueryParserConstants.CARAT:
+                            jj_consume_token(QueryParserConstants.CARAT);
+                            boost = jj_consume_token(QueryParserConstants.NUMBER);
+                            break;
+                        default:
+                            jj_la1[17] = jj_gen;
+                            break;
+                    }
+                    bool startOpen = false;
+                    bool endOpen = false;
+                    if (goop1.kind == QueryParserConstants.RANGE_QUOTED)
+                    {
+                        goop1.image = goop1.image.Substring(1, goop1.image.Length - 2); // strip surrounding quotes (Java substring(1, len - 1) == C# Substring(1, len - 2))
+                    }
+                    else if ("*".Equals(goop1.image))
+                    {
+                        startOpen = true;
+                    }
+                    if (goop2.kind == QueryParserConstants.RANGE_QUOTED)
+                    {
+                        goop2.image = goop2.image.Substring(1, goop2.image.Length - 2); // strip surrounding quotes (Java substring(1, len - 1) == C# Substring(1, len - 2))
+                    }
+                    else if ("*".Equals(goop2.image))
+                    {
+                        endOpen = true;
+                    }
+                    q = GetRangeQuery(field, startOpen ? null : DiscardEscapeChar(goop1.image), endOpen ? null : DiscardEscapeChar(goop2.image), startInc, endInc);
+                    break;
+                case QueryParserConstants.QUOTED:
+                    term = jj_consume_token(QueryParserConstants.QUOTED);
+                    switch ((_jj_ntk == -1) ? jj_ntk() : _jj_ntk)
+                    {
+                        case QueryParserConstants.FUZZY_SLOP:
+                            fuzzySlop = jj_consume_token(QueryParserConstants.FUZZY_SLOP);
+                            break;
+                        default:
+                            jj_la1[18] = jj_gen;
+                            break;
+                    }
+                    switch ((_jj_ntk == -1) ? jj_ntk() : _jj_ntk)
+                    {
+                        case QueryParserConstants.CARAT:
+                            jj_consume_token(QueryParserConstants.CARAT);
+                            boost = jj_consume_token(QueryParserConstants.NUMBER);
+                            break;
+                        default:
+                            jj_la1[19] = jj_gen;
+                            break;
+                    }
+                    q = HandleQuotedTerm(field, term, fuzzySlop);
+                    break;
+                default:
+                    jj_la1[20] = jj_gen;
+                    jj_consume_token(-1);
+                    throw new ParseException();
+            }
+            { if (true) return HandleBoost(q, boost); }
+            throw new Exception("Missing return statement in function");
+        }
+
+        private bool jj_2_1(int xla)
+        {
+            jj_la = xla; jj_lastpos = jj_scanpos = token;
+            try { return !jj_3_1(); }
+            catch (LookaheadSuccess) { return true; }
+            finally { jj_save(0, xla); }
+        }
+
+        private bool jj_3R_2()
+        {
+            if (jj_scan_token(QueryParserConstants.TERM)) return true;
+            if (jj_scan_token(QueryParserConstants.COLON)) return true;
+            return false;
+        }
+
+        private bool jj_3_1()
+        {
+            Token xsp;
+            xsp = jj_scanpos;
+            if (jj_3R_2())
+            {
+                jj_scanpos = xsp;
+                if (jj_3R_3()) return true;
+            }
+            return false;
+        }
+
+        private bool jj_3R_3()
+        {
+            if (jj_scan_token(QueryParserConstants.STAR)) return true;
+            if (jj_scan_token(QueryParserConstants.COLON)) return true;
+            return false;
+        }
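+
+        // Note on the generated jj_2_/jj_3R_ machinery: jj_2_1(k) speculatively
+        // scans up to k tokens ahead without consuming them, using the
+        // LookaheadSuccess exception as a non-local exit. For example, jj_3_1()
+        // matches input such as "title:" (TERM COLON, via jj_3R_2) or "*:"
+        // (STAR COLON, via jj_3R_3), letting the parser decide whether a field
+        // prefix follows. Illustrative comment only; not part of the JavaCC output.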
+
+        /** Generated Token Manager. */
+        public QueryParserTokenManager token_source;
+        /** Current token. */
+        public Token token;
+        /** Next token. */
+        public Token jj_nt;
+        private int _jj_ntk;
+        private Token jj_scanpos, jj_lastpos;
+        private int jj_la;
+        private int jj_gen;
+        private readonly int[] jj_la1 = new int[21];
+        private static int[] jj_la1_0;
+        private static int[] jj_la1_1;
+
+        static QueryParser()
+        {
+            jj_la1_init_0();
+            jj_la1_init_1();
+        }
+
+        private static void jj_la1_init_0()
+        {
+            jj_la1_0 = new int[] { 0x300, 0x300, 0x1c00, 0x1c00, 0xfda7f00, 0x120000, 0x40000, 0xfda6000, 0x9d22000, 0x200000, 0x200000, 0x40000, 0x6000000, unchecked((int)0x80000000), 0x10000000, unchecked((int)0x80000000), 0x60000000, 0x40000, 0x200000, 0x40000, 0xfda2000, };
+        }
+        private static void jj_la1_init_1()
+        {
+            jj_la1_1 = new int[] { 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1, 0x0, 0x1, 0x0, 0x0, 0x0, 0x0, 0x0, };
+        }
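+
+        // The jj_la1_0/jj_la1_1 arrays are per-choice-point bitmasks over token
+        // kinds: bit j of jj_la1_0[i] marks kind j (0-31) and bit j of jj_la1_1[i]
+        // marks kind 32 + j as expected at choice point i; GenerateParseException()
+        // walks them to rebuild the expected-token set. For example, jj_la1_1[13]
+        // == 0x1 records token kind 32 as expected at choice point 13 (the
+        // RANGE_GOOP/RANGE_QUOTED alternative above). Illustrative comment only;
+        // not part of the JavaCC output.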
+
+        private readonly JJCalls[] jj_2_rtns = new JJCalls[1];
+        private bool jj_rescan = false;
+        private int jj_gc = 0;
+
+        /** Constructor with user supplied CharStream. */
+        protected QueryParser(ICharStream stream)
+        {
+            token_source = new QueryParserTokenManager(stream);
+            token = new Token();
+            _jj_ntk = -1;
+            jj_gen = 0;
+            for (int i = 0; i < 21; i++) jj_la1[i] = -1;
+            for (int i = 0; i < jj_2_rtns.Length; i++) jj_2_rtns[i] = new JJCalls();
+        }
+
+        /** Reinitialise. */
+        public override void ReInit(ICharStream stream)
+        {
+            token_source.ReInit(stream);
+            token = new Token();
+            _jj_ntk = -1;
+            jj_gen = 0;
+            for (int i = 0; i < 21; i++) jj_la1[i] = -1;
+            for (int i = 0; i < jj_2_rtns.Length; i++) jj_2_rtns[i] = new JJCalls();
+        }
+
+        /** Constructor with generated Token Manager. */
+        protected QueryParser(QueryParserTokenManager tm)
+        {
+            token_source = tm;
+            token = new Token();
+            _jj_ntk = -1;
+            jj_gen = 0;
+            for (int i = 0; i < 21; i++) jj_la1[i] = -1;
+            for (int i = 0; i < jj_2_rtns.Length; i++) jj_2_rtns[i] = new JJCalls();
+        }
+
+        /** Reinitialise. */
+        public void ReInit(QueryParserTokenManager tm)
+        {
+            token_source = tm;
+            token = new Token();
+            _jj_ntk = -1;
+            jj_gen = 0;
+            for (int i = 0; i < 21; i++) jj_la1[i] = -1;
+            for (int i = 0; i < jj_2_rtns.Length; i++) jj_2_rtns[i] = new JJCalls();
+        }
+
+        private Token jj_consume_token(int kind)
+        {
+            Token oldToken;
+            if ((oldToken = token).next != null) token = token.next;
+            else token = token.next = token_source.GetNextToken();
+            _jj_ntk = -1;
+            if (token.kind == kind)
+            {
+                jj_gen++;
+                if (++jj_gc > 100)
+                {
+                    jj_gc = 0;
+                    for (int i = 0; i < jj_2_rtns.Length; i++)
+                    {
+                        JJCalls c = jj_2_rtns[i];
+                        while (c != null)
+                        {
+                            if (c.gen < jj_gen) c.first = null;
+                            c = c.next;
+                        }
+                    }
+                }
+                return token;
+            }
+            token = oldToken;
+            jj_kind = kind;
+            throw GenerateParseException();
+        }
+
+        private sealed class LookaheadSuccess : Exception { }
+
+        private readonly LookaheadSuccess jj_ls = new LookaheadSuccess();
+
+        private bool jj_scan_token(int kind)
+        {
+            if (jj_scanpos == jj_lastpos)
+            {
+                jj_la--;
+                if (jj_scanpos.next == null)
+                {
+                    jj_lastpos = jj_scanpos = jj_scanpos.next = token_source.GetNextToken();
+                }
+                else
+                {
+                    jj_lastpos = jj_scanpos = jj_scanpos.next;
+                }
+            }
+            else
+            {
+                jj_scanpos = jj_scanpos.next;
+            }
+            if (jj_rescan)
+            {
+                int i = 0; Token tok = token;
+                while (tok != null && tok != jj_scanpos) { i++; tok = tok.next; }
+                if (tok != null) jj_add_error_token(kind, i);
+            }
+            if (jj_scanpos.kind != kind) return true;
+            if (jj_la == 0 && jj_scanpos == jj_lastpos) throw jj_ls;
+            return false;
+        }
+
+        /** Get the next Token. */
+        public Token GetNextToken()
+        {
+            if (token.next != null) token = token.next;
+            else token = token.next = token_source.GetNextToken();
+            _jj_ntk = -1;
+            jj_gen++;
+            return token;
+        }
+
+        /** Get the specific Token. */
+        public Token GetToken(int index)
+        {
+            Token t = token;
+            for (int i = 0; i < index; i++)
+            {
+                if (t.next != null) t = t.next;
+                else t = t.next = token_source.GetNextToken();
+            }
+            return t;
+        }
+
+        private int jj_ntk()
+        {
+            if ((jj_nt = token.next) == null)
+                return (_jj_ntk = (token.next = token_source.GetNextToken()).kind);
+            else
+                return (_jj_ntk = jj_nt.kind);
+        }
+
+        private IList<int[]> jj_expentries = new List<int[]>();
+        private int[] jj_expentry;
+        private int jj_kind = -1;
+        private int[] jj_lasttokens = new int[100];
+        private int jj_endpos;
+
+        private void jj_add_error_token(int kind, int pos)
+        {
+            if (pos >= 100) return;
+            if (pos == jj_endpos + 1)
+            {
+                jj_lasttokens[jj_endpos++] = kind;
+            }
+            else if (jj_endpos != 0)
+            {
+                jj_expentry = new int[jj_endpos];
+                for (int i = 0; i < jj_endpos; i++)
+                {
+                    jj_expentry[i] = jj_lasttokens[i];
+                }
+
+                foreach (int[] oldentry in jj_expentries)
+                {
+                    bool shouldContinueOuter = false;
+                    if (oldentry.Length == jj_expentry.Length)
+                    {
+                        for (int i = 0; i < jj_expentry.Length; i++)
+                        {
+                            if (oldentry[i] != jj_expentry[i])
+                            {
+                                shouldContinueOuter = true;
+                                break;
+                            }
+                        }
+
+                        if (shouldContinueOuter)
+                            continue;
+                        jj_expentries.Add(jj_expentry);
+                        break;
+                    }
+                }
+                if (pos != 0) jj_lasttokens[(jj_endpos = pos) - 1] = kind;
+            }
+        }
+
+        /** Generate ParseException. */
+        public ParseException GenerateParseException()
+        {
+            jj_expentries.Clear();
+            bool[] la1tokens = new bool[33];
+            if (jj_kind >= 0)
+            {
+                la1tokens[jj_kind] = true;
+                jj_kind = -1;
+            }
+            for (int i = 0; i < 21; i++)
+            {
+                if (jj_la1[i] == jj_gen)
+                {
+                    for (int j = 0; j < 32; j++)
+                    {
+                        if ((jj_la1_0[i] & (1 << j)) != 0)
+                        {
+                            la1tokens[j] = true;
+                        }
+                        if ((jj_la1_1[i] & (1 << j)) != 0)
+                        {
+                            la1tokens[32 + j] = true;
+                        }
+                    }
+                }
+            }
+            for (int i = 0; i < 33; i++)
+            {
+                if (la1tokens[i])
+                {
+                    jj_expentry = new int[1];
+                    jj_expentry[0] = i;
+                    jj_expentries.Add(jj_expentry);
+                }
+            }
+            jj_endpos = 0;
+            jj_rescan_token();
+            jj_add_error_token(0, 0);
+            int[][] exptokseq = new int[jj_expentries.Count][];
+            for (int i = 0; i < jj_expentries.Count; i++)
+            {
+                exptokseq[i] = jj_expentries[i];
+            }
+            return new ParseException(token, exptokseq, QueryParserConstants.tokenImage);
+        }
+
+        /** Enable tracing. */
+        public virtual void EnableTracing()
+        {
+        }
+
+        /** Disable tracing. */
+        public virtual void DisableTracing()
+        {
+        }
+
+        private void jj_rescan_token()
+        {
+            jj_rescan = true;
+            for (int i = 0; i < 1; i++)
+            {
+                try
+                {
+                    JJCalls p = jj_2_rtns[i];
+                    do
+                    {
+                        if (p.gen > jj_gen)
+                        {
+                            jj_la = p.arg; jj_lastpos = jj_scanpos = p.first;
+                            switch (i)
+                            {
+                                case 0: jj_3_1(); break;
+                            }
+                        }
+                        p = p.next;
+                    } while (p != null);
+                }
+                catch (LookaheadSuccess) { }
+            }
+            jj_rescan = false;
+        }
+
+        private void jj_save(int index, int xla)
+        {
+            JJCalls p = jj_2_rtns[index];
+            while (p.gen > jj_gen)
+            {
+                if (p.next == null) { p = p.next = new JJCalls(); break; }
+                p = p.next;
+            }
+            p.gen = jj_gen + xla - jj_la; p.first = token; p.arg = xla;
+        }
+
+        internal sealed class JJCalls
+        {
+            public int gen;
+            public Token first;
+            public int arg;
+            public JJCalls next;
+        }
+    }
+}

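For orientation, a minimal usage sketch of the generated parser — assuming the
public QueryParser(Version, string, Analyzer) convenience constructor from the
Java original is ported alongside the protected ones shown above, and that
StandardAnalyzer takes the usual matchVersion argument:

    var analyzer = new StandardAnalyzer(Version.LUCENE_40);
    var parser = new QueryParser(Version.LUCENE_40, "body", analyzer);
    Query q = parser.Parse("title:lucene AND (port OR \"dot net\")");
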
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/0e6eb14a/src/contrib/QueryParsers/Classic/QueryParserBase.cs
----------------------------------------------------------------------
diff --git a/src/contrib/QueryParsers/Classic/QueryParserBase.cs b/src/contrib/QueryParsers/Classic/QueryParserBase.cs
new file mode 100644
index 0000000..5425f0c
--- /dev/null
+++ b/src/contrib/QueryParsers/Classic/QueryParserBase.cs
@@ -0,0 +1,1033 @@
+using Lucene.Net.Analysis;
+using Lucene.Net.Analysis.Tokenattributes;
+using Lucene.Net.Documents;
+using Lucene.Net.Index;
+using Lucene.Net.QueryParsers.Flexible.Standard;
+using Lucene.Net.Search;
+using Lucene.Net.Support;
+using Lucene.Net.Util;
+using System;
+using System.Collections.Generic;
+using System.Globalization;
+using System.IO;
+using System.Linq;
+using System.Text;
+using System.Threading.Tasks;
+using Operator = Lucene.Net.QueryParsers.Classic.QueryParser.Operator;
+using Version = Lucene.Net.Util.Version;
+
+namespace Lucene.Net.QueryParsers.Classic
+{
+    public abstract class QueryParserBase : ICommonQueryParserConfiguration
+    {
+        /** Do not catch this exception in your code; it means you are using methods that you should no longer use. */
+        public class MethodRemovedUseAnother : Exception { }
+
+        internal const int CONJ_NONE = 0;
+        internal const int CONJ_AND = 1;
+        internal const int CONJ_OR = 2;
+
+        internal const int MOD_NONE = 0;
+        internal const int MOD_NOT = 10;
+        internal const int MOD_REQ = 11;
+
+        // make it possible to call setDefaultOperator() without accessing
+        // the nested class:
+        /** Alternative form of QueryParser.Operator.AND */
+        public static readonly Operator AND_OPERATOR = Operator.AND;
+        /** Alternative form of QueryParser.Operator.OR */
+        public static readonly Operator OR_OPERATOR = Operator.OR;
+
+        /** The actual operator the parser uses to combine query terms. */
+        internal Operator operator_renamed = OR_OPERATOR;
+
+        internal bool lowercaseExpandedTerms = true;
+        internal MultiTermQuery.RewriteMethod multiTermRewriteMethod = MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT;
+        internal bool allowLeadingWildcard = false;
+        internal bool enablePositionIncrements = true;
+
+        internal Analyzer analyzer;
+        internal String field;
+        internal int phraseSlop = 0;
+        internal float fuzzyMinSim = FuzzyQuery.defaultMinSimilarity;
+        internal int fuzzyPrefixLength = FuzzyQuery.defaultPrefixLength;
+        internal CultureInfo locale = CultureInfo.InvariantCulture;
+        internal TimeZone timeZone = TimeZone.CurrentTimeZone;
+
+        // the default date resolution
+        internal DateTools.Resolution dateResolution = null;
+        // maps field names to date resolutions
+        internal IDictionary<String, DateTools.Resolution> fieldToDateResolution = null;
+
+        // Whether or not to analyze range terms when constructing range queries
+        // (for example, analyzing terms into collation keys for a locale-sensitive RangeQuery)
+        internal bool analyzeRangeTerms = false;
+
+        internal bool autoGeneratePhraseQueries;
+
+        // So the generated QueryParser(CharStream) won't error out
+        protected QueryParserBase()
+        {
+        }
+
+        public void Init(Version matchVersion, String f, Analyzer a)
+        {
+            analyzer = a;
+            field = f;
+            if (matchVersion.OnOrAfter(Version.LUCENE_31))
+            {
+                AutoGeneratePhraseQueries = false;
+            }
+            else
+            {
+                AutoGeneratePhraseQueries = true;
+            }
+        }
+
+        // the generated parser will create these in QueryParser
+        public abstract void ReInit(ICharStream stream);
+        public abstract Query TopLevelQuery(String field);
+
+        public Query Parse(String query)
+        {
+            ReInit(new FastCharStream(new StringReader(query)));
+            try
+            {
+                // TopLevelQuery is a Query followed by the end-of-input (EOF)
+                Query res = TopLevelQuery(field);
+                return res != null ? res : NewBooleanQuery(false);
+            }
+            catch (ParseException tme)
+            {
+                // rethrow to include the original query:
+                ParseException e = new ParseException("Cannot parse '" + query + "': " + tme.Message, tme);
+                throw e;
+            }
+            catch (TokenMgrError tme)
+            {
+                ParseException e = new ParseException("Cannot parse '" + query + "': " + tme.Message, tme);
+                throw e;
+            }
+            catch (BooleanQuery.TooManyClauses tmc)
+            {
+                ParseException e = new ParseException("Cannot parse '" + query + "': too many boolean clauses", tmc);
+                throw e;
+            }
+        }
+
+        public Analyzer Analyzer
+        {
+            get { return analyzer; }
+        }
+
+        public string Field
+        {
+            get { return field; }
+        }
+
+        public bool AutoGeneratePhraseQueries
+        {
+            get { return autoGeneratePhraseQueries; }
+            set { autoGeneratePhraseQueries = value; }
+        }
+
+        public float FuzzyMinSim
+        {
+            get { return fuzzyMinSim; }
+            set { fuzzyMinSim = value; }
+        }
+
+        public int FuzzyPrefixLength
+        {
+            get { return fuzzyPrefixLength; }
+            set { fuzzyPrefixLength = value; }
+        }
+
+        public int PhraseSlop
+        {
+            get { return phraseSlop; }
+            set { phraseSlop = value; }
+        }
+
+        public bool AllowLeadingWildcard
+        {
+            get { return allowLeadingWildcard; }
+            set { allowLeadingWildcard = value; }
+        }
+
+        public bool EnablePositionIncrements
+        {
+            get { return enablePositionIncrements; }
+            set { enablePositionIncrements = value; }
+        }
+
+        public Operator DefaultOperator
+        {
+            get { return operator_renamed; }
+            set { operator_renamed = value; }
+        }
+
+        public bool LowercaseExpandedTerms
+        {
+            get { return lowercaseExpandedTerms; }
+            set { lowercaseExpandedTerms = value; }
+        }
+
+        public MultiTermQuery.RewriteMethod MultiTermRewriteMethod
+        {
+            get { return multiTermRewriteMethod; }
+            set { multiTermRewriteMethod = value; }
+        }
+
+        public CultureInfo Locale
+        {
+            get { return locale; }
+            set { locale = value; }
+        }
+
+        public TimeZone TimeZone
+        {
+            get { return timeZone; }
+            set { timeZone = value; }
+        }
+
+        public DateTools.Resolution DateResolution
+        {
+            get { return dateResolution; }
+            set { dateResolution = value; }
+        }
+
+        public void SetDateResolution(string fieldName, DateTools.Resolution dateResolution)
+        {
+            if (fieldName == null)
+            {
+                throw new ArgumentException("Field cannot be null.");
+            }
+
+            if (fieldToDateResolution == null)
+            {
+                // lazily initialize HashMap
+                fieldToDateResolution = new HashMap<String, DateTools.Resolution>();
+            }
+
+            fieldToDateResolution[fieldName] = dateResolution;
+        }
+
+        public DateTools.Resolution GetDateResolution(string fieldName)
+        {
+            if (fieldName == null)
+            {
+                throw new ArgumentException("Field cannot be null.");
+            }
+
+            if (fieldToDateResolution == null)
+            {
+                // no field specific date resolutions set; return default date resolution instead
+                return this.dateResolution;
+            }
+
+            DateTools.Resolution resolution = fieldToDateResolution[fieldName];
+            if (resolution == null)
+            {
+                // no date resolutions set for the given field; return default date resolution instead
+                resolution = this.dateResolution;
+            }
+
+            return resolution;
+        }
+
+        public bool AnalyzeRangeTerms
+        {
+            get { return analyzeRangeTerms; }
+            set { analyzeRangeTerms = value; }
+        }
+
+        protected void AddClause(IList<BooleanClause> clauses, int conj, int mods, Query q)
+        {
+            bool required, prohibited;
+
+            // If this term is introduced by AND, make the preceding term required,
+            // unless it's already prohibited
+            if (clauses.Count > 0 && conj == CONJ_AND)
+            {
+                BooleanClause c = clauses[clauses.Count - 1];
+                if (!c.IsProhibited)
+                    c.Occur = Occur.MUST;
+            }
+
+            if (clauses.Count > 0 && operator_renamed == AND_OPERATOR && conj == CONJ_OR)
+            {
+                // If this term is introduced by OR, make the preceding term optional,
+                // unless it's prohibited (that means we keep "-a OR b" but rewrite
+                // "+a OR b" to "a OR b"). Note that if the input is "a OR b", the
+                // first term is parsed as required; without this modification
+                // "a OR b" would be parsed as "+a OR b".
+                BooleanClause c = clauses[clauses.Count - 1];
+                if (!c.IsProhibited)
+                    c.Occur = Occur.SHOULD;
+            }
+
+            // We might have been passed a null query; the term might have been
+            // filtered away by the analyzer.
+            if (q == null)
+                return;
+
+            if (operator_renamed == OR_OPERATOR)
+            {
+                // We set REQUIRED if we're introduced by AND or +; PROHIBITED if
+                // introduced by NOT or -; make sure not to set both.
+                prohibited = (mods == MOD_NOT);
+                required = (mods == MOD_REQ);
+                if (conj == CONJ_AND && !prohibited)
+                {
+                    required = true;
+                }
+            }
+            else
+            {
+                // We set PROHIBITED if we're introduced by NOT or -; We set REQUIRED
+                // if not PROHIBITED and not introduced by OR
+                prohibited = (mods == MOD_NOT);
+                required = (!prohibited && conj != CONJ_OR);
+            }
+            if (required && !prohibited)
+                clauses.Add(NewBooleanClause(q, Occur.MUST));
+            else if (!required && !prohibited)
+                clauses.Add(NewBooleanClause(q, Occur.SHOULD));
+            else if (!required && prohibited)
+                clauses.Add(NewBooleanClause(q, Occur.MUST_NOT));
+            else
+                throw new SystemException("Clause cannot be both required and prohibited");
+        }
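+
+        // Illustrative example (not in the original source): with the default OR
+        // operator, parsing "+apple -pear banana" routes through AddClause three
+        // times and yields Occur.MUST for apple, Occur.MUST_NOT for pear, and
+        // Occur.SHOULD for banana.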
+
+        protected Query GetFieldQuery(String field, String queryText, bool quoted)
+        {
+            return NewFieldQuery(analyzer, field, queryText, quoted);
+        }
+
+        protected Query NewFieldQuery(Analyzer analyzer, String field, String queryText, bool quoted)
+        {
+            // Use the analyzer to get all the tokens, and then build a TermQuery,
+            // PhraseQuery, or nothing based on the term count
+
+            TokenStream source;
+            try
+            {
+                source = analyzer.TokenStream(field, new StringReader(queryText));
+                source.Reset();
+            }
+            catch (IOException e)
+            {
+                ParseException p = new ParseException("Unable to initialize TokenStream to analyze query text", e);
+                throw p;
+            }
+            CachingTokenFilter buffer = new CachingTokenFilter(source);
+            ITermToBytesRefAttribute termAtt = null;
+            IPositionIncrementAttribute posIncrAtt = null;
+            int numTokens = 0;
+
+            buffer.Reset();
+
+            if (buffer.HasAttribute<ITermToBytesRefAttribute>())
+            {
+                termAtt = buffer.GetAttribute<ITermToBytesRefAttribute>();
+            }
+            if (buffer.HasAttribute<IPositionIncrementAttribute>())
+            {
+                posIncrAtt = buffer.GetAttribute<IPositionIncrementAttribute>();
+            }
+
+            int positionCount = 0;
+            bool severalTokensAtSamePosition = false;
+
+            bool hasMoreTokens = false;
+            if (termAtt != null)
+            {
+                try
+                {
+                    hasMoreTokens = buffer.IncrementToken();
+                    while (hasMoreTokens)
+                    {
+                        numTokens++;
+                        int positionIncrement = (posIncrAtt != null) ? posIncrAtt.PositionIncrement : 1;
+                        if (positionIncrement != 0)
+                        {
+                            positionCount += positionIncrement;
+                        }
+                        else
+                        {
+                            severalTokensAtSamePosition = true;
+                        }
+                        hasMoreTokens = buffer.IncrementToken();
+                    }
+                }
+                catch (IOException e)
+                {
+                    // ignore
+                }
+            }
+            try
+            {
+                // rewind the buffer stream
+                buffer.Reset();
+
+                // close original stream - all tokens buffered
+                source.Dispose();
+            }
+            catch (IOException e)
+            {
+                ParseException p = new ParseException("Cannot close TokenStream analyzing query text", e);
+                throw p;
+            }
+
+            BytesRef bytes = termAtt == null ? null : termAtt.BytesRef;
+
+            if (numTokens == 0)
+                return null;
+            else if (numTokens == 1)
+            {
+                try
+                {
+                    bool hasNext = buffer.IncrementToken();
+                    //assert hasNext == true;
+                    termAtt.FillBytesRef();
+                }
+                catch (IOException e)
+                {
+                    // safe to ignore, because we know the number of tokens
+                }
+                return NewTermQuery(new Term(field, BytesRef.DeepCopyOf(bytes)));
+            }
+            else
+            {
+                if (severalTokensAtSamePosition || (!quoted && !autoGeneratePhraseQueries))
+                {
+                    if (positionCount == 1 || (!quoted && !autoGeneratePhraseQueries))
+                    {
+                        // no phrase query:
+
+                        if (positionCount == 1)
+                        {
+                            // simple case: only one position, with synonyms
+                            BooleanQuery q = NewBooleanQuery(true);
+                            for (int i = 0; i < numTokens; i++)
+                            {
+                                try
+                                {
+                                    bool hasNext = buffer.IncrementToken();
+                                    //assert hasNext == true;
+                                    termAtt.FillBytesRef();
+                                }
+                                catch (IOException e)
+                                {
+                                    // safe to ignore, because we know the number of tokens
+                                }
+                                Query currentQuery = NewTermQuery(
+                                    new Term(field, BytesRef.DeepCopyOf(bytes)));
+                                q.Add(currentQuery, Occur.SHOULD);
+                            }
+                            return q;
+                        }
+                        else
+                        {
+                            // multiple positions
+                            BooleanQuery q = NewBooleanQuery(false);
+                            Occur occur = operator_renamed == Operator.AND ? Occur.MUST : Occur.SHOULD;
+                            Query currentQuery = null;
+                            for (int i = 0; i < numTokens; i++)
+                            {
+                                try
+                                {
+                                    bool hasNext = buffer.IncrementToken();
+                                    //assert hasNext == true;
+                                    termAtt.FillBytesRef();
+                                }
+                                catch (IOException e)
+                                {
+                                    // safe to ignore, because we know the number of tokens
+                                }
+                                if (posIncrAtt != null && posIncrAtt.PositionIncrement == 0)
+                                {
+                                    if (!(currentQuery is BooleanQuery))
+                                    {
+                                        Query t = currentQuery;
+                                        currentQuery = NewBooleanQuery(true);
+                                        ((BooleanQuery)currentQuery).Add(t, Occur.SHOULD);
+                                    }
+                                    ((BooleanQuery)currentQuery).Add(NewTermQuery(new Term(field, BytesRef.DeepCopyOf(bytes))), Occur.SHOULD);
+                                }
+                                else
+                                {
+                                    if (currentQuery != null)
+                                    {
+                                        q.Add(currentQuery, occur);
+                                    }
+                                    currentQuery = NewTermQuery(new Term(field, BytesRef.DeepCopyOf(bytes)));
+                                }
+                            }
+                            q.Add(currentQuery, occur);
+                            return q;
+                        }
+                    }
+                    else
+                    {
+                        // phrase query:
+                        MultiPhraseQuery mpq = NewMultiPhraseQuery();
+                        mpq.Slop = phraseSlop;
+                        List<Term> multiTerms = new List<Term>();
+                        int position = -1;
+                        for (int i = 0; i < numTokens; i++)
+                        {
+                            int positionIncrement = 1;
+                            try
+                            {
+                                bool hasNext = buffer.IncrementToken();
+                                //assert hasNext == true;
+                                termAtt.FillBytesRef();
+                                if (posIncrAtt != null)
+                                {
+                                    positionIncrement = posIncrAtt.PositionIncrement;
+                                }
+                            }
+                            catch (IOException e)
+                            {
+                                // safe to ignore, because we know the number of tokens
+                            }
+
+                            if (positionIncrement > 0 && multiTerms.Count > 0)
+                            {
+                                if (enablePositionIncrements)
+                                {
+                                    mpq.Add(multiTerms.ToArray(), position);
+                                }
+                                else
+                                {
+                                    mpq.Add(multiTerms.ToArray());
+                                }
+                                multiTerms.Clear();
+                            }
+                            position += positionIncrement;
+                            multiTerms.Add(new Term(field, BytesRef.DeepCopyOf(bytes)));
+                        }
+                        if (enablePositionIncrements)
+                        {
+                            mpq.Add(multiTerms.ToArray(), position);
+                        }
+                        else
+                        {
+                            mpq.Add(multiTerms.ToArray());
+                        }
+                        return mpq;
+                    }
+                }
+                else
+                {
+                    PhraseQuery pq = NewPhraseQuery();
+                    pq.Slop = phraseSlop;
+                    int position = -1;
+
+                    for (int i = 0; i < numTokens; i++)
+                    {
+                        int positionIncrement = 1;
+
+                        try
+                        {
+                            bool hasNext = buffer.IncrementToken();
+                            //assert hasNext == true;
+                            termAtt.FillBytesRef();
+                            if (posIncrAtt != null)
+                            {
+                                positionIncrement = posIncrAtt.PositionIncrement;
+                            }
+                        }
+                        catch (IOException e)
+                        {
+                            // safe to ignore, because we know the number of tokens
+                        }
+
+                        if (enablePositionIncrements)
+                        {
+                            position += positionIncrement;
+                            pq.Add(new Term(field, BytesRef.DeepCopyOf(bytes)), position);
+                        }
+                        else
+                        {
+                            pq.Add(new Term(field, BytesRef.DeepCopyOf(bytes)));
+                        }
+                    }
+                    return pq;
+                }
+            }
+        }
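+
+        // Illustrative summary (not in the original source): a single analyzed
+        // token yields a TermQuery; several tokens at one position (e.g. synonyms)
+        // yield a BooleanQuery of SHOULD clauses; stacked tokens across multiple
+        // positions yield a MultiPhraseQuery when a phrase is wanted; and a plain
+        // multi-token phrase yields a PhraseQuery honoring phraseSlop and
+        // position increments.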
+
+        protected Query GetFieldQuery(String field, String queryText, int slop)
+        {
+            Query query = GetFieldQuery(field, queryText, true);
+
+            if (query is PhraseQuery)
+            {
+                ((PhraseQuery)query).Slop = slop;
+            }
+            if (query is MultiPhraseQuery)
+            {
+                ((MultiPhraseQuery)query).Slop = slop;
+            }
+
+            return query;
+        }
+
+        protected Query GetRangeQuery(String field,
+                                String part1,
+                                String part2,
+                                bool startInclusive,
+                                bool endInclusive)
+        {
+            if (lowercaseExpandedTerms)
+            {
+                part1 = part1 == null ? null : part1.ToLower(locale);
+                part2 = part2 == null ? null : part2.ToLower(locale);
+            }
+
+
+            //DateTimeFormatInfo df = DateTimeFormatInfo.GetInstance(locale);
+            //df.setLenient(true);
+            DateTools.Resolution resolution = GetDateResolution(field);
+
+            try
+            {
+                part1 = DateTools.DateToString(DateTime.Parse(part1, locale), resolution);
+            }
+            catch (Exception e) { }
+
+            try
+            {
+                DateTime d2 = DateTime.Parse(part2, locale);
+                if (endInclusive)
+                {
+                    // The user can only specify the date, not the time, so make sure
+                    // the time is set to the latest possible time of that date to really
+                    // include all documents:
+                    d2 = d2.AddHours(23);
+                    d2 = d2.AddMinutes(59);
+                    d2 = d2.AddSeconds(59);
+                    d2 = d2.AddMilliseconds(999);
+                    // .NET Port TODO: is this right?
+                }
+                part2 = DateTools.DateToString(d2, resolution);
+            }
+            catch (Exception e) { }
+
+            return NewRangeQuery(field, part1, part2, startInclusive, endInclusive);
+        }
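+
+        // Illustrative example (not in the original source): with a DAY date
+        // resolution configured for the field, GetRangeQuery("date", "2004-02-01",
+        // "2004-02-03", true, true) parses both endpoints as dates and rewrites
+        // them via DateTools.DateToString before building the range query;
+        // non-date input falls through the empty catch blocks and is used verbatim.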
+
+        protected BooleanQuery NewBooleanQuery(bool disableCoord)
+        {
+            return new BooleanQuery(disableCoord);
+        }
+
+        protected BooleanClause NewBooleanClause(Query q, Occur occur)
+        {
+            return new BooleanClause(q, occur);
+        }
+
+        protected Query NewTermQuery(Term term)
+        {
+            return new TermQuery(term);
+        }
+
+        protected PhraseQuery NewPhraseQuery()
+        {
+            return new PhraseQuery();
+        }
+
+        protected MultiPhraseQuery NewMultiPhraseQuery()
+        {
+            return new MultiPhraseQuery();
+        }
+
+        protected Query NewPrefixQuery(Term prefix)
+        {
+            PrefixQuery query = new PrefixQuery(prefix);
+            query.SetRewriteMethod(multiTermRewriteMethod);
+            return query;
+        }
+
+        protected Query NewRegexpQuery(Term regexp)
+        {
+            RegexpQuery query = new RegexpQuery(regexp);
+            query.SetRewriteMethod(multiTermRewriteMethod);
+            return query;
+        }
+
+        protected Query NewFuzzyQuery(Term term, float minimumSimilarity, int prefixLength)
+        {
+            // FuzzyQuery doesn't yet allow constant score rewrite
+            String text = term.Text;
+            int numEdits = FuzzyQuery.FloatToEdits(minimumSimilarity,
+                text.Length);
+            return new FuzzyQuery(term, numEdits, prefixLength);
+        }
+
+        // TODO: Should this be protected instead?
+        private BytesRef AnalyzeMultitermTerm(String field, String part)
+        {
+            return AnalyzeMultitermTerm(field, part, analyzer);
+        }
+
+        protected BytesRef AnalyzeMultitermTerm(String field, String part, Analyzer analyzerIn)
+        {
+            TokenStream source;
+
+            if (analyzerIn == null) analyzerIn = analyzer;
+
+            try
+            {
+                source = analyzerIn.TokenStream(field, new StringReader(part));
+                source.Reset();
+            }
+            catch (IOException e)
+            {
+                throw new SystemException("Unable to initialize TokenStream to analyze multiTerm term: " + part, e);
+            }
+
+            ITermToBytesRefAttribute termAtt = source.GetAttribute<ITermToBytesRefAttribute>();
+            BytesRef bytes = termAtt.BytesRef;
+
+            try
+            {
+                if (!source.IncrementToken())
+                    throw new ArgumentException("analyzer returned no terms for multiTerm term: " + part);
+                termAtt.FillBytesRef();
+                if (source.IncrementToken())
+                    throw new ArgumentException("analyzer returned too many terms for multiTerm term: " + part);
+            }
+            catch (IOException e)
+            {
+                throw new SystemException("error analyzing range part: " + part, e);
+            }
+
+            try
+            {
+                source.End();
+                source.Dispose();
+            }
+            catch (IOException e)
+            {
+                throw new SystemException("Unable to end & close TokenStream after analyzing multiTerm term: " + part, e);
+            }
+
+            return BytesRef.DeepCopyOf(bytes);
+        }
+
+        protected Query NewRangeQuery(String field, String part1, String part2, bool startInclusive, bool endInclusive)
+        {
+            BytesRef start;
+            BytesRef end;
+
+            if (part1 == null)
+            {
+                start = null;
+            }
+            else
+            {
+                start = analyzeRangeTerms ? AnalyzeMultitermTerm(field, part1) : new BytesRef(part1);
+            }
+
+            if (part2 == null)
+            {
+                end = null;
+            }
+            else
+            {
+                end = analyzeRangeTerms ? AnalyzeMultitermTerm(field, part2) : new BytesRef(part2);
+            }
+
+            TermRangeQuery query = new TermRangeQuery(field, start, end, startInclusive, endInclusive);
+
+            query.SetRewriteMethod(multiTermRewriteMethod);
+            return query;
+        }
+
+        protected Query NewMatchAllDocsQuery()
+        {
+            return new MatchAllDocsQuery();
+        }
+
+        protected Query NewWildcardQuery(Term t)
+        {
+            WildcardQuery query = new WildcardQuery(t);
+            query.SetRewriteMethod(multiTermRewriteMethod);
+            return query;
+        }
+
+        protected Query GetBooleanQuery(IList<BooleanClause> clauses)
+        {
+            return GetBooleanQuery(clauses, false);
+        }
+
+        protected Query GetBooleanQuery(IList<BooleanClause> clauses, bool disableCoord)
+        {
+            if (clauses.Count == 0)
+            {
+                return null; // all clause words were filtered away by the analyzer.
+            }
+            BooleanQuery query = NewBooleanQuery(disableCoord);
+            foreach (BooleanClause clause in clauses)
+            {
+                query.Add(clause);
+            }
+            return query;
+        }
+
+        protected Query GetWildcardQuery(String field, String termStr)
+        {
+            if ("*".Equals(field))
+            {
+                if ("*".Equals(termStr)) return NewMatchAllDocsQuery();
+            }
+            if (!allowLeadingWildcard && (termStr.StartsWith("*") || termStr.StartsWith("?")))
+                throw new ParseException("'*' or '?' not allowed as first character in WildcardQuery");
+            if (lowercaseExpandedTerms)
+            {
+                termStr = termStr.ToLower(locale);
+            }
+            Term t = new Term(field, termStr);
+            return NewWildcardQuery(t);
+        }
+
+        protected Query GetRegexpQuery(String field, String termStr)
+        {
+            if (lowercaseExpandedTerms)
+            {
+                termStr = termStr.ToLower(locale);
+            }
+            Term t = new Term(field, termStr);
+            return NewRegexpQuery(t);
+        }
+
+        protected Query GetPrefixQuery(String field, String termStr)
+        {
+            if (!allowLeadingWildcard && termStr.StartsWith("*"))
+                throw new ParseException("'*' not allowed as first character in PrefixQuery");
+            if (lowercaseExpandedTerms)
+            {
+                termStr = termStr.ToLower(locale);
+            }
+            Term t = new Term(field, termStr);
+            return NewPrefixQuery(t);
+        }
+
+        protected Query GetFuzzyQuery(String field, String termStr, float minSimilarity)
+        {
+            if (lowercaseExpandedTerms)
+            {
+                termStr = termStr.ToLower(locale);
+            }
+            Term t = new Term(field, termStr);
+            return NewFuzzyQuery(t, minSimilarity, fuzzyPrefixLength);
+        }
+
+        internal Query HandleBareTokenQuery(String qfield, Token term, Token fuzzySlop, bool prefix, bool wildcard, bool fuzzy, bool regexp)
+        {
+            Query q;
+
+            String termImage = DiscardEscapeChar(term.image);
+            if (wildcard)
+            {
+                q = GetWildcardQuery(qfield, term.image);
+            }
+            else if (prefix)
+            {
+                q = GetPrefixQuery(qfield,
+                    DiscardEscapeChar(term.image.Substring
+                        (0, term.image.Length - 1)));
+            }
+            else if (regexp)
+            {
+                q = GetRegexpQuery(qfield, term.image.Substring(1, term.image.Length - 2)); // strip the enclosing '/' delimiters (Java substring(1, len - 1))
+            }
+            else if (fuzzy)
+            {
+                q = HandleBareFuzzy(qfield, fuzzySlop, termImage);
+            }
+            else
+            {
+                q = GetFieldQuery(qfield, termImage, false);
+            }
+            return q;
+        }
+
+        internal Query HandleBareFuzzy(String qfield, Token fuzzySlop, String termImage)
+        {
+            Query q;
+            float fms = fuzzyMinSim;
+            try
+            {
+                fms = float.Parse(fuzzySlop.image.Substring(1), CultureInfo.InvariantCulture); // match Java's culture-independent Float.parseFloat
+            }
+            catch (Exception) { }
+            if (fms < 0.0f)
+            {
+                throw new ParseException("Minimum similarity for a FuzzyQuery has to be between 0.0f and 1.0f !");
+            }
+            else if (fms >= 1.0f && fms != (int)fms)
+            {
+                throw new ParseException("Fractional edit distances are not allowed!");
+            }
+            q = GetFuzzyQuery(qfield, termImage, fms);
+            return q;
+        }
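+
+        // Illustrative examples (not in the original source): "~0.8" yields
+        // fms = 0.8f; "~2" is allowed (a whole number of edits); "~1.5" throws
+        // "Fractional edit distances are not allowed!".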
+
+        internal Query HandleQuotedTerm(String qfield, Token term, Token fuzzySlop)
+        {
+            int s = phraseSlop;  // default
+            if (fuzzySlop != null)
+            {
+                try
+                {
+                    s = (int)float.Parse(fuzzySlop.image.Substring(1), CultureInfo.InvariantCulture); // match Java's culture-independent Float.parseFloat
+                }
+                catch (Exception ignored) { }
+            }
+            return GetFieldQuery(qfield, DiscardEscapeChar(term.image.Substring(1, term.image.Length - 2)), s); // strip surrounding quotes (Java substring(1, len - 1))
+        }
+
+        internal Query HandleBoost(Query q, Token boost)
+        {
+            if (boost != null)
+            {
+                float f = (float)1.0;
+                try
+                {
+                    f = float.Parse(boost.image, CultureInfo.InvariantCulture); // match Java's culture-independent Float.parseFloat
+                }
+                catch (Exception)
+                {
+                    /* Should this be handled somehow? (Defaults to "no boost"
+                     * if the boost number is invalid.)
+                     */
+                }
+
+                // avoid boosting null queries, such as those caused by stop words
+                if (q != null)
+                {
+                    q.Boost = f;
+                }
+            }
+            return q;
+        }
+
+        internal String DiscardEscapeChar(String input)
+        {
+            // Create char array to hold unescaped char sequence
+            char[] output = new char[input.Length];
+
+            // The length of the output can be less than the input
+            // due to discarded escape chars. This variable holds
+            // the actual length of the output
+            int length = 0;
+
+            // We remember whether the last processed character was
+            // an escape character
+            bool lastCharWasEscapeChar = false;
+
+            // The multiplier the current unicode digit must be multiplied with.
+            // E. g. the first digit must be multiplied with 16^3, the second with 16^2...
+            int codePointMultiplier = 0;
+
+            // Used to calculate the codepoint of the escaped unicode character
+            int codePoint = 0;
+
+            for (int i = 0; i < input.Length; i++)
+            {
+                char curChar = input[i];
+                if (codePointMultiplier > 0)
+                {
+                    codePoint += HexToInt(curChar) * codePointMultiplier;
+                    codePointMultiplier = Number.URShift(codePointMultiplier, 4);
+                    if (codePointMultiplier == 0)
+                    {
+                        output[length++] = (char)codePoint;
+                        codePoint = 0;
+                    }
+                }
+                else if (lastCharWasEscapeChar)
+                {
+                    if (curChar == 'u')
+                    {
+                        // found an escaped unicode character
+                        codePointMultiplier = 16 * 16 * 16;
+                    }
+                    else
+                    {
+                        // this character was escaped
+                        output[length] = curChar;
+                        length++;
+                    }
+                    lastCharWasEscapeChar = false;
+                }
+                else
+                {
+                    if (curChar == '\\')
+                    {
+                        lastCharWasEscapeChar = true;
+                    }
+                    else
+                    {
+                        output[length] = curChar;
+                        length++;
+                    }
+                }
+            }
+
+            if (codePointMultiplier > 0)
+            {
+                throw new ParseException("Truncated unicode escape sequence.");
+            }
+
+            if (lastCharWasEscapeChar)
+            {
+                throw new ParseException("Term can not end with escape character.");
+            }
+
+            return new String(output, 0, length);
+        }
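+
+        // Illustrative examples (not in the original source):
+        //   DiscardEscapeChar(@"a\:b")    == "a:b"
+        //   DiscardEscapeChar(@"\u0041!") == "A!"
+        // A trailing backslash or a truncated \uXXXX escape throws ParseException.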
+
+        internal static int HexToInt(char c)
+        {
+            if ('0' <= c && c <= '9')
+            {
+                return c - '0';
+            }
+            else if ('a' <= c && c <= 'f')
+            {
+                return c - 'a' + 10;
+            }
+            else if ('A' <= c && c <= 'F')
+            {
+                return c - 'A' + 10;
+            }
+            else
+            {
+                throw new ParseException("Non-hex character in Unicode escape sequence: " + c);
+            }
+        }
+
+        public static String Escape(String s)
+        {
+            StringBuilder sb = new StringBuilder();
+            for (int i = 0; i < s.Length; i++)
+            {
+                char c = s[i];
+                // These characters are part of the query syntax and must be escaped
+                if (c == '\\' || c == '+' || c == '-' || c == '!' || c == '(' || c == ')' || c == ':'
+                  || c == '^' || c == '[' || c == ']' || c == '\"' || c == '{' || c == '}' || c == '~'
+                  || c == '*' || c == '?' || c == '|' || c == '&' || c == '/')
+                {
+                    sb.Append('\\');
+                }
+                sb.Append(c);
+            }
+            return sb.ToString();
+        }
+    }
+}

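A minimal sketch of the escaping round trip. Note that DiscardEscapeChar is
internal, so outside the parser only the public static Escape is callable; the
parser itself undoes the escapes while consuming tokens:

    string escaped = QueryParserBase.Escape("1+1:2");  // yields "1\+1\:2"
    // Passing the escaped string to Parse() treats '+' and ':' as literal
    // characters rather than query syntax.
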

[13/50] [abbrv] git commit: Port: more util test classes

Posted by mh...@apache.org.
Port: more util test classes


Project: http://git-wip-us.apache.org/repos/asf/lucenenet/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucenenet/commit/e02cc69c
Tree: http://git-wip-us.apache.org/repos/asf/lucenenet/tree/e02cc69c
Diff: http://git-wip-us.apache.org/repos/asf/lucenenet/diff/e02cc69c

Branch: refs/heads/branch_4x
Commit: e02cc69c378e5673f46eb2bc344ef0cec782aa03
Parents: b713f3b
Author: James Blair <jm...@gmail.com>
Authored: Wed Jul 17 18:17:49 2013 -0400
Committer: James Blair <jm...@gmail.com>
Committed: Wed Jul 17 18:17:49 2013 -0400

----------------------------------------------------------------------
 test/core/Lucene.Net.Test.csproj           |   5 +-
 test/core/Util/Cache/TestSimpleLRUCache.cs |  77 -----
 test/core/Util/Fst/Test2BFST.cs            | 335 +++++++++++++++++++
 test/core/Util/Fst/TestBytesStore.cs       | 408 ++++++++++++++++++++++++
 test/core/Util/TestFilterIterator.cs       |   5 +
 test/core/Util/TestFixedBitSet.cs          | 373 ++++++++++++++++++++++
 test/core/Util/TestIdentityHashSet.cs      |  45 +++
 7 files changed, 1169 insertions(+), 79 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucenenet/blob/e02cc69c/test/core/Lucene.Net.Test.csproj
----------------------------------------------------------------------
diff --git a/test/core/Lucene.Net.Test.csproj b/test/core/Lucene.Net.Test.csproj
index 2544dd6..cccd979 100644
--- a/test/core/Lucene.Net.Test.csproj
+++ b/test/core/Lucene.Net.Test.csproj
@@ -535,10 +535,12 @@
     <Compile Include="Util\Automaton\TestMinimize.cs" />
     <Compile Include="Util\Automaton\TestSpecialOperations.cs" />
     <Compile Include="Util\Automaton\TestUTF32ToUTF8.cs" />
-    <Compile Include="Util\Cache\TestSimpleLRUCache.cs" />
     <Compile Include="Util\English.cs">
       <SubType>Code</SubType>
     </Compile>
+    <Compile Include="Util\Fst\Test2BFST.cs" />
+    <Compile Include="Util\Fst\TestBytesStore.cs" />
+    <Compile Include="Util\Fst\TestFSTs.cs" />
     <Compile Include="Util\LocalizedTestCase.cs" />
     <Compile Include="Util\Paths.cs" />
     <Compile Include="Util\StressRamUsageEstimator.cs" />
@@ -648,7 +650,6 @@
     <Content Include="UpdatedTests.txt" />
   </ItemGroup>
   <ItemGroup>
-    <Folder Include="Util\Fst\" />
     <Folder Include="Util\Packed\" />
   </ItemGroup>
   <Import Project="$(MSBuildBinPath)\Microsoft.CSharp.targets" />

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/e02cc69c/test/core/Util/Cache/TestSimpleLRUCache.cs
----------------------------------------------------------------------
diff --git a/test/core/Util/Cache/TestSimpleLRUCache.cs b/test/core/Util/Cache/TestSimpleLRUCache.cs
deleted file mode 100644
index a33c2c7..0000000
--- a/test/core/Util/Cache/TestSimpleLRUCache.cs
+++ /dev/null
@@ -1,77 +0,0 @@
-/* 
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- * 
- * http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-using System;
-
-using NUnit.Framework;
-
-using LuceneTestCase = Lucene.Net.Util.LuceneTestCase;
-
-namespace Lucene.Net.Util.Cache
-{
-	
-	[TestFixture]
-	public class TestSimpleLRUCache:LuceneTestCase
-	{
-		
-		[Test]
-		public virtual void  TestLRUCache()
-		{
-			int n = 100;
-            object dummy = new object();
-
-            Cache<int, object> cache = new SimpleLRUCache<int, object>(n);
-			
-			for (int i = 0; i < n; i++)
-			{
-				cache.Put(i, dummy);
-			}
-			
-			// access every 2nd item in cache
-			for (int i = 0; i < n; i += 2)
-			{
-				Assert.IsNotNull(cache.Get(i));
-			}
-			
-			// add n/2 elements to cache, the ones that weren't
-			// touched in the previous loop should now be thrown away
-			for (int i = n; i < n + (n / 2); i++)
-			{
-				cache.Put(i, dummy);
-			}
-			
-			// access every 4th item in cache
-			for (int i = 0; i < n; i += 4)
-			{
-				Assert.IsNotNull(cache.Get(i));
-			}
-			
-			// add 3/4n elements to cache, the ones that weren't
-			// touched in the previous loops should now be thrown away
-			for (int i = n; i < n + (n * 3 / 4); i++)
-			{
-				cache.Put(i, dummy);
-			}
-			
-			// access every 4th item in cache
-			for (int i = 0; i < n; i += 4)
-			{
-				Assert.IsNotNull(cache.Get(i));
-			}
-		}
-	}
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/e02cc69c/test/core/Util/Fst/Test2BFST.cs
----------------------------------------------------------------------
diff --git a/test/core/Util/Fst/Test2BFST.cs b/test/core/Util/Fst/Test2BFST.cs
new file mode 100644
index 0000000..857f912
--- /dev/null
+++ b/test/core/Util/Fst/Test2BFST.cs
@@ -0,0 +1,335 @@
+using System;
+using Lucene.Net.Store;
+using Lucene.Net.Support;
+using Lucene.Net.Test.Support;
+using Lucene.Net.Util;
+using Lucene.Net.Util.Fst;
+using Lucene.Net.Util.Packed;
+using NUnit.Framework;
+
+namespace Lucene.Net.Test.Util.Fst
+{
+    [TestFixture]
+    [Ignore("Requires tons of heap to run (10G works)")]
+    [Timeout(360000000)] // @TimeoutSuite(millis = 100 * TimeUnits.HOUR)
+    public class Test2BFST : LuceneTestCase
+    {
+        private static long LIMIT = 3L * 1024 * 1024 * 1024;
+
+        [Test]
+        public void Test()
+        {
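+            // Grows three FST variants (no outputs, byte outputs, long outputs) past
+            // the ~3 GB / 3 B-node LIMIT, then verifies lookups, enumeration, and save/load.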
+            var ints = new int[7];
+            var input = new IntsRef(ints, 0, ints.Length);
+            var seed = new Random().NextLong();
+
+            Directory dir = new MMapDirectory(_TestUtil.GetTempDir("2BFST"));
+
+            for (var doPackIter = 0; doPackIter < 2; doPackIter++)
+            {
+                var doPack = doPackIter == 1;
+
+                // Build FST w/ NoOutputs and stop when nodeCount > 3B
+                if (!doPack)
+                {
+                    Console.WriteLine("\nTEST: 3B nodes; doPack=false output=NO_OUTPUTS");
+                    Outputs<object> outputs = NoOutputs.GetSingleton();
+                    var NO_OUTPUT = outputs.GetNoOutput();
+                    var b = new Builder<object>(FST.INPUT_TYPE.BYTE1, 0, 0, false, false, int.MaxValue, outputs,
+                                                                  null, doPack, PackedInts.COMPACT, true, 15);
+
+                    var count = 0;
+                    var r = new Random((int)seed);
+                    var ints2 = new int[200];
+                    var input2 = new IntsRef(ints2, 0, ints2.Length);
+                    while (true)
+                    {
+                        //Console.WriteLine("add: " + input + " -> " + output);
+                        for (var i = 10; i < ints2.Length; i++)
+                        {
+                            ints2[i] = r.Next(256);
+                        }
+                        b.Add(input2, NO_OUTPUT);
+                        count++;
+                        if (count % 100000 == 0)
+                        {
+                            Console.WriteLine(count + ": " + b.FstSizeInBytes() + " bytes; " + b.TotStateCount + " nodes");
+                        }
+                        if (b.TotStateCount > LIMIT)
+                        {
+                            break;
+                        }
+                        NextInput(r, ints2);
+                    }
+
+                    var fst = b.Finish();
+
+                    for (var verify = 0; verify < 2; verify++)
+                    {
+                        Console.WriteLine("\nTEST: now verify [fst size=" + fst.SizeInBytes() + "; nodeCount=" + fst.NodeCount + "; arcCount=" + fst.ArcCount + "]");
+
+                        Arrays.Fill(ints2, 0);
+                        r = new Random((int)seed);
+
+                        for (var i = 0; i < count; i++)
+                        {
+                            if (i % 1000000 == 0)
+                            {
+                                Console.WriteLine(i + "...: ");
+                            }
+                            for (int j = 10; j < ints2.Length; j++)
+                            {
+                                ints2[j] = r.Next(256);
+                            }
+                            Assert.AreEqual(NO_OUTPUT, Util.Get(fst, input2));
+                            NextInput(r, ints2);
+                        }
+
+                        Console.WriteLine("\nTEST: enum all input/outputs");
+                        var fstEnum = new IntsRefFSTEnum<object>(fst);
+
+                        Arrays.Fill(ints2, 0);
+                        r = new Random((int)seed);
+                        var upto = 0;
+                        while (true)
+                        {
+                            var pair = fstEnum.Next();
+                            if (pair == null)
+                            {
+                                break;
+                            }
+                            for (int j = 10; j < ints2.Length; j++)
+                            {
+                                ints2[j] = r.Next(256);
+                            }
+                            Assert.AreEqual(input2, pair.Input);
+                            Assert.AreEqual(NO_OUTPUT, pair.Output);
+                            upto++;
+                            NextInput(r, ints2);
+                        }
+                        Assert.AreEqual(count, upto);
+
+                        if (verify == 0)
+                        {
+                            Console.WriteLine("\nTEST: save/load FST and re-verify");
+                            var output = dir.CreateOutput("fst", IOContext.DEFAULT);
+                            fst.Save(output);
+                            output.Dispose();
+                            var input3 = dir.OpenInput("fst", IOContext.DEFAULT);
+                            fst = new FST<object>(input3, outputs);
+                            input3.Dispose();
+                        }
+                        else
+                        {
+                            dir.DeleteFile("fst");
+                        }
+                    }
+                }
+
+                // Build FST w/ ByteSequenceOutputs and stop when FST
+                // size = 3GB
+                {
+                    Console.WriteLine("\nTEST: 3 GB size; doPack=" + doPack + " outputs=bytes");
+                    Outputs<BytesRef> outputs = ByteSequenceOutputs.GetSingleton();
+                    var b = new Builder<BytesRef>(FST.INPUT_TYPE.BYTE1, 0, 0, true, true, int.MaxValue, outputs,
+                                                                      null, doPack, PackedInts.COMPACT, true, 15);
+
+                    var outputBytes = new byte[20];
+                    var output = new BytesRef(outputBytes);
+                    Arrays.Fill(ints, 0);
+                    var count = 0;
+                    var r = new Random((int)seed);
+                    while (true)
+                    {
+                        r.NextBytes(outputBytes);
+                        //Console.WriteLine("add: " + input + " -> " + output);
+                        b.Add(input, BytesRef.DeepCopyOf(output));
+                        count++;
+                        if (count % 1000000 == 0)
+                        {
+                            Console.WriteLine(count + "...: " + b.FstSizeInBytes() + " bytes");
+                        }
+                        if (b.FstSizeInBytes() > LIMIT)
+                        {
+                            break;
+                        }
+                        NextInput(r, ints);
+                    }
+
+                    FST<BytesRef> fst = b.Finish();
+                    for (int verify = 0; verify < 2; verify++)
+                    {
+
+                        Console.WriteLine("\nTEST: now verify [fst size=" + fst.SizeInBytes() + "; nodeCount=" + fst.NodeCount + "; arcCount=" + fst.ArcCount + "]");
+
+                        r = new Random((int)seed);
+                        Arrays.Fill(ints, 0);
+
+                        for (int i = 0; i < count; i++)
+                        {
+                            if (i % 1000000 == 0)
+                            {
+                                Console.WriteLine(i + "...: ");
+                            }
+                            r.NextBytes(outputBytes);
+                            Assert.AreEqual(output, Util.Get(fst, input));
+                            NextInput(r, ints);
+                        }
+
+                        Console.WriteLine("\nTEST: enum all input/outputs");
+                        var fstEnum = new IntsRefFSTEnum<BytesRef>(fst);
+
+                        Arrays.Fill(ints, 0);
+                        r = new Random((int)seed);
+                        int upto = 0;
+                        while (true)
+                        {
+                            var pair = fstEnum.Next();
+                            if (pair == null)
+                            {
+                                break;
+                            }
+                            Assert.AreEqual(input, pair.Input);
+                            r.NextBytes(outputBytes);
+                            Assert.AreEqual(output, pair.Output);
+                            upto++;
+                            NextInput(r, ints);
+                        }
+                        Assert.AreEqual(count, upto);
+
+                        if (verify == 0)
+                        {
+                            Console.WriteLine("\nTEST: save/load FST and re-verify");
+                            var output2 = dir.CreateOutput("fst", IOContext.DEFAULT);
+                            fst.Save(output2);
+                            output2.Dispose();
+                            var input4 = dir.OpenInput("fst", IOContext.DEFAULT);
+                            fst = new FST<BytesRef>(input4, outputs);
+                            input4.Dispose();
+                        }
+                        else
+                        {
+                            dir.DeleteFile("fst");
+                        }
+                    }
+                }
+
+                // Build FST w/ PositiveIntOutputs and stop when FST
+                // size = 3GB
+                {
+                    Console.WriteLine("\nTEST: 3 GB size; doPack=" + doPack + " outputs=long");
+                    Outputs<long> outputs = PositiveIntOutputs.GetSingleton();
+                    Builder<long> b = new Builder<long>(FST.INPUT_TYPE.BYTE1, 0, 0, true, true, int.MaxValue, outputs,
+                                                              null, doPack, PackedInts.COMPACT, true, 15);
+
+                    long output = 1;
+
+                    Arrays.Fill(ints, 0);
+                    var count = 0;
+                    var r = new Random((int)seed);
+                    while (true)
+                    {
+                        //Console.WriteLine("add: " + input + " -> " + output);
+                        b.Add(input, output);
+                        output += 1 + r.Next(10);
+                        count++;
+                        if (count % 1000000 == 0)
+                        {
+                            Console.WriteLine(count + "...: " + b.FstSizeInBytes() + " bytes");
+                        }
+                        if (b.FstSizeInBytes() > LIMIT)
+                        {
+                            break;
+                        }
+                        NextInput(r, ints);
+                    }
+
+                    FST<long> fst = b.Finish();
+
+                    for (int verify = 0; verify < 2; verify++)
+                    {
+
+                        Console.WriteLine("\nTEST: now verify [fst size=" + fst.SizeInBytes() + "; nodeCount=" + fst.NodeCount + "; arcCount=" + fst.ArcCount + "]");
+
+                        Arrays.Fill(ints, 0);
+
+                        output = 1;
+                        r = new Random((int)seed);
+                        for (int i = 0; i < count; i++)
+                        {
+                            if (i % 1000000 == 0)
+                            {
+                                Console.WriteLine(i + "...: ");
+                            }
+
+                            // forward lookup:
+                            Assert.AreEqual(output, Util.Get(fst, input));
+                            // reverse lookup:
+                            Assert.AreEqual(input, Util.GetByOutput(fst, output));
+                            output += 1 + r.Next(10);
+                            NextInput(r, ints);
+                        }
+
+                        Console.WriteLine("\nTEST: enum all input/outputs");
+                        IntsRefFSTEnum<long> fstEnum = new IntsRefFSTEnum<long>(fst);
+
+                        Arrays.Fill(ints, 0);
+                        r = new Random((int)seed);
+                        int upto = 0;
+                        output = 1;
+                        while (true)
+                        {
+                            var pair = fstEnum.Next();
+                            if (pair == null)
+                            {
+                                break;
+                            }
+                            Assert.AreEqual(input, pair.Input);
+                            Assert.AreEqual(output, pair.Output);
+                            output += 1 + r.Next(10);
+                            upto++;
+                            NextInput(r, ints);
+                        }
+                        Assert.AreEqual(count, upto);
+
+                        if (verify == 0)
+                        {
+                            Console.WriteLine("\nTEST: save/load FST and re-verify");
+                            var output3 = dir.CreateOutput("fst", IOContext.DEFAULT);
+                            fst.Save(output3);
+                            output3.Dispose();
+                            var input5 = dir.OpenInput("fst", IOContext.DEFAULT);
+                            fst = new FST<long>(input5, outputs);
+                            input5.Dispose();
+                        }
+                        else
+                        {
+                            dir.DeleteFile("fst");
+                        }
+                    }
+                }
+            }
+            dir.Dispose();
+        }
+
+        private void NextInput(Random r, int[] ints)
+        {
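+            // ints[0..6] is a base-256 counter with index 6 as the least significant
+            // digit; advance it by a random step, carrying into earlier digits on overflow.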
+            var downTo = 6;
+            while (downTo >= 0)
+            {
+                // Must add random amounts (and not just 1) because
+                // otherwise FST outsmarts us and remains tiny:
+                ints[downTo] += 1 + r.Next(10);
+                if (ints[downTo] < 256)
+                {
+                    break;
+                }
+                else
+                {
+                    ints[downTo] = 0;
+                    downTo--;
+                }
+            }
+        }
+    }
+}

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/e02cc69c/test/core/Util/Fst/TestBytesStore.cs
----------------------------------------------------------------------
diff --git a/test/core/Util/Fst/TestBytesStore.cs b/test/core/Util/Fst/TestBytesStore.cs
new file mode 100644
index 0000000..df435a2
--- /dev/null
+++ b/test/core/Util/Fst/TestBytesStore.cs
@@ -0,0 +1,408 @@
+using System;
+using Lucene.Net.Store;
+using Lucene.Net.Support;
+using Lucene.Net.Test.Support;
+using Lucene.Net.Util;
+using Lucene.Net.Util.Fst;
+using NUnit.Framework;
+
+namespace Lucene.Net.Test.Util.Fst
+{
+    [TestFixture]
+    public class TestBytesStore : LuceneTestCase
+    {
+        [Test]
+        public void TestRandom()
+        {
+
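+            // Strategy: mirror each random BytesStore operation into a plain sbyte[]
+            // oracle ("expected"), then Verify() checks the store matches byte-for-byte.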
+            int iters = AtLeast(10);
+            for (var iter = 0; iter < iters; iter++)
+            {
+                int numBytes = _TestUtil.NextInt(new Random(), 1, 200000);
+                var expected = new sbyte[numBytes];
+                int blockBits = _TestUtil.NextInt(new Random(), 8, 15);
+                var bytes = new BytesStore(blockBits);
+                if (VERBOSE)
+                {
+                    Console.WriteLine("TEST: iter=" + iter + " numBytes=" + numBytes + " blockBits=" + blockBits);
+                }
+
+                var pos = 0;
+                while (pos < numBytes)
+                {
+                    int op = new Random().Next(8);
+                    if (VERBOSE)
+                    {
+                        Console.WriteLine("  cycle pos=" + pos);
+                    }
+                    switch (op)
+                    {
+
+                        case 0:
+                            {
+                                // write random byte
+                                var b = (sbyte)new Random().Next(256);
+                                if (VERBOSE)
+                                {
+                                    Console.WriteLine("    writeByte b=" + b);
+                                }
+
+                                expected[pos++] = b;
+                                bytes.WriteByte(b);
+                            }
+                            break;
+
+                        case 1:
+                            {
+                                // write random byte[]
+                                var len = new Random().Next(Math.Min(numBytes - pos, 100));
+                                var temp = new byte[len];
+                                new Random().NextBytes(temp);
+                                if (VERBOSE)
+                                {
+                                    Console.WriteLine("    writeBytes len=" + len + " bytes=" + Arrays.ToString(temp));
+                                }
+                                Array.Copy(temp, 0, expected, pos, temp.Length);
+                                bytes.WriteBytes(temp, 0, temp.Length);
+                                pos += len;
+                            }
+                            break;
+
+                        case 2:
+                            {
+                                // write int @ absolute pos
+                                if (pos > 4)
+                                {
+                                    int x = new Random().Next();
+                                    int randomPos = new Random().Next(pos - 4);
+                                    if (VERBOSE)
+                                    {
+                                        Console.WriteLine("    abs writeInt pos=" + randomPos + " x=" + x);
+                                    }
+                                    bytes.WriteInt(randomPos, x);
+                                    expected[randomPos++] = (sbyte)(x >> 24);
+                                    expected[randomPos++] = (sbyte)(x >> 16);
+                                    expected[randomPos++] = (sbyte)(x >> 8);
+                                    expected[randomPos++] = (sbyte)x;
+                                }
+                            }
+                            break;
+
+                        case 3:
+                            {
+                                // reverse bytes
+                                if (pos > 1)
+                                {
+                                    int len = _TestUtil.NextInt(new Random(), 2, Math.Min(100, pos));
+                                    int start;
+                                    if (len == pos)
+                                    {
+                                        start = 0;
+                                    }
+                                    else
+                                    {
+                                        start = new Random().Next(pos - len);
+                                    }
+                                    var end = start + len - 1;
+                                    if (VERBOSE)
+                                    {
+                                        Console.WriteLine("    reverse start=" + start + " end=" + end + " len=" + len + " pos=" + pos);
+                                    }
+                                    bytes.Reverse(start, end);
+
+                                    while (start <= end)
+                                    {
+                                        var b = expected[end];
+                                        expected[end] = expected[start];
+                                        expected[start] = b;
+                                        start++;
+                                        end--;
+                                    }
+                                }
+                            }
+                            break;
+
+                        case 4:
+                            {
+                                // abs write random byte[]
+                                if (pos > 2)
+                                {
+                                    int randomPos = new Random().Next(pos - 1);
+                                    int len = _TestUtil.NextInt(new Random(), 1, Math.Min(pos - randomPos - 1, 100));
+                                    byte[] temp = new byte[len];
+                                    new Random().NextBytes(temp);
+                                    if (VERBOSE)
+                                    {
+                                        Console.WriteLine("    abs writeBytes pos=" + randomPos + " len=" + len + " bytes=" + Arrays.ToString(temp));
+                                    }
+                                    Array.Copy(temp, 0, expected, randomPos, temp.Length);
+                                    bytes.WriteBytes(randomPos, temp, 0, temp.Length);
+                                }
+                            }
+                            break;
+
+                        case 5:
+                            {
+                                // copyBytes
+                                if (pos > 1)
+                                {
+                                    int src = new Random().Next(pos - 1);
+                                    int dest = _TestUtil.NextInt(new Random(), src + 1, pos - 1);
+                                    int len = _TestUtil.NextInt(new Random(), 1, Math.Min(300, pos - dest));
+                                    if (VERBOSE)
+                                    {
+                                        Console.WriteLine("    copyBytes src=" + src + " dest=" + dest + " len=" + len);
+                                    }
+                                    Array.Copy(expected, src, expected, dest, len);
+                                    bytes.CopyBytes(src, dest, len);
+                                }
+                            }
+                            break;
+
+                        case 6:
+                            {
+                                // skip
+                                var len = new Random().Next(Math.Min(100, numBytes - pos));
+
+                                if (VERBOSE)
+                                {
+                                    Console.WriteLine("    skip len=" + len);
+                                }
+
+                                pos += len;
+                                bytes.SkipBytes(len);
+
+                                // NOTE: must fill in zeros in case truncate was
+                                // used, else we get false fails:
+                                if (len > 0)
+                                {
+                                    var zeros = new sbyte[len];
+                                    bytes.WriteBytes(pos - len, zeros, 0, len);
+                                }
+                            }
+                            break;
+
+                        case 7:
+                            {
+                                // absWriteByte
+                                if (pos > 0)
+                                {
+                                    var dest = new Random().Next(pos);
+                                    var b = (sbyte)new Random().Next(256);
+                                    expected[dest] = b;
+                                    bytes.WriteByte(dest, b);
+                                }
+                                break;
+                            }
+                    }
+
+                    Assert.AreEqual(pos, bytes.GetPosition());
+
+                    if (pos > 0 && new Random().Next(50) == 17)
+                    {
+                        // truncate
+                        int len = _TestUtil.NextInt(new Random(), 1, Math.Min(pos, 100));
+                        bytes.Truncate(pos - len);
+                        pos -= len;
+                        Arrays.Fill(expected, pos, pos + len, (sbyte)0);
+                        if (VERBOSE)
+                        {
+                            Console.WriteLine("    truncate len=" + len + " newPos=" + pos);
+                        }
+                    }
+
+                    if ((pos > 0 && new Random().Next(200) == 17))
+                    {
+                        Verify(bytes, expected, pos);
+                    }
+                }
+
+                BytesStore bytesToVerify;
+
+                if (new Random().NextBool())
+                {
+                    if (VERBOSE)
+                    {
+                        Console.WriteLine("TEST: save/load  bytes");
+                    }
+                    Directory dir = NewDirectory();
+                    var output = dir.CreateOutput("bytes", IOContext.DEFAULT);
+                    bytes.WriteTo(output);
+                    output.Dispose();
+                    var input = dir.OpenInput("bytes", IOContext.DEFAULT);
+                    bytesToVerify = new BytesStore(input, numBytes, _TestUtil.NextInt(new Random(), 256, int.MaxValue));
+                    input.Dispose();
+                    dir.Dispose();
+                }
+                else
+                {
+                    bytesToVerify = bytes;
+                }
+
+                Verify(bytesToVerify, expected, numBytes);
+            }
+        }
+
+        private void Verify(BytesStore bytes, sbyte[] expected, int totalLength)
+        {
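+            // Read the whole store back (forward or reversed), then exercise random
+            // positioned reads and skips, comparing everything against the oracle array.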
+            Assert.AreEqual(totalLength, bytes.GetPosition());
+            if (totalLength == 0)
+            {
+                return;
+            }
+            if (VERBOSE)
+            {
+                Console.WriteLine("  verify...");
+            }
+
+            // First verify whole thing in one blast:
+            var actual = new sbyte[totalLength];
+            if (new Random().NextBool())
+            {
+                if (VERBOSE)
+                {
+                    Console.WriteLine("    bulk: reversed");
+                }
+                // reversed
+                var reverseReader = bytes.GetReverseReader();
+                Assert.IsTrue(reverseReader.Reversed());
+                reverseReader.Position = totalLength - 1;
+                reverseReader.ReadBytes(actual, 0, actual.Length);
+                var start = 0;
+                var end = totalLength - 1;
+                while (start < end)
+                {
+                    var b = actual[start];
+                    actual[start] = actual[end];
+                    actual[end] = b;
+                    start++;
+                    end--;
+                }
+            }
+            else
+            {
+                // forward
+                if (VERBOSE)
+                {
+                    Console.WriteLine("    bulk: forward");
+                }
+                var forwardReader = bytes.GetForwardReader();
+                Assert.IsFalse(forwardReader.Reversed());
+                forwardReader.ReadBytes(actual, 0, actual.Length);
+            }
+
+            for (int i = 0; i < totalLength; i++)
+            {
+                Assert.AreEqual(expected[i], actual[i], "byte @ index=" + i);
+            }
+
+            FST.BytesReader r;
+
+            // Then verify ops:
+            bool reversed = new Random().NextBool();
+            if (reversed)
+            {
+                if (VERBOSE)
+                {
+                    Console.WriteLine("    ops: reversed");
+                }
+                r = bytes.GetReverseReader();
+            }
+            else
+            {
+                if (VERBOSE)
+                {
+                    Console.WriteLine("    ops: forward");
+                }
+                r = bytes.GetForwardReader();
+            }
+
+            if (totalLength > 1)
+            {
+                int numOps = _TestUtil.NextInt(new Random(), 100, 200);
+                for (int op = 0; op < numOps; op++)
+                {
+
+                    int numBytes = new Random().Next(Math.Min(1000, totalLength - 1));
+                    int pos;
+                    if (reversed)
+                    {
+                        pos = _TestUtil.NextInt(new Random(), numBytes, totalLength - 1);
+                    }
+                    else
+                    {
+                        pos = new Random().Next(totalLength - numBytes);
+                    }
+                    if (VERBOSE)
+                    {
+                        Console.WriteLine("    op iter=" + op + " reversed=" + reversed + " numBytes=" + numBytes + " pos=" + pos);
+                    }
+                    var temp = new sbyte[numBytes];
+                    r.Position = pos;
+                    Assert.AreEqual(pos, r.Position);
+                    r.ReadBytes(temp, 0, temp.Length);
+                    for (int i = 0; i < numBytes; i++)
+                    {
+                        sbyte expectedByte;
+                        if (reversed)
+                        {
+                            expectedByte = expected[pos - i];
+                        }
+                        else
+                        {
+                            expectedByte = expected[pos + i];
+                        }
+                        Assert.AreEqual(expectedByte, temp[i], "byte @ index=" + i);
+                    }
+
+                    int left;
+                    int expectedPos;
+
+                    if (reversed)
+                    {
+                        expectedPos = pos - numBytes;
+                        left = (int)r.Position;
+                    }
+                    else
+                    {
+                        expectedPos = pos + numBytes;
+                        left = (int)(totalLength - r.Position);
+                    }
+                    Assert.AreEqual(expectedPos, r.Position);
+
+                    if (left > 4)
+                    {
+                        int skipBytes = new Random().Next(left - 4);
+
+                        int expectedInt = 0;
+                        if (reversed)
+                        {
+                            expectedPos -= skipBytes;
+                            expectedInt |= (expected[expectedPos--] & 0xFF) << 24;
+                            expectedInt |= (expected[expectedPos--] & 0xFF) << 16;
+                            expectedInt |= (expected[expectedPos--] & 0xFF) << 8;
+                            expectedInt |= (expected[expectedPos--] & 0xFF);
+                        }
+                        else
+                        {
+                            expectedPos += skipBytes;
+                            expectedInt |= (expected[expectedPos++] & 0xFF) << 24;
+                            expectedInt |= (expected[expectedPos++] & 0xFF) << 16;
+                            expectedInt |= (expected[expectedPos++] & 0xFF) << 8;
+                            expectedInt |= (expected[expectedPos++] & 0xFF);
+                        }
+
+                        if (VERBOSE)
+                        {
+                            Console.WriteLine("    skip numBytes=" + skipBytes);
+                            Console.WriteLine("    readInt");
+                        }
+
+                        r.SkipBytes(skipBytes);
+                        Assert.AreEqual(expectedInt, r.ReadInt());
+                    }
+                }
+            }
+        }
+    }
+}

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/e02cc69c/test/core/Util/TestFilterIterator.cs
----------------------------------------------------------------------
diff --git a/test/core/Util/TestFilterIterator.cs b/test/core/Util/TestFilterIterator.cs
new file mode 100644
index 0000000..04816d4
--- /dev/null
+++ b/test/core/Util/TestFilterIterator.cs
@@ -0,0 +1,5 @@
+namespace Lucene.Net.Test.Util
+{
+    // There is no FilterIterator class, as LINQ handles this very well with the Where method.
+    // This file is here to help with matching up files with the java file structure.
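+    //
+    // A minimal sketch of the equivalent filtering (illustrative only; assumes
+    // nothing beyond System.Linq):
+    //
+    //     using System.Linq;
+    //     IEnumerable<int> evens = Enumerable.Range(0, 10).Where(x => x % 2 == 0);
+    //     // evens lazily yields 0, 2, 4, 6, 8, the same contract that
+    //     // Java's FilterIterator provides.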
+}

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/e02cc69c/test/core/Util/TestFixedBitSet.cs
----------------------------------------------------------------------
diff --git a/test/core/Util/TestFixedBitSet.cs b/test/core/Util/TestFixedBitSet.cs
new file mode 100644
index 0000000..82d3e3c
--- /dev/null
+++ b/test/core/Util/TestFixedBitSet.cs
@@ -0,0 +1,373 @@
+using System;
+using System.Collections;
+using System.Linq;
+using Lucene.Net.Search;
+using Lucene.Net.Support;
+using Lucene.Net.Test.Support;
+using Lucene.Net.Util;
+using NUnit.Framework;
+
+namespace Lucene.Net.Test.Util
+{
+    [TestFixture]
+    public class TestFixedBitSet : LuceneTestCase
+    {
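+        // Asserts that the BitArray oracle and the FixedBitSet agree at every index.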
+        internal virtual void DoGet(BitArray a, FixedBitSet b)
+        {
+            int max = b.Length;
+            for (var i = 0; i < max; i++)
+            {
+                if (a[i] != b[i])
+                {
+                    Fail("mismatch: BitArray=[" + i + "]=" + a[i]);
+                }
+            }
+        }
+
+        internal virtual void DoNextSetBit(BitArray a, FixedBitSet b)
+        {
+            int aa = -1, bb = -1;
+            do
+            {
+                aa = a.NextSetBit(aa + 1);
+                bb = bb < b.Length - 1 ? b.NextSetBit(bb + 1) : -1;
+                Assert.AreEqual(aa, bb);
+            } while (aa >= 0);
+        }
+
+        internal virtual void DoPrevSetBit(BitArray a, FixedBitSet b)
+        {
+            int aa = a.Length + new Random().Next(100);
+            int bb = aa;
+            do
+            {
+                // aa = a.prevSetBit(aa-1);
+                aa--;
+                while ((aa >= 0) && (!a[aa]))
+                {
+                    aa--;
+                }
+                if (b.Length == 0)
+                {
+                    bb = -1;
+                }
+                else if (bb > b.Length - 1)
+                {
+                    bb = b.PrevSetBit(b.Length - 1);
+                }
+                else if (bb < 1)
+                {
+                    bb = -1;
+                }
+                else
+                {
+                    bb = bb >= 1 ? b.PrevSetBit(bb - 1) : -1;
+                }
+                Assert.AreEqual(aa, bb);
+            } while (aa >= 0);
+        }
+
+        // test interleaving different FixedBitSetIterator.next()/skipTo()
+        internal virtual void DoIterate(BitArray a, FixedBitSet b, int mode)
+        {
+            if (mode == 1) DoIterate1(a, b);
+            if (mode == 2) DoIterate2(a, b);
+        }
+
+        internal virtual void DoIterate1(BitArray a, FixedBitSet b)
+        {
+            int aa = -1, bb = -1;
+            var iterator = b.Iterator();
+            do
+            {
+                aa = a.NextSetBit(aa + 1);
+                bb = (bb < b.Length && new Random().NextBool()) ? iterator.NextDoc() : iterator.Advance(bb + 1);
+                Assert.AreEqual(aa == -1 ? DocIdSetIterator.NO_MORE_DOCS : aa, bb);
+            } while (aa >= 0);
+        }
+
+        internal virtual void DoIterate2(BitArray a, FixedBitSet b)
+        {
+            int aa = -1, bb = -1;
+            var iterator = b.Iterator();
+            do
+            {
+                aa = a.NextSetBit(aa + 1);
+                bb = new Random().NextBool() ? iterator.NextDoc() : iterator.Advance(bb + 1);
+                Assert.AreEqual(aa == -1 ? DocIdSetIterator.NO_MORE_DOCS : aa, bb);
+            } while (aa >= 0);
+        }
+
+        internal virtual void DoRandomSets(int maxSize, int iter, int mode)
+        {
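+            // Applies the same random mutations to a BitArray oracle and a FixedBitSet
+            // in lockstep, cross-checking iteration, ranges, and boolean ops each round.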
+            BitArray a0 = null;
+            FixedBitSet b0 = null;
+
+            var random = new Random();
+
+            for (var i = 0; i < iter; i++)
+            {
+                int sz = _TestUtil.NextInt(random, 2, maxSize);
+                var a = new BitArray(sz);
+                var b = new FixedBitSet(sz);
+
+                // test the various ways of setting bits
+                if (sz > 0)
+                {
+                    var nOper = random.Next(sz);
+                    for (var j = 0; j < nOper; j++)
+                    {
+                        int idx;
+
+                        idx = random.Next(sz);
+                        a.Set(idx);
+                        b.Set(idx);
+
+                        idx = random.Next(sz);
+                        a.Clear(idx);
+                        b.Clear(idx);
+
+                        idx = random.Next(sz);
+                        a.Flip(idx);
+                        b.Flip(idx, idx + 1);
+
+                        idx = random.Next(sz);
+                        a.Flip(idx);
+                        b.Flip(idx, idx + 1);
+                        bool val2 = b[idx];
+                        bool val = b.GetAndSet(idx);
+                        Assert.IsTrue(val2 == val);
+                        Assert.IsTrue(b[idx]);
+
+                        if (!val) b.Clear(idx);
+                        Assert.IsTrue(b[idx] == val);
+                    }
+                }
+
+                // test that the various ways of accessing the bits are equivalent
+                DoGet(a, b);
+
+                // test ranges, including possible extension
+                int fromIndex, toIndex;
+                fromIndex = random.Next(sz / 2);
+                toIndex = fromIndex + random.Next(sz - fromIndex);
+                var aa = (BitArray)a.Clone(); aa.Flip(fromIndex, toIndex);
+                var bb = b.Clone(); bb.Flip(fromIndex, toIndex);
+
+                DoIterate(aa, bb, mode);   // a problem here is from Flip or DoIterate
+
+                fromIndex = random.Next(sz / 2);
+                toIndex = fromIndex + random.Next(sz - fromIndex);
+                aa = (BitArray)a.Clone(); aa.Clear(fromIndex, toIndex);
+                bb = b.Clone(); bb.Clear(fromIndex, toIndex);
+
+                DoNextSetBit(aa, bb); // a problem here is from Clear() or NextSetBit
+
+                DoPrevSetBit(aa, bb);
+
+                fromIndex = random.Next(sz / 2);
+                toIndex = fromIndex + random.Next(sz - fromIndex);
+                aa = (BitArray)a.Clone(); aa.Set(fromIndex, toIndex);
+                bb = b.Clone(); bb.Set(fromIndex, toIndex);
+
+                DoNextSetBit(aa, bb); // a problem here is from Set() or NextSetBit
+
+                DoPrevSetBit(aa, bb);
+
+                if (b0 != null && b0.Length <= b.Length)
+                {
+                    Assert.AreEqual(a.Cardinality(), b.Cardinality());
+
+                    var a_and = (BitArray)a.Clone(); a_and.And(a0);
+                    var a_or = (BitArray)a.Clone(); a_or.Or(a0);
+                    var a_andn = (BitArray)a.Clone(); a_andn.AndNot(a0);
+
+                    var b_and = b.Clone(); Assert.AreEqual(b, b_and); b_and.And(b0);
+                    var b_or = b.Clone(); b_or.Or(b0);
+                    var b_andn = b.Clone(); b_andn.AndNot(b0);
+
+                    Assert.AreEqual(a0.Cardinality(), b0.Cardinality());
+                    Assert.AreEqual(a_or.Cardinality(), b_or.Cardinality());
+
+                    DoIterate(a_and, b_and, mode);
+                    DoIterate(a_or, b_or, mode);
+                    DoIterate(a_andn, b_andn, mode);
+
+                    Assert.AreEqual(a_and.Cardinality(), b_and.Cardinality());
+                    Assert.AreEqual(a_or.Cardinality(), b_or.Cardinality());
+                    Assert.AreEqual(a_andn.Cardinality(), b_andn.Cardinality());
+                }
+
+                a0 = a;
+                b0 = b;
+            }
+        }
+
+        // large enough to flush obvious bugs, small enough to run in <.5 sec as part of a
+        // larger testsuite.
+        [Test]
+        public void TestSmall()
+        {
+            DoRandomSets(AtLeast(1200), AtLeast(1000), 1);
+            DoRandomSets(AtLeast(1200), AtLeast(1000), 2);
+        }
+
+        // uncomment to run a bigger test (~2 minutes).
+        /*
+        [Test]
+        public void TestBig() {
+          DoRandomSets(2000,200000, 1);
+          DoRandomSets(2000,200000, 2);
+        }
+        */
+
+        [Test]
+        public void TestEquals()
+        {
+            var random = new Random();
+
+            // This test can't handle numBits==0:
+            var numBits = random.Next(2000) + 1;
+            var b1 = new FixedBitSet(numBits);
+            var b2 = new FixedBitSet(numBits);
+            Assert.IsTrue(b1.Equals(b2));
+            Assert.IsTrue(b2.Equals(b1));
+            for (var iter = 0; iter < 10 * RANDOM_MULTIPLIER; iter++)
+            {
+                var idx = random.Next(numBits);
+                if (!b1[idx])
+                {
+                    b1.Set(idx);
+                    Assert.IsFalse(b1.Equals(b2));
+                    Assert.IsFalse(b2.Equals(b1));
+                    b2.Set(idx);
+                    Assert.IsTrue(b1.Equals(b2));
+                    Assert.IsTrue(b2.Equals(b1));
+                }
+            }
+
+            // try different type of object
+            Assert.IsFalse(b1.Equals(new Object()));
+        }
+
+        [Test]
+        public void TestHashCodeEquals()
+        {
+            var random = new Random();
+
+            // This test can't handle numBits==0:
+            var numBits = random.Next(2000) + 1;
+            var b1 = new FixedBitSet(numBits);
+            var b2 = new FixedBitSet(numBits);
+            Assert.IsTrue(b1.Equals(b2));
+            Assert.IsTrue(b2.Equals(b1));
+            for (var iter = 0; iter < 10 * RANDOM_MULTIPLIER; iter++)
+            {
+                int idx = random.Next(numBits);
+                if (!b1[idx])
+                {
+                    b1.Set(idx);
+                    Assert.IsFalse(b1.Equals(b2));
+                    Assert.IsFalse(b1.GetHashCode() == b2.GetHashCode());
+                    b2.Set(idx);
+                    Assert.AreEqual(b1, b2);
+                    Assert.AreEqual(b1.GetHashCode(), b2.GetHashCode());
+                }
+            }
+        }
+
+        [Test]
+        public void TestSmallBitSets()
+        {
+            // Make sure size 0-10 bit sets are OK:
+            for (var numBits = 0; numBits < 10; numBits++)
+            {
+                var b1 = new FixedBitSet(numBits);
+                var b2 = new FixedBitSet(numBits);
+                Assert.IsTrue(b1.Equals(b2));
+                Assert.AreEqual(b1.GetHashCode(), b2.GetHashCode());
+                Assert.AreEqual(0, b1.Cardinality());
+                if (numBits > 0)
+                {
+                    b1.Set(0, numBits);
+                    Assert.AreEqual(numBits, b1.Cardinality());
+                    b1.Flip(0, numBits);
+                    Assert.AreEqual(0, b1.Cardinality());
+                }
+            }
+        }
+
+        private FixedBitSet MakeFixedBitSet(int[] a, int numBits)
+        {
+            var random = new Random();
+            
+            FixedBitSet bs;
+            if (random.NextBool())
+            {
+                var bits2words = FixedBitSet.Bits2Words(numBits);
+                var words = new long[bits2words + random.Next(100)];
+                for (var i = bits2words; i < words.Length; i++)
+                {
+                    words[i] = random.NextLong();
+                }
+                bs = new FixedBitSet(words, numBits);
+
+            }
+            else
+            {
+                bs = new FixedBitSet(numBits);
+            }
+            foreach (var e in a)
+            {
+                bs.Set(e);
+            }
+            return bs;
+        }
+
+        private BitArray MakeBitSet(int[] a)
+        {
+            // System.Collections.BitArray has no parameterless constructor and does not
+            // auto-grow, so size it up front to hold the highest bit.
+            var bs = new BitArray(a.Length == 0 ? 0 : a.Max() + 1);
+            foreach (var e in a)
+            {
+                bs.Set(e, true);
+            }
+            return bs;
+        }
+
+        private void CheckPrevSetBitArray(int[] a, int numBits)
+        {
+            var obs = MakeFixedBitSet(a, numBits);
+            var bs = MakeBitSet(a);
+            DoPrevSetBit(bs, obs);
+        }
+
+        [Test]
+        public void TestPrevSetBit()
+        {
+            CheckPrevSetBitArray(new int[] { }, 0);
+            CheckPrevSetBitArray(new int[] { 0 }, 1);
+            CheckPrevSetBitArray(new int[] { 0, 2 }, 3);
+        }
+
+        private void CheckNextSetBitArray(int[] a, int numBits)
+        {
+            var obs = MakeFixedBitSet(a, numBits);
+            var bs = MakeBitSet(a);
+            DoNextSetBit(bs, obs);
+        }
+
+        [Test]
+        public void TestNextBitSet()
+        {
+            var random = new Random();
+
+            var setBits = new int[0 + random.Next(1000)];
+            for (var i = 0; i < setBits.Length; i++)
+            {
+                setBits[i] = random.Next(setBits.Length);
+            }
+            CheckNextSetBitArray(setBits, setBits.Length + random.Next(10));
+
+            CheckNextSetBitArray(new int[0], setBits.Length + random.Next(10));
+        }
+    }
+}

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/e02cc69c/test/core/Util/TestIdentityHashSet.cs
----------------------------------------------------------------------
diff --git a/test/core/Util/TestIdentityHashSet.cs b/test/core/Util/TestIdentityHashSet.cs
new file mode 100644
index 0000000..0c7e1e7
--- /dev/null
+++ b/test/core/Util/TestIdentityHashSet.cs
@@ -0,0 +1,45 @@
+using System;
+using System.Collections.Generic;
+using Lucene.Net.Support;
+using Lucene.Net.Util;
+using NUnit.Framework;
+
+namespace Lucene.Net.Test.Util
+{
+    [TestFixture]
+    public class TestIdentityHashSet : LuceneTestCase
+    {
+        [Test]
+        public void TestCheck()
+        {
+            var rnd = new Random();
+            ISet<object> jdk = Collections.NewSetFromMap(
+                new IdentityHashMap<object, bool>());
+            RamUsageEstimator.IdentityHashSet<object> us = new RamUsageEstimator.IdentityHashSet<object>();
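+            // jdk is the reference identity set; us is the implementation under test.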
+
+            var max = 100000;
+            var threshold = 256;
+            for (var i = 0; i < max; i++)
+            {
+                // values repeat, but each boxed int is a distinct object, so both identity sets see the same hits and misses.
+                var v = rnd.Next(threshold);
+
+                bool e1 = jdk.Contains(v);
+                bool e2 = us.Contains(v);
+                Assert.AreEqual(e1, e2);
+
+                e1 = jdk.Add(v);
+                e2 = us.Add(v);
+                Assert.AreEqual(e1, e2);
+            }
+
+            ISet<object> collected = Collections.NewSetFromMap(
+                new IdentityHashMap<object, bool>());
+            foreach (var o in us)
+            {
+                collected.Add(o);
+            }
+
+            Assert.IsTrue(collected.SetEquals(jdk));
+        }
+    }
+}