You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@lucenenet.apache.org by sy...@apache.org on 2015/04/16 03:30:35 UTC

[1/3] lucenenet git commit: make sure to init new instance of BytesRef when enumerating

Repository: lucenenet
Updated Branches:
  refs/heads/master b4eaf2fc4 -> c0aa821d4


make sure to init new instance of BytesRef when enumerating


Project: http://git-wip-us.apache.org/repos/asf/lucenenet/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucenenet/commit/b61e2b90
Tree: http://git-wip-us.apache.org/repos/asf/lucenenet/tree/b61e2b90
Diff: http://git-wip-us.apache.org/repos/asf/lucenenet/diff/b61e2b90

Branch: refs/heads/master
Commit: b61e2b904f70c2328b7fb1d6d051261f3e90cad3
Parents: b4eaf2f
Author: Laimonas Simutis <la...@gmail.com>
Authored: Wed Apr 15 20:58:43 2015 -0400
Committer: Laimonas Simutis <la...@gmail.com>
Committed: Wed Apr 15 20:58:43 2015 -0400

----------------------------------------------------------------------
 src/Lucene.Net.Core/Codecs/DocValuesConsumer.cs       | 4 ++--
 src/Lucene.Net.Core/Index/ReadersAndUpdates.cs        | 2 +-
 src/Lucene.Net.Core/Index/SortedDocValuesWriter.cs    | 3 +--
 src/Lucene.Net.Core/Index/SortedSetDocValuesWriter.cs | 2 +-
 4 files changed, 5 insertions(+), 6 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b61e2b90/src/Lucene.Net.Core/Codecs/DocValuesConsumer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Core/Codecs/DocValuesConsumer.cs b/src/Lucene.Net.Core/Codecs/DocValuesConsumer.cs
index a5fafe3..b2be765 100644
--- a/src/Lucene.Net.Core/Codecs/DocValuesConsumer.cs
+++ b/src/Lucene.Net.Core/Codecs/DocValuesConsumer.cs
@@ -281,11 +281,11 @@ namespace Lucene.Net.Codecs
 
         private IEnumerable<BytesRef> GetMergeSortValuesEnumerable(OrdinalMap map, SortedDocValues[] dvs)
         {
-            var scratch = new BytesRef();
             int currentOrd = 0;
 
             while (currentOrd < map.ValueCount)
             {
+                var scratch = new BytesRef();
                 int segmentNumber = map.GetFirstSegmentNumber(currentOrd);
                 var segmentOrd = (int)map.GetFirstSegmentOrd(currentOrd);
                 dvs[segmentNumber].LookupOrd(segmentOrd, scratch);
@@ -542,13 +542,13 @@ namespace Lucene.Net.Codecs
 
         private IEnumerable<BytesRef> GetMergeSortedSetValuesEnumerable(OrdinalMap map, SortedSetDocValues[] dvs)
         {
-            var scratch = new BytesRef();
             long currentOrd = 0;
 
             while (currentOrd < map.ValueCount)
             {
                 int segmentNumber = map.GetFirstSegmentNumber(currentOrd);
                 long segmentOrd = map.GetFirstSegmentOrd(currentOrd);
+                var scratch = new BytesRef();
                 dvs[segmentNumber].LookupOrd(segmentOrd, scratch);
                 currentOrd++;
                 yield return scratch;

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b61e2b90/src/Lucene.Net.Core/Index/ReadersAndUpdates.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Core/Index/ReadersAndUpdates.cs b/src/Lucene.Net.Core/Index/ReadersAndUpdates.cs
index 1becab9..8d971d7 100644
--- a/src/Lucene.Net.Core/Index/ReadersAndUpdates.cs
+++ b/src/Lucene.Net.Core/Index/ReadersAndUpdates.cs
@@ -707,7 +707,6 @@ namespace Lucene.Net.Index
             Bits DocsWithField = reader.GetDocsWithField(field);
             int maxDoc = reader.MaxDoc;
             var iter = (BinaryDocValuesFieldUpdates.Iterator)fieldUpdates.GetIterator();
-            BytesRef scratch = new BytesRef();
             int updateDoc = iter.NextDoc();
 
             for (int curDoc = 0; curDoc < maxDoc; ++curDoc)
@@ -722,6 +721,7 @@ namespace Lucene.Net.Index
                 {   // no update for this document
                     if (currentValues != null && DocsWithField.Get(curDoc))
                     {
+                        var scratch = new BytesRef();
                         // only read the current value if the document had a value before
                         currentValues.Get(curDoc, scratch);
                         yield return scratch;

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b61e2b90/src/Lucene.Net.Core/Index/SortedDocValuesWriter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Core/Index/SortedDocValuesWriter.cs b/src/Lucene.Net.Core/Index/SortedDocValuesWriter.cs
index eefdb6d..7a40220 100644
--- a/src/Lucene.Net.Core/Index/SortedDocValuesWriter.cs
+++ b/src/Lucene.Net.Core/Index/SortedDocValuesWriter.cs
@@ -133,10 +133,9 @@ namespace Lucene.Net.Index
 
         private IEnumerable<BytesRef> GetBytesRefEnumberable(int valueCount, int[] sortedValues)
         {
-            BytesRef scratch = new BytesRef();
-
             for (int i = 0; i < valueCount; ++i)
             {
+                var scratch = new BytesRef();
                 yield return Hash.Get(sortedValues[i], scratch);
             }
         }

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/b61e2b90/src/Lucene.Net.Core/Index/SortedSetDocValuesWriter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Core/Index/SortedSetDocValuesWriter.cs b/src/Lucene.Net.Core/Index/SortedSetDocValuesWriter.cs
index 6969b3a..2242790 100644
--- a/src/Lucene.Net.Core/Index/SortedSetDocValuesWriter.cs
+++ b/src/Lucene.Net.Core/Index/SortedSetDocValuesWriter.cs
@@ -189,9 +189,9 @@ namespace Lucene.Net.Index
 
         private IEnumerable<BytesRef> GetBytesRefEnumberable(int valueCount, int[] sortedValues)
         {
-            BytesRef scratch = new BytesRef();
             for (int i = 0; i < valueCount; ++i)
             {
+                var scratch = new BytesRef();
                 yield return Hash.Get(sortedValues[i], scratch);
             }
         }


[2/3] lucenenet git commit: fix reverse byte reader

Posted by sy...@apache.org.
fix reverse byte reader


Project: http://git-wip-us.apache.org/repos/asf/lucenenet/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucenenet/commit/8b9ae9cb
Tree: http://git-wip-us.apache.org/repos/asf/lucenenet/tree/8b9ae9cb
Diff: http://git-wip-us.apache.org/repos/asf/lucenenet/diff/8b9ae9cb

Branch: refs/heads/master
Commit: 8b9ae9cbb7d77e04924d4df260ce09372f47e3e6
Parents: b61e2b9
Author: Laimonas Simutis <la...@gmail.com>
Authored: Wed Apr 15 20:59:34 2015 -0400
Committer: Laimonas Simutis <la...@gmail.com>
Committed: Wed Apr 15 20:59:34 2015 -0400

----------------------------------------------------------------------
 src/Lucene.Net.Core/Util/Fst/BytesStore.cs                | 10 +++++-----
 .../Codecs/lucene42/Lucene42DocValuesConsumer.cs          |  3 ++-
 .../Index/BaseDocValuesFormatTestCase.cs                  |  2 +-
 3 files changed, 8 insertions(+), 7 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucenenet/blob/8b9ae9cb/src/Lucene.Net.Core/Util/Fst/BytesStore.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Core/Util/Fst/BytesStore.cs b/src/Lucene.Net.Core/Util/Fst/BytesStore.cs
index 472d630..bf81fb5 100644
--- a/src/Lucene.Net.Core/Util/Fst/BytesStore.cs
+++ b/src/Lucene.Net.Core/Util/Fst/BytesStore.cs
@@ -520,12 +520,12 @@ namespace Lucene.Net.Util.Fst
             public ReverseBytesReaderAnonymousInner(BytesStore outerInstance)
             {
                 this.OuterInstance = outerInstance;
-                outerInstance.Current = outerInstance.Blocks.Count == 0 ? null : outerInstance.Blocks[0];
+                Current = outerInstance.Blocks.Count == 0 ? null : outerInstance.Blocks[0];
                 nextBuffer = -1;
                 nextRead = 0;
             }
 
-            private sbyte[] Current;
+            private byte[] Current;
             private int nextBuffer;
             private int nextRead;
 
@@ -533,10 +533,10 @@ namespace Lucene.Net.Util.Fst
             {
                 if (nextRead == -1)
                 {
-                    OuterInstance.Current = OuterInstance.Blocks[nextBuffer--];
+                    Current = OuterInstance.Blocks[nextBuffer--];
                     nextRead = OuterInstance.BlockSize - 1;
                 }
-                return OuterInstance.Current[nextRead--];
+                return Current[nextRead--];
             }
 
             public override void SkipBytes(int count)
@@ -566,7 +566,7 @@ namespace Lucene.Net.Util.Fst
                     // EOF)...?
                     int bufferIndex = (int)(value >> OuterInstance.blockBits);
                     nextBuffer = bufferIndex - 1;
-                    OuterInstance.Current = OuterInstance.Blocks[bufferIndex];
+                    Current = OuterInstance.Blocks[bufferIndex];
                     nextRead = (int)(value & OuterInstance.BlockMask);
                     Debug.Assert(this.Position == value, "value=" + value + " this.Position=" + this.Position);
                 }

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/8b9ae9cb/src/Lucene.Net.TestFramework/Codecs/lucene42/Lucene42DocValuesConsumer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.TestFramework/Codecs/lucene42/Lucene42DocValuesConsumer.cs b/src/Lucene.Net.TestFramework/Codecs/lucene42/Lucene42DocValuesConsumer.cs
index a479571..80d6c27 100644
--- a/src/Lucene.Net.TestFramework/Codecs/lucene42/Lucene42DocValuesConsumer.cs
+++ b/src/Lucene.Net.TestFramework/Codecs/lucene42/Lucene42DocValuesConsumer.cs
@@ -296,7 +296,8 @@ namespace Lucene.Net.Codecs.Lucene42
                 builder.Add(Util.ToIntsRef(v, scratch), ord);
                 ord++;
             }
-            Lucene.Net.Util.Fst.FST<long?> fst = builder.Finish();
+
+            var fst = builder.Finish();
             if (fst != null)
             {
                 fst.Save(Data);

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/8b9ae9cb/src/Lucene.Net.TestFramework/Index/BaseDocValuesFormatTestCase.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.TestFramework/Index/BaseDocValuesFormatTestCase.cs b/src/Lucene.Net.TestFramework/Index/BaseDocValuesFormatTestCase.cs
index 970805c..ae7e1fc 100644
--- a/src/Lucene.Net.TestFramework/Index/BaseDocValuesFormatTestCase.cs
+++ b/src/Lucene.Net.TestFramework/Index/BaseDocValuesFormatTestCase.cs
@@ -992,7 +992,7 @@ namespace Lucene.Net.Index
             Document doc = new Document();
             var bytes = new byte[32766];
             BytesRef b = new BytesRef(bytes);
-            Random().NextBytes((byte[])(Array)bytes);
+            Random().NextBytes(bytes);
             doc.Add(new BinaryDocValuesField("dv", b));
             iwriter.AddDocument(doc);
             iwriter.Dispose();


[3/3] lucenenet git commit: More porting work

Posted by sy...@apache.org.
More porting work


Project: http://git-wip-us.apache.org/repos/asf/lucenenet/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucenenet/commit/c0aa821d
Tree: http://git-wip-us.apache.org/repos/asf/lucenenet/tree/c0aa821d
Diff: http://git-wip-us.apache.org/repos/asf/lucenenet/diff/c0aa821d

Branch: refs/heads/master
Commit: c0aa821d4edde76bcc8fc4a504ce9feaa04e2e30
Parents: 8b9ae9c
Author: Itamar Syn-Hershko <it...@code972.com>
Authored: Thu Apr 16 04:30:11 2015 +0300
Committer: Itamar Syn-Hershko <it...@code972.com>
Committed: Thu Apr 16 04:30:11 2015 +0300

----------------------------------------------------------------------
 .../Analysis/Core/TypeTokenFilter.cs            |   4 +-
 .../Analysis/Core/UpperCaseFilterFactory.cs     |   1 -
 .../Analysis/Core/WhitespaceTokenizerFactory.cs |  13 +-
 .../Analysis/Miscellaneous/TrimFilter.cs        |  19 +-
 .../Analysis/Miscellaneous/TrimFilterFactory.cs |   1 +
 .../Miscellaneous/WordDelimiterFilter.cs        |  91 ++-
 .../Miscellaneous/WordDelimiterFilterFactory.cs |   4 +-
 .../Miscellaneous/WordDelimiterIterator.cs      |  21 +-
 .../Path/ReversePathHierarchyTokenizer.cs       |   8 +-
 .../Analysis/Util/AbstractAnalysisFactory.cs    | 656 +++++++++----------
 10 files changed, 399 insertions(+), 419 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucenenet/blob/c0aa821d/src/Lucene.Net.Analysis.Common/Analysis/Core/TypeTokenFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Core/TypeTokenFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Core/TypeTokenFilter.cs
index c546f3a..f3890db 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Core/TypeTokenFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Core/TypeTokenFilter.cs
@@ -2,6 +2,7 @@
 using System.Collections.Generic;
 using Lucene.Net.Analysis.Tokenattributes;
 using Lucene.Net.Analysis.Util;
+using Version = Lucene.Net.Util.LuceneVersion;
 
 namespace Lucene.Net.Analysis.Core
 {
@@ -29,7 +30,7 @@ namespace Lucene.Net.Analysis.Core
     {
 
         private readonly HashSet<string> stopTypes;
-        private readonly TypeAttribute typeAttribute = addAttribute(typeof(TypeAttribute));
+        private readonly ITypeAttribute typeAttribute;
         private readonly bool useWhiteList;
 
         /// @deprecated enablePositionIncrements=false is not supported anymore as of Lucene 4.4. 
@@ -37,6 +38,7 @@ namespace Lucene.Net.Analysis.Core
         public TypeTokenFilter(Version version, bool enablePositionIncrements, TokenStream input, HashSet<string> stopTypes, bool useWhiteList)
             : base(version, enablePositionIncrements, input)
         {
+            typeAttribute = AddAttribute<ITypeAttribute>();
             this.stopTypes = stopTypes;
             this.useWhiteList = useWhiteList;
         }

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/c0aa821d/src/Lucene.Net.Analysis.Common/Analysis/Core/UpperCaseFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Core/UpperCaseFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Core/UpperCaseFilterFactory.cs
index e6b78b2..7cc089e 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Core/UpperCaseFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Core/UpperCaseFilterFactory.cs
@@ -1,6 +1,5 @@
 using System.Collections.Generic;
 using Lucene.Net.Analysis.Util;
-using org.apache.lucene.analysis.core;
 
 namespace Lucene.Net.Analysis.Core
 {

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/c0aa821d/src/Lucene.Net.Analysis.Common/Analysis/Core/WhitespaceTokenizerFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Core/WhitespaceTokenizerFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Core/WhitespaceTokenizerFactory.cs
index 7cddbd4..33469c1 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Core/WhitespaceTokenizerFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Core/WhitespaceTokenizerFactory.cs
@@ -1,11 +1,9 @@
 using System.Collections.Generic;
-using Lucene.Net.Analysis;
-using Lucene.Net.Analysis.Core;
+using System.IO;
+using Lucene.Net.Analysis.Util;
 using Lucene.Net.Util;
-using TokenizerFactory = Lucene.Net.Analysis.Util.TokenizerFactory;
-using Reader = System.IO.TextReader;
 
-namespace org.apache.lucene.analysis.core
+namespace Lucene.Net.Analysis.Core
 {
 
     /*
@@ -24,9 +22,6 @@ namespace org.apache.lucene.analysis.core
      * See the License for the specific language governing permissions and
      * limitations under the License.
      */
-
-    using TokenizerFactory = TokenizerFactory;
-
     /// <summary>
     /// Factory for <seealso cref="WhitespaceTokenizer"/>. 
     /// <pre class="prettyprint">
@@ -51,7 +46,7 @@ namespace org.apache.lucene.analysis.core
             }
         }
 
-        public override Tokenizer Create(AttributeSource.AttributeFactory factory, Reader input)
+        public override Tokenizer Create(AttributeSource.AttributeFactory factory, TextReader input)
         {
             return new WhitespaceTokenizer(luceneMatchVersion, factory, input);
         }

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/c0aa821d/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/TrimFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/TrimFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/TrimFilter.cs
index 5a511bc..6e57cc6 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/TrimFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/TrimFilter.cs
@@ -1,6 +1,4 @@
-using System;
-
-/*
+/*
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
  * this work for additional information regarding copyright ownership.
@@ -16,15 +14,12 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+using System;
+using Lucene.Net.Analysis.Tokenattributes;
 
-namespace org.apache.lucene.analysis.miscellaneous
+namespace Lucene.Net.Analysis.Miscellaneous
 {
-
-	using CharTermAttribute = org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
-	using OffsetAttribute = org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
-	using Version = org.apache.lucene.util.Version;
-
-	/// <summary>
+    /// <summary>
 	/// Trims leading and trailing whitespace from Tokens in the stream.
 	/// <para>As of Lucene 4.4, this filter does not support updateOffsets=true anymore
 	/// as it can lead to broken token streams.
@@ -34,8 +29,8 @@ namespace org.apache.lucene.analysis.miscellaneous
 	{
 
 	  internal readonly bool updateOffsets;
-	  private readonly CharTermAttribute termAtt = addAttribute(typeof(CharTermAttribute));
-	  private readonly OffsetAttribute offsetAtt = addAttribute(typeof(OffsetAttribute));
+	  private readonly ICharTermAttribute termAtt = addAttribute(typeof(CharTermAttribute));
+	  private readonly IOffsetAttribute offsetAtt = addAttribute(typeof(OffsetAttribute));
 
 	  /// <summary>
 	  /// Create a new <seealso cref="TrimFilter"/>. </summary>

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/c0aa821d/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/TrimFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/TrimFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/TrimFilterFactory.cs
index 212d555..8ed68ac 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/TrimFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/TrimFilterFactory.cs
@@ -1,4 +1,5 @@
 using System.Collections.Generic;
+using Lucene.Net.Analysis.Miscellaneous;
 using TokenFilterFactory = Lucene.Net.Analysis.Util.TokenFilterFactory;
 
 namespace org.apache.lucene.analysis.miscellaneous

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/c0aa821d/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/WordDelimiterFilter.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/WordDelimiterFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/WordDelimiterFilter.cs
index be6ba5d..5209eb9 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/WordDelimiterFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/WordDelimiterFilter.cs
@@ -1,6 +1,4 @@
-using System;
-using System.Text;
-/*
+/*
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
  * this work for additional information regarding copyright ownership.
@@ -16,28 +14,19 @@ using System.Text;
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+using System;
+using System.Text;
 using Lucene.Net.Analysis.Core;
 using Lucene.Net.Analysis.Standard;
+using Lucene.Net.Analysis.Tokenattributes;
 using Lucene.Net.Analysis.Util;
+using Lucene.Net.Support;
+using Lucene.Net.Util;
+using org.apache.lucene.analysis.miscellaneous;
 
-namespace org.apache.lucene.analysis.miscellaneous
+namespace Lucene.Net.Analysis.Miscellaneous
 {
-
-	using WhitespaceTokenizer = WhitespaceTokenizer;
-	using StandardTokenizer = StandardTokenizer;
-	using OffsetAttribute = org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
-	using PositionIncrementAttribute = org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
-	using CharTermAttribute = org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
-	using TypeAttribute = org.apache.lucene.analysis.tokenattributes.TypeAttribute;
-	using CharArraySet = CharArraySet;
-	using ArrayUtil = org.apache.lucene.util.ArrayUtil;
-	using AttributeSource = org.apache.lucene.util.AttributeSource;
-	using InPlaceMergeSorter = org.apache.lucene.util.InPlaceMergeSorter;
-	using RamUsageEstimator = org.apache.lucene.util.RamUsageEstimator;
-	using Version = org.apache.lucene.util.Version;
-
-
-	/// <summary>
+    /// <summary>
 	/// Splits words into subwords and performs optional transformations on subword
 	/// groups. Words are split into subwords with the following rules:
 	/// <ul>
@@ -176,10 +165,10 @@ namespace org.apache.lucene.analysis.miscellaneous
 
 	  private readonly int flags;
 
-	  private readonly CharTermAttribute termAttribute = addAttribute(typeof(CharTermAttribute));
-	  private readonly OffsetAttribute offsetAttribute = addAttribute(typeof(OffsetAttribute));
-	  private readonly PositionIncrementAttribute posIncAttribute = addAttribute(typeof(PositionIncrementAttribute));
-	  private readonly TypeAttribute typeAttribute = addAttribute(typeof(TypeAttribute));
+	  private readonly ICharTermAttribute termAttribute = addAttribute(typeof(CharTermAttribute));
+	  private readonly IOffsetAttribute offsetAttribute = addAttribute(typeof(OffsetAttribute));
+	  private readonly IPositionIncrementAttribute posIncAttribute = addAttribute(typeof(PositionIncrementAttribute));
+	  private readonly ITypeAttribute typeAttribute = addAttribute(typeof(TypeAttribute));
 
 	  // used for iterating word delimiter breaks
 	  private readonly WordDelimiterIterator iterator;
@@ -249,22 +238,20 @@ namespace org.apache.lucene.analysis.miscellaneous
 		  }
 	  }
 
-//JAVA TO C# CONVERTER WARNING: Method 'throws' clauses are not available in .NET:
-//ORIGINAL LINE: @Override public boolean incrementToken() throws java.io.IOException
-	  public override bool incrementToken()
+	  public override bool IncrementToken()
 	  {
 		while (true)
 		{
 		  if (!hasSavedState)
 		  {
 			// process a new input word
-			if (!input.incrementToken())
+			if (!input.IncrementToken())
 			{
 			  return false;
 			}
 
-			int termLength = termAttribute.length();
-			char[] termBuffer = termAttribute.buffer();
+			int termLength = termAttribute.Length;
+			char[] termBuffer = termAttribute.Buffer();
 
 			accumPosInc += posIncAttribute.PositionIncrement;
 
@@ -272,7 +259,7 @@ namespace org.apache.lucene.analysis.miscellaneous
 			iterator.next();
 
 			// word of no delimiters, or protected word: just return it
-			if ((iterator.current == 0 && iterator.end == termLength) || (protWords != null && protWords.contains(termBuffer, 0, termLength)))
+			if ((iterator.current == 0 && iterator.end == termLength) || (protWords != null && protWords.Contains(termBuffer, 0, termLength)))
 			{
 			  posIncAttribute.PositionIncrement = accumPosInc;
 			  accumPosInc = 0;
@@ -335,10 +322,10 @@ namespace org.apache.lucene.analysis.miscellaneous
 			{
 			  if (bufferedPos == 0)
 			  {
-				sorter.sort(0, bufferedLen);
+				sorter.Sort(0, bufferedLen);
 			  }
-			  clearAttributes();
-			  restoreState(buffered[bufferedPos++]);
+			  ClearAttributes();
+			  RestoreState(buffered[bufferedPos++]);
 			  if (first && posIncAttribute.PositionIncrement == 0)
 			  {
 				// can easily happen with strange combinations (e.g. not outputting numbers, but concat-all)
@@ -404,11 +391,9 @@ namespace org.apache.lucene.analysis.miscellaneous
 		}
 	  }
 
-//JAVA TO C# CONVERTER WARNING: Method 'throws' clauses are not available in .NET:
-//ORIGINAL LINE: @Override public void reset() throws java.io.IOException
-	  public override void reset()
+	  public override void Reset()
 	  {
-		base.reset();
+		base.Reset();
 		hasSavedState = false;
 		concat.clear();
 		concatAll.clear();
@@ -435,7 +420,7 @@ namespace org.apache.lucene.analysis.miscellaneous
 			  this.outerInstance = outerInstance;
 		  }
 
-		protected internal override int compare(int i, int j)
+		protected override int Compare(int i, int j)
 		{
 		  int cmp = int.compare(outerInstance.startOff[i], outerInstance.startOff[j]);
 		  if (cmp == 0)
@@ -467,14 +452,14 @@ namespace org.apache.lucene.analysis.miscellaneous
 	  {
 		if (bufferedLen == buffered.Length)
 		{
-		  int newSize = ArrayUtil.oversize(bufferedLen + 1, 8);
-		  buffered = Arrays.copyOf(buffered, newSize);
-		  startOff = Arrays.copyOf(startOff, newSize);
-		  posInc = Arrays.copyOf(posInc, newSize);
+		  int newSize = ArrayUtil.Oversize(bufferedLen + 1, 8);
+		  buffered = Arrays.CopyOf(buffered, newSize);
+		  startOff = Arrays.CopyOf(startOff, newSize);
+		  posInc = Arrays.CopyOf(posInc, newSize);
 		}
-		startOff[bufferedLen] = offsetAttribute.startOffset();
+		startOff[bufferedLen] = offsetAttribute.StartOffset();
 		posInc[bufferedLen] = posIncAttribute.PositionIncrement;
-		buffered[bufferedLen] = captureState();
+		buffered[bufferedLen] = CaptureState();
 		bufferedLen++;
 	  }
 
@@ -484,8 +469,8 @@ namespace org.apache.lucene.analysis.miscellaneous
 	  private void saveState()
 	  {
 		// otherwise, we have delimiters, save state
-		savedStartOffset = offsetAttribute.startOffset();
-		savedEndOffset = offsetAttribute.endOffset();
+		savedStartOffset = offsetAttribute.StartOffset();
+		savedEndOffset = offsetAttribute.EndOffset();
 		// if length by start + end offsets doesn't match the term text then assume this is a synonym and don't adjust the offsets.
 		hasIllegalOffsets = (savedEndOffset - savedStartOffset != termAttribute.length());
 		savedType = typeAttribute.type();
@@ -558,8 +543,8 @@ namespace org.apache.lucene.analysis.miscellaneous
 	  /// <param name="isSingleWord"> {@code true} if the generation is occurring from a single word, {@code false} otherwise </param>
 	  private void generatePart(bool isSingleWord)
 	  {
-		clearAttributes();
-		termAttribute.copyBuffer(savedBuffer, iterator.current, iterator.end - iterator.current);
+		ClearAttributes();
+		termAttribute.CopyBuffer(savedBuffer, iterator.current, iterator.end - iterator.current);
 
 		int startOffset = savedStartOffset + iterator.current;
 		int endOffset = savedStartOffset + iterator.end;
@@ -570,16 +555,16 @@ namespace org.apache.lucene.analysis.miscellaneous
 		  // but we must do a sanity check:
 		  if (isSingleWord && startOffset <= savedEndOffset)
 		  {
-			offsetAttribute.setOffset(startOffset, savedEndOffset);
+			offsetAttribute.SetOffset(startOffset, savedEndOffset);
 		  }
 		  else
 		  {
-			offsetAttribute.setOffset(savedStartOffset, savedEndOffset);
+			offsetAttribute.SetOffset(savedStartOffset, savedEndOffset);
 		  }
 		}
 		else
 		{
-		  offsetAttribute.setOffset(startOffset, endOffset);
+		  offsetAttribute.SetOffset(startOffset, endOffset);
 		}
 		posIncAttribute.PositionIncrement = position(false);
 		typeAttribute.Type = savedType;
@@ -700,7 +685,7 @@ namespace org.apache.lucene.analysis.miscellaneous
 		/// </summary>
 		internal void write()
 		{
-		  clearAttributes();
+		  ClearAttributes();
 		  if (outerInstance.termAttribute.length() < buffer.Length)
 		  {
 			outerInstance.termAttribute.resizeBuffer(buffer.Length);

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/c0aa821d/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/WordDelimiterFilterFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/WordDelimiterFilterFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/WordDelimiterFilterFactory.cs
index 8b8e637..f1265d4 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/WordDelimiterFilterFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/WordDelimiterFilterFactory.cs
@@ -122,7 +122,7 @@ namespace Lucene.Net.Analysis.Miscellaneous
 		}
 	  }
 
-	  public override TokenFilter Create(TokenStream input)
+	  public override TokenStream Create(TokenStream input)
 	  {
 		if (luceneMatchVersion.OnOrAfter(LuceneVersion.LUCENE_48))
 		{
@@ -130,7 +130,7 @@ namespace Lucene.Net.Analysis.Miscellaneous
 		}
 		else
 		{
-		  return new Lucene47WordDelimiterFilter(input, typeTable == null ? WordDelimiterIterator.DEFAULT_WORD_DELIM_TABLE : typeTable, flags, protectedWords);
+		  return new Lucene47WordDelimiterFilter(input, typeTable ?? WordDelimiterIterator.DEFAULT_WORD_DELIM_TABLE, flags, protectedWords);
 		}
 	  }
 

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/c0aa821d/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/WordDelimiterIterator.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/WordDelimiterIterator.cs b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/WordDelimiterIterator.cs
index 154176b..e43d407 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/WordDelimiterIterator.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/WordDelimiterIterator.cs
@@ -1,4 +1,7 @@
-namespace org.apache.lucene.analysis.miscellaneous
+using Lucene.Net.Analysis.Miscellaneous;
+using Lucene.Net.Support;
+
+namespace org.apache.lucene.analysis.miscellaneous
 {
 
 	/*
@@ -84,19 +87,19 @@
 	  // done if separated by these chars?) "," would be an obvious candidate...
 	  static WordDelimiterIterator()
 	  {
-		sbyte[] tab = new sbyte[256];
+		var tab = new sbyte[256];
 		for (int i = 0; i < 256; i++)
 		{
 		  sbyte code = 0;
-		  if (char.IsLower(i))
+		  if (char.IsLower((char)i))
 		  {
 			code |= (sbyte)WordDelimiterFilter.LOWER;
 		  }
-		  else if (char.IsUpper(i))
+		  else if (char.IsUpper((char)i))
 		  {
 			code |= (sbyte)WordDelimiterFilter.UPPER;
 		  }
-		  else if (char.IsDigit(i))
+		  else if (char.IsDigit((char)i))
 		  {
 			code |= (sbyte)WordDelimiterFilter.DIGIT;
 		  }
@@ -221,22 +224,22 @@
 	  /// <returns> {@code true} if the transition indicates a break, {@code false} otherwise </returns>
 	  private bool isBreak(int lastType, int type)
 	  {
-		if ((type_Renamed & lastType) != 0)
+		if ((type & lastType) != 0)
 		{
 		  return false;
 		}
 
-		if (!splitOnCaseChange && WordDelimiterFilter.isAlpha(lastType) && WordDelimiterFilter.isAlpha(type_Renamed))
+		if (!splitOnCaseChange && WordDelimiterFilter.isAlpha(lastType) && WordDelimiterFilter.isAlpha(type))
 		{
 		  // ALPHA->ALPHA: always ignore if case isn't considered.
 		  return false;
 		}
-		else if (WordDelimiterFilter.isUpper(lastType) && WordDelimiterFilter.isAlpha(type_Renamed))
+		else if (WordDelimiterFilter.isUpper(lastType) && WordDelimiterFilter.isAlpha(type))
 		{
 		  // UPPER->letter: Don't split
 		  return false;
 		}
-		else if (!splitOnNumerics && ((WordDelimiterFilter.isAlpha(lastType) && WordDelimiterFilter.isDigit(type_Renamed)) || (WordDelimiterFilter.isDigit(lastType) && WordDelimiterFilter.isAlpha(type_Renamed))))
+		else if (!splitOnNumerics && ((WordDelimiterFilter.isAlpha(lastType) && WordDelimiterFilter.isDigit(type)) || (WordDelimiterFilter.isDigit(lastType) && WordDelimiterFilter.isAlpha(type))))
 		{
 		  // ALPHA->NUMERIC, NUMERIC->ALPHA :Don't split
 		  return false;

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/c0aa821d/src/Lucene.Net.Analysis.Common/Analysis/Path/ReversePathHierarchyTokenizer.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Path/ReversePathHierarchyTokenizer.cs b/src/Lucene.Net.Analysis.Common/Analysis/Path/ReversePathHierarchyTokenizer.cs
index 47a5d0f..f2bddbe 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Path/ReversePathHierarchyTokenizer.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Path/ReversePathHierarchyTokenizer.cs
@@ -109,7 +109,7 @@ namespace Lucene.Net.Analysis.Path
             this.skip = skip;
             resultToken = new StringBuilder(bufferSize);
             resultTokenBuffer = new char[bufferSize];
-            delimiterPositions = new List<int?>(bufferSize / 10);
+            delimiterPositions = new List<int>(bufferSize / 10);
         }
 
         private const int DEFAULT_BUFFER_SIZE = 1024;
@@ -129,7 +129,7 @@ namespace Lucene.Net.Analysis.Path
         private int skipped = 0;
         private readonly StringBuilder resultToken;
 
-        private readonly IList<int?> delimiterPositions;
+        private readonly IList<int> delimiterPositions;
         private int delimitersCount = -1;
         private char[] resultTokenBuffer;
 
@@ -168,7 +168,7 @@ namespace Lucene.Net.Analysis.Path
                 {
                     resultTokenBuffer = new char[resultToken.Length];
                 }
-                resultToken.GetChars(0, resultToken.Length, resultTokenBuffer, 0);
+                resultToken.CopyTo(0, resultTokenBuffer, 0, resultToken.Length);
                 resultToken.Length = 0;
                 int idx = delimitersCount - 1 - skip;
                 if (idx >= 0)
@@ -186,7 +186,7 @@ namespace Lucene.Net.Analysis.Path
 
             while (skipped < delimitersCount - skip - 1)
             {
-                var start = delimiterPositions[skipped] ?? 0;
+                var start = delimiterPositions[skipped];
                 termAtt.CopyBuffer(resultTokenBuffer, start, endPosition - start);
                 offsetAtt.SetOffset(CorrectOffset(start), CorrectOffset(endPosition));
                 skipped++;

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/c0aa821d/src/Lucene.Net.Analysis.Common/Analysis/Util/AbstractAnalysisFactory.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Util/AbstractAnalysisFactory.cs b/src/Lucene.Net.Analysis.Common/Analysis/Util/AbstractAnalysisFactory.cs
index 5cbedda..64dde54 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Util/AbstractAnalysisFactory.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Util/AbstractAnalysisFactory.cs
@@ -11,235 +11,235 @@ using Version = Lucene.Net.Util.LuceneVersion;
 namespace Lucene.Net.Analysis.Util
 {
 
-	/*
-	 * Licensed to the Apache Software Foundation (ASF) under one or more
-	 * contributor license agreements.  See the NOTICE file distributed with
-	 * this work for additional information regarding copyright ownership.
-	 * The ASF licenses this file to You under the Apache License, Version 2.0
-	 * (the "License"); you may not use this file except in compliance with
-	 * the License.  You may obtain a copy of the License at
-	 *
-	 *     http://www.apache.org/licenses/LICENSE-2.0
-	 *
-	 * Unless required by applicable law or agreed to in writing, software
-	 * distributed under the License is distributed on an "AS IS" BASIS,
-	 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-	 * See the License for the specific language governing permissions and
-	 * limitations under the License.
-	 */
+    /*
+     * Licensed to the Apache Software Foundation (ASF) under one or more
+     * contributor license agreements.  See the NOTICE file distributed with
+     * this work for additional information regarding copyright ownership.
+     * The ASF licenses this file to You under the Apache License, Version 2.0
+     * (the "License"); you may not use this file except in compliance with
+     * the License.  You may obtain a copy of the License at
+     *
+     *     http://www.apache.org/licenses/LICENSE-2.0
+     *
+     * Unless required by applicable law or agreed to in writing, software
+     * distributed under the License is distributed on an "AS IS" BASIS,
+     * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+     * See the License for the specific language governing permissions and
+     * limitations under the License.
+     */
     /// <summary>
-	/// Abstract parent class for analysis factories <seealso cref="TokenizerFactory"/>,
-	/// <seealso cref="TokenFilterFactory"/> and <seealso cref="CharFilterFactory"/>.
-	/// <para>
-	/// The typical lifecycle for a factory consumer is:
-	/// <ol>
-	///   <li>Create factory via its constructor (or via XXXFactory.forName)
-	///   <li>(Optional) If the factory uses resources such as files, <seealso cref="ResourceLoaderAware#inform(ResourceLoader)"/> is called to initialize those resources.
-	///   <li>Consumer calls create() to obtain instances.
-	/// </ol>
-	/// </para>
-	/// </summary>
-	public abstract class AbstractAnalysisFactory
-	{
-	  public const string LUCENE_MATCH_VERSION_PARAM = "luceneMatchVersion";
+    /// Abstract parent class for analysis factories <seealso cref="TokenizerFactory"/>,
+    /// <seealso cref="TokenFilterFactory"/> and <seealso cref="CharFilterFactory"/>.
+    /// <para>
+    /// The typical lifecycle for a factory consumer is:
+    /// <ol>
+    ///   <li>Create factory via its constructor (or via XXXFactory.forName)
+    ///   <li>(Optional) If the factory uses resources such as files, <seealso cref="ResourceLoaderAware#inform(ResourceLoader)"/> is called to initialize those resources.
+    ///   <li>Consumer calls create() to obtain instances.
+    /// </ol>
+    /// </para>
+    /// </summary>
+    public abstract class AbstractAnalysisFactory
+    {
+        public const string LUCENE_MATCH_VERSION_PARAM = "luceneMatchVersion";
 
-	  /// <summary>
-	  /// The original args, before any processing </summary>
-	  private readonly IDictionary<string, string> originalArgs;
+        /// <summary>
+        /// The original args, before any processing </summary>
+        private readonly IDictionary<string, string> originalArgs;
 
-	  /// <summary>
-	  /// the luceneVersion arg </summary>
-	  protected internal readonly LuceneVersion? luceneMatchVersion;
+        /// <summary>
+        /// the luceneVersion arg </summary>
+        protected internal readonly LuceneVersion? luceneMatchVersion;
 
         /// <summary>
-	  /// Initialize this factory via a set of key-value pairs.
-	  /// </summary>
-	  protected internal AbstractAnalysisFactory(IDictionary<string, string> args)
-	  {
-	      ExplicitLuceneMatchVersion = false;
-	      originalArgs = Collections.UnmodifiableMap(args);
-		string version = get(args, LUCENE_MATCH_VERSION_PARAM);
-		luceneMatchVersion = version == null ? (LuceneVersion?) null : LuceneVersionHelpers.ParseLeniently(version);
-		args.Remove(CLASS_NAME); // consume the class arg
-	  }
+        /// Initialize this factory via a set of key-value pairs.
+        /// </summary>
+        protected internal AbstractAnalysisFactory(IDictionary<string, string> args)
+        {
+            ExplicitLuceneMatchVersion = false;
+            originalArgs = Collections.UnmodifiableMap(args);
+            string version = get(args, LUCENE_MATCH_VERSION_PARAM);
+            luceneMatchVersion = version == null ? (LuceneVersion?)null : LuceneVersionHelpers.ParseLeniently(version);
+            args.Remove(CLASS_NAME); // consume the class arg
+        }
 
-	  public IDictionary<string, string> OriginalArgs
-	  {
-		  get
-		  {
-			return originalArgs;
-		  }
-	  }
+        public IDictionary<string, string> OriginalArgs
+        {
+            get
+            {
+                return originalArgs;
+            }
+        }
 
-	   /// <summary>
-	   /// this method can be called in the <seealso cref="TokenizerFactory#create(java.io.Reader)"/>
-	   /// or <seealso cref="TokenFilterFactory#create(org.apache.lucene.analysis.TokenStream)"/> methods,
-	   /// to inform user, that for this factory a <seealso cref="#luceneMatchVersion"/> is required 
-	   /// </summary>
-	  protected internal void assureMatchVersion()
-	  {
-		if (luceneMatchVersion == null)
-		{
-		  throw new System.ArgumentException("Configuration Error: Factory '" + this.GetType().FullName + "' needs a 'luceneMatchVersion' parameter");
-		}
-	  }
+        /// <summary>
+        /// this method can be called in the <seealso cref="TokenizerFactory#create(java.io.Reader)"/>
+        /// or <seealso cref="TokenFilterFactory#create(org.apache.lucene.analysis.TokenStream)"/> methods,
+        /// to inform user, that for this factory a <seealso cref="#luceneMatchVersion"/> is required 
+        /// </summary>
+        protected internal void assureMatchVersion()
+        {
+            if (luceneMatchVersion == null)
+            {
+                throw new System.ArgumentException("Configuration Error: Factory '" + this.GetType().FullName + "' needs a 'luceneMatchVersion' parameter");
+            }
+        }
 
-	  public LuceneVersion? LuceneMatchVersion
-	  {
-		  get
-		  {
-			return this.luceneMatchVersion;
-		  }
-	  }
+        public LuceneVersion? LuceneMatchVersion
+        {
+            get
+            {
+                return this.luceneMatchVersion;
+            }
+        }
 
-	  public virtual string require(IDictionary<string, string> args, string name)
-	  {
-		string s = args.Remove(name);
-		if (s == null)
-		{
-		  throw new System.ArgumentException("Configuration Error: missing parameter '" + name + "'");
-		}
-		return s;
-	  }
-	  public virtual string require(IDictionary<string, string> args, string name, ICollection<string> allowedValues)
-	  {
-		return require(args, name, allowedValues, true);
-	  }
-	  public virtual string require(IDictionary<string, string> args, string name, ICollection<string> allowedValues, bool caseSensitive)
-	  {
-		string s = args.Remove(name);
-		if (s == null)
-		{
-		  throw new System.ArgumentException("Configuration Error: missing parameter '" + name + "'");
-		}
-		else
-		{
-		  foreach (string allowedValue in allowedValues)
-		  {
-			if (caseSensitive)
-			{
-			  if (s.Equals(allowedValue))
-			  {
-				return s;
-			  }
-			}
-			else
-			{
-			  if (s.Equals(allowedValue, StringComparison.CurrentCultureIgnoreCase))
-			  {
-				return s;
-			  }
-			}
-		  }
-		  throw new System.ArgumentException("Configuration Error: '" + name + "' value must be one of " + allowedValues);
-		}
-	  }
-	  public virtual string get(IDictionary<string, string> args, string name)
-	  {
-		return args.Remove(name); // defaultVal = null
-	  }
-	  public virtual string get(IDictionary<string, string> args, string name, string defaultVal)
-	  {
-		string s = args.Remove(name);
-		return s == null ? defaultVal : s;
-	  }
-	  public virtual string get(IDictionary<string, string> args, string name, ICollection<string> allowedValues)
-	  {
-		return get(args, name, allowedValues, null); // defaultVal = null
-	  }
-	  public virtual string get(IDictionary<string, string> args, string name, ICollection<string> allowedValues, string defaultVal)
-	  {
-		return get(args, name, allowedValues, defaultVal, true);
-	  }
-	  public virtual string get(IDictionary<string, string> args, string name, ICollection<string> allowedValues, string defaultVal, bool caseSensitive)
-	  {
-		string s = args.Remove(name);
-		if (s == null)
-		{
-		  return defaultVal;
-		}
-		else
-		{
-		  foreach (string allowedValue in allowedValues)
-		  {
-			if (caseSensitive)
-			{
-			  if (s.Equals(allowedValue))
-			  {
-				return s;
-			  }
-			}
-			else
-			{
-			  if (s.Equals(allowedValue, StringComparison.CurrentCultureIgnoreCase))
-			  {
-				return s;
-			  }
-			}
-		  }
-		  throw new System.ArgumentException("Configuration Error: '" + name + "' value must be one of " + allowedValues);
-		}
-	  }
+        public virtual string require(IDictionary<string, string> args, string name)
+        {
+            string s = args.Remove(name);
+            if (s == null)
+            {
+                throw new System.ArgumentException("Configuration Error: missing parameter '" + name + "'");
+            }
+            return s;
+        }
+        public virtual string require(IDictionary<string, string> args, string name, ICollection<string> allowedValues)
+        {
+            return require(args, name, allowedValues, true);
+        }
+        public virtual string require(IDictionary<string, string> args, string name, ICollection<string> allowedValues, bool caseSensitive)
+        {
+            string s = args.Remove(name);
+            if (s == null)
+            {
+                throw new System.ArgumentException("Configuration Error: missing parameter '" + name + "'");
+            }
+            else
+            {
+                foreach (string allowedValue in allowedValues)
+                {
+                    if (caseSensitive)
+                    {
+                        if (s.Equals(allowedValue))
+                        {
+                            return s;
+                        }
+                    }
+                    else
+                    {
+                        if (s.Equals(allowedValue, StringComparison.CurrentCultureIgnoreCase))
+                        {
+                            return s;
+                        }
+                    }
+                }
+                throw new System.ArgumentException("Configuration Error: '" + name + "' value must be one of " + allowedValues);
+            }
+        }
+        public virtual string get(IDictionary<string, string> args, string name)
+        {
+            return args.Remove(name); // defaultVal = null
+        }
+        public virtual string get(IDictionary<string, string> args, string name, string defaultVal)
+        {
+            string s = args.Remove(name);
+            return s == null ? defaultVal : s;
+        }
+        public virtual string get(IDictionary<string, string> args, string name, ICollection<string> allowedValues)
+        {
+            return get(args, name, allowedValues, null); // defaultVal = null
+        }
+        public virtual string get(IDictionary<string, string> args, string name, ICollection<string> allowedValues, string defaultVal)
+        {
+            return get(args, name, allowedValues, defaultVal, true);
+        }
+        public virtual string get(IDictionary<string, string> args, string name, ICollection<string> allowedValues, string defaultVal, bool caseSensitive)
+        {
+            string s = args.Remove(name);
+            if (s == null)
+            {
+                return defaultVal;
+            }
+            else
+            {
+                foreach (string allowedValue in allowedValues)
+                {
+                    if (caseSensitive)
+                    {
+                        if (s.Equals(allowedValue))
+                        {
+                            return s;
+                        }
+                    }
+                    else
+                    {
+                        if (s.Equals(allowedValue, StringComparison.CurrentCultureIgnoreCase))
+                        {
+                            return s;
+                        }
+                    }
+                }
+                throw new System.ArgumentException("Configuration Error: '" + name + "' value must be one of " + allowedValues);
+            }
+        }
 
-	  protected internal int requireInt(IDictionary<string, string> args, string name)
-	  {
-		return int.Parse(require(args, name));
-	  }
-	  protected internal int getInt(IDictionary<string, string> args, string name, int defaultVal)
-	  {
-		string s = args.Remove(name);
-		return s == null ? defaultVal : int.Parse(s);
-	  }
+        protected internal int requireInt(IDictionary<string, string> args, string name)
+        {
+            return int.Parse(require(args, name));
+        }
+        protected internal int getInt(IDictionary<string, string> args, string name, int defaultVal)
+        {
+            string s = args.Remove(name);
+            return s == null ? defaultVal : int.Parse(s);
+        }
 
-	  protected internal bool requireBoolean(IDictionary<string, string> args, string name)
-	  {
-		return bool.Parse(require(args, name));
-	  }
-	  protected internal bool getBoolean(IDictionary<string, string> args, string name, bool defaultVal)
-	  {
-		string s = args.Remove(name);
-		return s == null ? defaultVal : bool.Parse(s);
-	  }
+        protected internal bool requireBoolean(IDictionary<string, string> args, string name)
+        {
+            return bool.Parse(require(args, name));
+        }
+        protected internal bool getBoolean(IDictionary<string, string> args, string name, bool defaultVal)
+        {
+            string s = args.Remove(name);
+            return s == null ? defaultVal : bool.Parse(s);
+        }
 
-	  protected internal float requireFloat(IDictionary<string, string> args, string name)
-	  {
-		return float.Parse(require(args, name));
-	  }
-	  protected internal float getFloat(IDictionary<string, string> args, string name, float defaultVal)
-	  {
-		string s = args.Remove(name);
-		return s == null ? defaultVal : float.Parse(s);
-	  }
+        protected internal float requireFloat(IDictionary<string, string> args, string name)
+        {
+            return float.Parse(require(args, name));
+        }
+        protected internal float getFloat(IDictionary<string, string> args, string name, float defaultVal)
+        {
+            string s = args.Remove(name);
+            return s == null ? defaultVal : float.Parse(s);
+        }
 
-	  public virtual char requireChar(IDictionary<string, string> args, string name)
-	  {
-		return require(args, name)[0];
-	  }
-	  public virtual char getChar(IDictionary<string, string> args, string name, char defaultValue)
-	  {
-		string s = args.Remove(name);
-		if (s == null)
-		{
-		  return defaultValue;
-		}
-		else
-		{
-		  if (s.Length != 1)
-		  {
-			throw new System.ArgumentException(name + " should be a char. \"" + s + "\" is invalid");
-		  }
-		  else
-		  {
-			return s[0];
-		  }
-		}
-	  }
+        public virtual char requireChar(IDictionary<string, string> args, string name)
+        {
+            return require(args, name)[0];
+        }
+        public virtual char getChar(IDictionary<string, string> args, string name, char defaultValue)
+        {
+            string s = args.Remove(name);
+            if (s == null)
+            {
+                return defaultValue;
+            }
+            else
+            {
+                if (s.Length != 1)
+                {
+                    throw new System.ArgumentException(name + " should be a char. \"" + s + "\" is invalid");
+                }
+                else
+                {
+                    return s[0];
+                }
+            }
+        }
 
-	  private static readonly Pattern ITEM_PATTERN = Pattern.compile("[^,\\s]+");
+        private static readonly Pattern ITEM_PATTERN = Pattern.compile("[^,\\s]+");
 
-	  /// <summary>
-	  /// Returns whitespace- and/or comma-separated set of values, or null if none are found </summary>
-	  public virtual HashSet<string> getSet(IDictionary<string, string> args, string name)
+        /// <summary>
+        /// Returns whitespace- and/or comma-separated set of values, or null if none are found </summary>
+        public virtual HashSet<string> getSet(IDictionary<string, string> args, string name)
 	  {
 		string s = args.Remove(name);
 		if (s == null)
@@ -263,128 +263,128 @@ namespace Lucene.Net.Analysis.Util
 		}
 	  }
 
-	  /// <summary>
-	  /// Compiles a pattern for the value of the specified argument key <code>name</code> 
-	  /// </summary>
-	  protected internal Pattern GetPattern(IDictionary<string, string> args, string name)
-	  {
-		try
-		{
-		  return Pattern.compile(require(args, name));
-		}
-		catch (PatternSyntaxException e)
-		{
-		  throw new System.ArgumentException("Configuration Error: '" + name + "' can not be parsed in " + this.GetType().Name, e);
-		}
-	  }
+        /// <summary>
+        /// Compiles a pattern for the value of the specified argument key <code>name</code> 
+        /// </summary>
+        protected internal Pattern GetPattern(IDictionary<string, string> args, string name)
+        {
+            try
+            {
+                return Pattern.compile(require(args, name));
+            }
+            catch (PatternSyntaxException e)
+            {
+                throw new System.ArgumentException("Configuration Error: '" + name + "' can not be parsed in " + this.GetType().Name, e);
+            }
+        }
 
-	  /// <summary>
-	  /// Returns as <seealso cref="CharArraySet"/> from wordFiles, which
-	  /// can be a comma-separated list of filenames
-	  /// </summary>
-	  protected internal CharArraySet GetWordSet(ResourceLoader loader, string wordFiles, bool ignoreCase)
-	  {
-		assureMatchVersion();
-		IList<string> files = splitFileNames(wordFiles);
-		CharArraySet words = null;
-		if (files.Count > 0)
-		{
-		  // default stopwords list has 35 or so words, but maybe don't make it that
-		  // big to start
-		  words = new CharArraySet(luceneMatchVersion, files.Count * 10, ignoreCase);
-		  foreach (string file in files)
-		  {
-			var wlist = getLines(loader, file.Trim());
-			words.AddAll(StopFilter.makeStopSet(luceneMatchVersion, wlist, ignoreCase));
-		  }
-		}
-		return words;
-	  }
+        /// <summary>
+        /// Returns as <seealso cref="CharArraySet"/> from wordFiles, which
+        /// can be a comma-separated list of filenames
+        /// </summary>
+        protected internal CharArraySet GetWordSet(ResourceLoader loader, string wordFiles, bool ignoreCase)
+        {
+            assureMatchVersion();
+            IList<string> files = splitFileNames(wordFiles);
+            CharArraySet words = null;
+            if (files.Count > 0)
+            {
+                // default stopwords list has 35 or so words, but maybe don't make it that
+                // big to start
+                words = new CharArraySet(luceneMatchVersion, files.Count * 10, ignoreCase);
+                foreach (string file in files)
+                {
+                    var wlist = getLines(loader, file.Trim());
+                    words.AddAll(StopFilter.makeStopSet(luceneMatchVersion, wlist, ignoreCase));
+                }
+            }
+            return words;
+        }
 
-	  /// <summary>
-	  /// Returns the resource's lines (with content treated as UTF-8)
-	  /// </summary>
-	  protected internal IList<string> getLines(ResourceLoader loader, string resource)
-	  {
-		return WordlistLoader.getLines(loader.openResource(resource), StandardCharsets.UTF_8);
-	  }
+        /// <summary>
+        /// Returns the resource's lines (with content treated as UTF-8)
+        /// </summary>
+        protected internal IList<string> getLines(ResourceLoader loader, string resource)
+        {
+            return WordlistLoader.getLines(loader.openResource(resource), StandardCharsets.UTF_8);
+        }
 
-	  /// <summary>
-	  /// same as <seealso cref="#getWordSet(ResourceLoader, String, boolean)"/>,
-	  /// except the input is in snowball format. 
-	  /// </summary>
-	  protected internal CharArraySet getSnowballWordSet(ResourceLoader loader, string wordFiles, bool ignoreCase)
-	  {
-		assureMatchVersion();
-		IList<string> files = splitFileNames(wordFiles);
-		CharArraySet words = null;
-		if (files.Count > 0)
-		{
-		  // default stopwords list has 35 or so words, but maybe don't make it that
-		  // big to start
-		  words = new CharArraySet(luceneMatchVersion, files.Count * 10, ignoreCase);
-		  foreach (string file in files)
-		  {
-			InputStream stream = null;
-			TextReader reader = null;
-			try
-			{
-			  stream = loader.openResource(file.Trim());
-			  CharsetDecoder decoder = StandardCharsets.UTF_8.newDecoder().onMalformedInput(CodingErrorAction.REPORT).onUnmappableCharacter(CodingErrorAction.REPORT);
-			  reader = new InputStreamReader(stream, decoder);
-			  WordlistLoader.getSnowballWordSet(reader, words);
-			}
-			finally
-			{
-			  IOUtils.closeWhileHandlingException(reader, stream);
-			}
-		  }
-		}
-		return words;
-	  }
+        /// <summary>
+        /// same as <seealso cref="#getWordSet(ResourceLoader, String, boolean)"/>,
+        /// except the input is in snowball format. 
+        /// </summary>
+        protected internal CharArraySet getSnowballWordSet(ResourceLoader loader, string wordFiles, bool ignoreCase)
+        {
+            assureMatchVersion();
+            IList<string> files = splitFileNames(wordFiles);
+            CharArraySet words = null;
+            if (files.Count > 0)
+            {
+                // default stopwords list has 35 or so words, but maybe don't make it that
+                // big to start
+                words = new CharArraySet(luceneMatchVersion, files.Count * 10, ignoreCase);
+                foreach (string file in files)
+                {
+                    InputStream stream = null;
+                    TextReader reader = null;
+                    try
+                    {
+                        stream = loader.openResource(file.Trim());
+                        CharsetDecoder decoder = StandardCharsets.UTF_8.newDecoder().onMalformedInput(CodingErrorAction.REPORT).onUnmappableCharacter(CodingErrorAction.REPORT);
+                        reader = new InputStreamReader(stream, decoder);
+                        WordlistLoader.getSnowballWordSet(reader, words);
+                    }
+                    finally
+                    {
+                        IOUtils.closeWhileHandlingException(reader, stream);
+                    }
+                }
+            }
+            return words;
+        }
 
-	  /// <summary>
-	  /// Splits file names separated by comma character.
-	  /// File names can contain comma characters escaped by backslash '\'
-	  /// </summary>
-	  /// <param name="fileNames"> the string containing file names </param>
-	  /// <returns> a list of file names with the escaping backslashed removed </returns>
-	  protected internal IList<string> splitFileNames(string fileNames)
-	  {
-		if (fileNames == null)
-		{
-		  return System.Linq.Enumerable.Empty<string>();
-		}
+        /// <summary>
+        /// Splits file names separated by comma character.
+        /// File names can contain comma characters escaped by backslash '\'
+        /// </summary>
+        /// <param name="fileNames"> the string containing file names </param>
+        /// <returns> a list of file names with the escaping backslashed removed </returns>
+        protected internal IList<string> splitFileNames(string fileNames)
+        {
+            if (fileNames == null)
+            {
+                return System.Linq.Enumerable.Empty<string>();
+            }
 
-		IList<string> result = new List<string>();
-		foreach (string file in fileNames.Split("(?<!\\\\),", true))
-		{
-		  result.Add(file.replaceAll("\\\\(?=,)", ""));
-		}
+            IList<string> result = new List<string>();
+            foreach (string file in fileNames.Split("(?<!\\\\),", true))
+            {
+                result.Add(file.replaceAll("\\\\(?=,)", ""));
+            }
 
-		return result;
-	  }
+            return result;
+        }
 
-	  private const string CLASS_NAME = "class";
+        private const string CLASS_NAME = "class";
 
-	  /// <returns> the string used to specify the concrete class name in a serialized representation: the class arg.  
-	  ///         If the concrete class name was not specified via a class arg, returns {@code getClass().getName()}. </returns>
-	  public virtual string ClassArg
-	  {
-		  get
-		  {
-			if (null != originalArgs)
-			{
-			  string className = originalArgs[CLASS_NAME];
-			  if (null != className)
-			  {
-				return className;
-			  }
-			}
-			return this.GetType().Name;
-		  }
-	  }
+        /// <returns> the string used to specify the concrete class name in a serialized representation: the class arg.  
+        ///         If the concrete class name was not specified via a class arg, returns {@code getClass().getName()}. </returns>
+        public virtual string ClassArg
+        {
+            get
+            {
+                if (null != originalArgs)
+                {
+                    string className = originalArgs[CLASS_NAME];
+                    if (null != className)
+                    {
+                        return className;
+                    }
+                }
+                return this.GetType().Name;
+            }
+        }
 
         public virtual bool ExplicitLuceneMatchVersion { get; set; }
-	}
+    }
 }
\ No newline at end of file