Posted to commits@lucenenet.apache.org by mh...@apache.org on 2013/09/24 20:32:55 UTC
[19/50] [abbrv] Massive cleanup, reducing compiler errors
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/80561f72/src/core/Codecs/Compressing/CompressingTermVectorsWriter.cs
----------------------------------------------------------------------
diff --git a/src/core/Codecs/Compressing/CompressingTermVectorsWriter.cs b/src/core/Codecs/Compressing/CompressingTermVectorsWriter.cs
index d91cc23..6aef620 100644
--- a/src/core/Codecs/Compressing/CompressingTermVectorsWriter.cs
+++ b/src/core/Codecs/Compressing/CompressingTermVectorsWriter.cs
@@ -10,121 +10,121 @@ using System.Text;
namespace Lucene.Net.Codecs.Compressing
{
- public sealed class CompressingTermVectorsWriter: TermVectorsWriter
+ public sealed class CompressingTermVectorsWriter : TermVectorsWriter
{
- public static int MAX_DOCUMENTS_PER_CHUNK = 128;
+ public const int MAX_DOCUMENTS_PER_CHUNK = 128;
- static string VECTORS_EXTENSION = "tvd";
- static string VECTORS_INDEX_EXTENSION = "tvx";
+ internal const string VECTORS_EXTENSION = "tvd";
+ internal const string VECTORS_INDEX_EXTENSION = "tvx";
- static string CODEC_SFX_IDX = "Index";
- static string CODEC_SFX_DAT = "Data";
+ internal const string CODEC_SFX_IDX = "Index";
+ internal const string CODEC_SFX_DAT = "Data";
- static int VERSION_START = 0;
- static int VERSION_CURRENT = VERSION_START;
+ internal const int VERSION_START = 0;
+ internal const int VERSION_CURRENT = VERSION_START;
- static int BLOCK_SIZE = 64;
+ internal const int BLOCK_SIZE = 64;
- static int POSITIONS = 0x01;
- static int OFFSETS = 0x02;
- static int PAYLOADS = 0x04;
- static int FLAGS_BITS = PackedInts.BitsRequired(POSITIONS | OFFSETS | PAYLOADS);
+ internal const int POSITIONS = 0x01;
+ internal const int OFFSETS = 0x02;
+ internal const int PAYLOADS = 0x04;
+ internal static readonly int FLAGS_BITS = PackedInts.BitsRequired(POSITIONS | OFFSETS | PAYLOADS);
- private Directory directory;
- private string segment;
- private string segmentSuffix;
+ private readonly Directory directory;
+ private readonly string segment;
+ private readonly string segmentSuffix;
private CompressingStoredFieldsIndexWriter indexWriter;
private IndexOutput vectorsStream;
- private CompressionMode compressionMode;
- private Compressor compressor;
- private int chunkSize;
+ private readonly CompressionMode compressionMode;
+ private readonly Compressor compressor;
+ private readonly int chunkSize;
- private int numDocs; // total number of docs seen
- private Deque<DocData> pendingDocs; // pending docs
- private DocData curDoc; // current document
- private FieldData curField; // current field
- private BytesRef lastTerm;
- private int[] positionsBuf, startOffsetsBuf, lengthsBuf, payloadLengthsBuf;
- private GrowableByteArrayDataOutput termSuffixes; // buffered term suffixes
- private GrowableByteArrayDataOutput payloadBytes; // buffered term payloads
- private BlockPackedWriter writer;
-
/** a pending doc */
- private class DocData
+ private class DocData
{
- int numFields;
- Deque<FieldData> fields;
- int posStart, offStart, payStart;
- DocData(int numFields, int posStart, int offStart, int payStart) {
+ internal readonly int numFields;
+ internal readonly LinkedList<FieldData> fields;
+ internal readonly int posStart, offStart, payStart;
+
+ private readonly CompressingTermVectorsWriter parent;
+
+ internal DocData(CompressingTermVectorsWriter parent, int numFields, int posStart, int offStart, int payStart)
+ {
+ this.parent = parent; // .NET Port
+
this.numFields = numFields;
- this.fields = new ArrayDeque<FieldData>(numFields);
+ this.fields = new LinkedList<FieldData>();
this.posStart = posStart;
this.offStart = offStart;
this.payStart = payStart;
}
- FieldData addField(int fieldNum, int numTerms, bool positions, bool offsets, bool payloads)
+ internal FieldData AddField(int fieldNum, int numTerms, bool positions, bool offsets, bool payloads)
{
FieldData field;
- if (fields.isEmpty())
+ if (fields.Count == 0)
{
- field = new FieldData(fieldNum, numTerms, positions, offsets, payloads, posStart, offStart, payStart);
- }
- else
+ field = new FieldData(parent, fieldNum, numTerms, positions, offsets, payloads, posStart, offStart, payStart);
+ }
+ else
{
- FieldData last = fields.getLast();
+ FieldData last = fields.Last.Value;
int posStart = last.posStart + (last.hasPositions ? last.totalPositions : 0);
int offStart = last.offStart + (last.hasOffsets ? last.totalPositions : 0);
int payStart = last.payStart + (last.hasPayloads ? last.totalPositions : 0);
- field = new FieldData(fieldNum, numTerms, positions, offsets, payloads, posStart, offStart, payStart);
+ field = new FieldData(parent, fieldNum, numTerms, positions, offsets, payloads, posStart, offStart, payStart);
}
- fields.add(field);
+ fields.AddLast(field);
return field;
}
}
- private DocData addDocData(int numVectorFields)
+ private DocData AddDocData(int numVectorFields)
{
FieldData last = null;
- for (Iterator<DocData> it = pendingDocs.descendingIterator(); it.hasNext(); )
+ foreach (DocData doc in pendingDocs.Reverse())
{
- final DocData doc = it.next();
- if (!doc.fields.isEmpty())
+ //DocData doc = it.next();
+ if (doc.fields.Count > 0)
{
- last = doc.fields.getLast();
+ last = doc.fields.Last.Value;
break;
}
}
- DocData doc;
- if (last == null)
+ DocData doc2;
+ if (last == null)
{
- doc = new DocData(numVectorFields, 0, 0, 0);
- }
- else
+ doc2 = new DocData(this, numVectorFields, 0, 0, 0);
+ }
+ else
{
int posStart = last.posStart + (last.hasPositions ? last.totalPositions : 0);
int offStart = last.offStart + (last.hasOffsets ? last.totalPositions : 0);
int payStart = last.payStart + (last.hasPayloads ? last.totalPositions : 0);
- doc = new DocData(numVectorFields, posStart, offStart, payStart);
+ doc2 = new DocData(this, numVectorFields, posStart, offStart, payStart);
}
- pendingDocs.add(doc);
- return doc;
+ pendingDocs.AddLast(doc2);
+ return doc2;
}
/** a pending field */
- private class FieldData
+ private class FieldData
{
- bool hasPositions, hasOffsets, hasPayloads;
- int fieldNum, flags, numTerms;
- int[] freqs, prefixLengths, suffixLengths;
- int posStart, offStart, payStart;
- int totalPositions;
- int ord;
+ internal readonly bool hasPositions, hasOffsets, hasPayloads;
+ internal readonly int fieldNum, flags, numTerms;
+ internal readonly int[] freqs, prefixLengths, suffixLengths;
+ internal readonly int posStart, offStart, payStart;
+ internal int totalPositions;
+ internal int ord;
+
+ private readonly CompressingTermVectorsWriter parent;
- public FieldData(int fieldNum, int numTerms, bool positions, bool offsets, bool payloads, int posStart, int offStart, int payStart)
+ public FieldData(CompressingTermVectorsWriter parent, int fieldNum, int numTerms, bool positions, bool offsets, bool payloads, int posStart, int offStart, int payStart)
{
+ this.parent = parent; // .NET Port
+
this.fieldNum = fieldNum;
this.numTerms = numTerms;
this.hasPositions = positions;
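
A note on the pattern this hunk introduces: Java inner classes hold an implicit reference to their enclosing instance, so the original DocData and FieldData could read the writer's buffers directly. C# nested classes have no such implicit capture, which is why the port threads an explicit parent reference through the constructors (the lines marked "// .NET Port"). A minimal sketch of the idiom, with illustrative names rather than the port's own:

    public class OuterWriter
    {
        private int[] buffer = new int[16];

        private class Inner
        {
            private readonly OuterWriter parent; // explicit link to the enclosing instance

            internal Inner(OuterWriter parent)
            {
                this.parent = parent;
            }

            internal void Touch(int i)
            {
                parent.buffer[i]++; // in Java this would simply be buffer[i]++
            }
        }

        public void Demo()
        {
            new Inner(this).Touch(0);
        }
    }
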
@@ -141,48 +141,61 @@ namespace Lucene.Net.Codecs.Compressing
ord = 0;
}
- public void addTerm(int freq, int prefixLength, int suffixLength)
+ public void AddTerm(int freq, int prefixLength, int suffixLength)
{
- freqs[ord] = freq;
- prefixLengths[ord] = prefixLength;
- suffixLengths[ord] = suffixLength;
- ++ord;
+ freqs[ord] = freq;
+ prefixLengths[ord] = prefixLength;
+ suffixLengths[ord] = suffixLength;
+ ++ord;
}
-
- public void addPosition(int position, int startOffset, int length, int payloadLength)
+
+ public void AddPosition(int position, int startOffset, int length, int payloadLength)
{
- if (hasPositions)
- {
- if (posStart + totalPositions == positionsBuf.length)
+ if (hasPositions)
{
- positionsBuf = ArrayUtil.grow(positionsBuf);
- }
+ if (posStart + totalPositions == parent.positionsBuf.Length)
+ {
+ parent.positionsBuf = ArrayUtil.Grow(parent.positionsBuf);
+ }
- positionsBuf[posStart + totalPositions] = position;
- }
- if (hasOffsets) {
- if (offStart + totalPositions == startOffsetsBuf.length)
+ parent.positionsBuf[posStart + totalPositions] = position;
+ }
+ if (hasOffsets)
{
- int newLength = ArrayUtil.Oversize(offStart + totalPositions, 4);
- startOffsetsBuf = Arrays.CopyOf(startOffsetsBuf, newLength);
- lengthsBuf = Arrays.CopyOf(lengthsBuf, newLength);
+ if (offStart + totalPositions == parent.startOffsetsBuf.Length)
+ {
+ int newLength = ArrayUtil.Oversize(offStart + totalPositions, 4);
+ parent.startOffsetsBuf = Arrays.CopyOf(parent.startOffsetsBuf, newLength);
+ parent.lengthsBuf = Arrays.CopyOf(parent.lengthsBuf, newLength);
+ }
+ parent.startOffsetsBuf[offStart + totalPositions] = startOffset;
+ parent.lengthsBuf[offStart + totalPositions] = length;
}
- startOffsetsBuf[offStart + totalPositions] = startOffset;
- lengthsBuf[offStart + totalPositions] = length;
- }
- if (hasPayloads) {
- if (payStart + totalPositions == payloadLengthsBuf.length) {
- payloadLengthsBuf = ArrayUtil.Grow(payloadLengthsBuf);
+ if (hasPayloads)
+ {
+ if (payStart + totalPositions == parent.payloadLengthsBuf.Length)
+ {
+ parent.payloadLengthsBuf = ArrayUtil.Grow(parent.payloadLengthsBuf);
+ }
+ parent.payloadLengthsBuf[payStart + totalPositions] = payloadLength;
}
- payloadLengthsBuf[payStart + totalPositions] = payloadLength;
- }
- ++totalPositions;
+ ++totalPositions;
}
}
+ private int numDocs; // total number of docs seen
+ private readonly LinkedList<DocData> pendingDocs; // pending docs
+ private DocData curDoc; // current document
+ private FieldData curField; // current field
+ private readonly BytesRef lastTerm;
+ private int[] positionsBuf, startOffsetsBuf, lengthsBuf, payloadLengthsBuf;
+ private readonly GrowableByteArrayDataOutput termSuffixes; // buffered term suffixes
+ private readonly GrowableByteArrayDataOutput payloadBytes; // buffered term payloads
+ private readonly BlockPackedWriter writer;
+
/** Sole constructor. */
public CompressingTermVectorsWriter(Directory directory, SegmentInfo si, string segmentSuffix, IOContext context,
- String formatName, CompressionMode compressionMode, int chunkSize)
+ String formatName, CompressionMode compressionMode, int chunkSize)
{
this.directory = directory;
this.segment = si.name;
@@ -192,20 +205,21 @@ namespace Lucene.Net.Codecs.Compressing
this.chunkSize = chunkSize;
numDocs = 0;
- pendingDocs = new ArrayDeque<DocData>();
+ pendingDocs = new LinkedList<DocData>();
termSuffixes = new GrowableByteArrayDataOutput(ArrayUtil.Oversize(chunkSize, 1));
payloadBytes = new GrowableByteArrayDataOutput(ArrayUtil.Oversize(1, 1));
lastTerm = new BytesRef(ArrayUtil.Oversize(30, 1));
bool success = false;
IndexOutput indexStream = directory.CreateOutput(IndexFileNames.SegmentFileName(segment, segmentSuffix, VECTORS_INDEX_EXTENSION), context);
- try {
+ try
+ {
vectorsStream = directory.CreateOutput(IndexFileNames.SegmentFileName(segment, segmentSuffix, VECTORS_EXTENSION), context);
string codecNameIdx = formatName + CODEC_SFX_IDX;
string codecNameDat = formatName + CODEC_SFX_DAT;
- CodecUtil.writeHeader(indexStream, codecNameIdx, VERSION_CURRENT);
- CodecUtil.writeHeader(vectorsStream, codecNameDat, VERSION_CURRENT);
+ CodecUtil.WriteHeader(indexStream, codecNameIdx, VERSION_CURRENT);
+ CodecUtil.WriteHeader(vectorsStream, codecNameDat, VERSION_CURRENT);
indexWriter = new CompressingStoredFieldsIndexWriter(indexStream);
indexStream = null;
@@ -220,27 +234,31 @@ namespace Lucene.Net.Codecs.Compressing
payloadLengthsBuf = new int[1024];
success = true;
- } finally {
- if (!success) {
- IOUtils.CloseWhileHandlingException(indexStream);
- Abort();
+ }
+ finally
+ {
+ if (!success)
+ {
+ IOUtils.CloseWhileHandlingException((IDisposable)indexStream);
+ Abort();
}
}
}
public override void StartDocument(int numVectorFields)
{
- curDoc = addDocData(numVectorFields);
+ curDoc = AddDocData(numVectorFields);
}
- public override void FinishDocument()
+ public override void FinishDocument()
{
// append the payload bytes of the doc after its terms
termSuffixes.WriteBytes(payloadBytes.Bytes, payloadBytes.Length);
payloadBytes.Length = 0;
++numDocs;
- if (triggerFlush()) {
- Flush();
+ if (TriggerFlush())
+ {
+ Flush();
}
curDoc = null;
}
@@ -259,11 +277,12 @@ namespace Lucene.Net.Codecs.Compressing
public override void StartTerm(Util.BytesRef term, int freq)
{
int prefix = StringHelper.BytesDifference(lastTerm, term);
- curField.addTerm(freq, prefix, term.length - prefix);
+ curField.AddTerm(freq, prefix, term.length - prefix);
termSuffixes.WriteBytes(term.bytes, term.offset + prefix, term.length - prefix);
// copy last term
- if (lastTerm.bytes.Length < term.length) {
- lastTerm.bytes = new sbyte[ArrayUtil.Oversize(term.length, 1)];
+ if (lastTerm.bytes.Length < term.length)
+ {
+ lastTerm.bytes = new sbyte[ArrayUtil.Oversize(term.length, 1)];
}
lastTerm.offset = 0;
lastTerm.length = term.length;
@@ -272,346 +291,412 @@ namespace Lucene.Net.Codecs.Compressing
public override void AddPosition(int position, int startOffset, int endOffset, Util.BytesRef payload)
{
- curField.addPosition(position, startOffset, endOffset - startOffset, payload == null ? 0 : payload.length);
- if (curField.HasPayloads && payload != null)
+ curField.AddPosition(position, startOffset, endOffset - startOffset, payload == null ? 0 : payload.length);
+ if (curField.hasPayloads && payload != null)
{
payloadBytes.WriteBytes(payload.bytes, payload.offset, payload.length);
}
}
- private bool triggerFlush()
+ private bool TriggerFlush()
{
return termSuffixes.Length >= chunkSize
- || pendingDocs.size() >= MAX_DOCUMENTS_PER_CHUNK;
+ || pendingDocs.Count >= MAX_DOCUMENTS_PER_CHUNK;
}
- private void flush()
+ private void Flush()
{
- int chunkDocs = pendingDocs.size();
+ int chunkDocs = pendingDocs.Count;
// write the index file
- indexWriter.WriteIndex(chunkDocs, vectorsStream.GetFilePointer());
+ indexWriter.WriteIndex(chunkDocs, vectorsStream.FilePointer);
int docBase = numDocs - chunkDocs;
vectorsStream.WriteVInt(docBase);
vectorsStream.WriteVInt(chunkDocs);
// total number of fields of the chunk
- int totalFields = flushNumFields(chunkDocs);
-
- if (totalFields > 0) {
- // unique field numbers (sorted)
- int[] fieldNums = flushFieldNums();
- // offsets in the array of unique field numbers
- flushFields(totalFields, fieldNums);
- // flags (does the field have positions, offsets, payloads?)
- flushFlags(totalFields, fieldNums);
- // number of terms of each field
- flushNumTerms(totalFields);
- // prefix and suffix lengths for each field
- flushTermLengths();
- // term freqs - 1 (because termFreq is always >=1) for each term
- flushTermFreqs();
- // positions for all terms, when enabled
- flushPositions();
- // offsets for all terms, when enabled
- flushOffsets(fieldNums);
- // payload lengths for all terms, when enabled
- flushPayloadLengths();
-
- // compress terms and payloads and write them to the output
- compressor.Compress(termSuffixes.Bytes, 0, termSuffixes.Length, vectorsStream);
+ int totalFields = FlushNumFields(chunkDocs);
+
+ if (totalFields > 0)
+ {
+ // unique field numbers (sorted)
+ int[] fieldNums = FlushFieldNums();
+ // offsets in the array of unique field numbers
+ FlushFields(totalFields, fieldNums);
+ // flags (does the field have positions, offsets, payloads?)
+ FlushFlags(totalFields, fieldNums);
+ // number of terms of each field
+ FlushNumTerms(totalFields);
+ // prefix and suffix lengths for each field
+ FlushTermLengths();
+ // term freqs - 1 (because termFreq is always >=1) for each term
+ FlushTermFreqs();
+ // positions for all terms, when enabled
+ FlushPositions();
+ // offsets for all terms, when enabled
+ FlushOffsets(fieldNums);
+ // payload lengths for all terms, when enabled
+ FlushPayloadLengths();
+
+ // compress terms and payloads and write them to the output
+ compressor.Compress(termSuffixes.Bytes, 0, termSuffixes.Length, vectorsStream);
}
// reset
- pendingDocs.clear();
+ pendingDocs.Clear();
curDoc = null;
curField = null;
termSuffixes.Length = 0;
}
- private int flushNumFields(int chunkDocs)
+ private int FlushNumFields(int chunkDocs)
{
- if (chunkDocs == 1) {
- int numFields = pendingDocs.getFirst().numFields;
- vectorsStream.WriteVInt(numFields);
- return numFields;
- } else {
- writer.Reset(vectorsStream);
- int totalFields = 0;
- for (DocData dd : pendingDocs) {
- writer.Add(dd.numFields);
- totalFields += dd.numFields;
- }
- writer.Finish();
- return totalFields;
+ if (chunkDocs == 1)
+ {
+ int numFields = pendingDocs.First.Value.numFields;
+ vectorsStream.WriteVInt(numFields);
+ return numFields;
+ }
+ else
+ {
+ writer.Reset(vectorsStream);
+ int totalFields = 0;
+ foreach (DocData dd in pendingDocs)
+ {
+ writer.Add(dd.numFields);
+ totalFields += dd.numFields;
+ }
+ writer.Finish();
+ return totalFields;
}
}
- /** Returns a sorted array containing unique field numbers */
- private int[] flushFieldNums()
+ /** Returns a sorted array containing unique field numbers */
+ private int[] FlushFieldNums()
{
- SortedSet<int> fieldNums = new TreeSet<int>();
- for (DocData dd : pendingDocs) {
- for (FieldData fd : dd.fields) {
- fieldNums.Add(fd.fieldNum);
+ SortedSet<int> fieldNums = new SortedSet<int>();
+ foreach (DocData dd in pendingDocs)
+ {
+ foreach (FieldData fd in dd.fields)
+ {
+ fieldNums.Add(fd.fieldNum);
}
}
- int numDistinctFields = fieldNums.size();
- int bitsRequired = PackedInts.bitsRequired(fieldNums.Last());
+ int numDistinctFields = fieldNums.Count;
+ int bitsRequired = PackedInts.BitsRequired(fieldNums.Last());
int token = (Math.Min(numDistinctFields - 1, 0x07) << 5) | bitsRequired;
- vectorsStream.WriteByte((byte) token);
- if (numDistinctFields - 1 >= 0x07) {
+ vectorsStream.WriteByte((byte)token);
+ if (numDistinctFields - 1 >= 0x07)
+ {
vectorsStream.WriteVInt(numDistinctFields - 1 - 0x07);
}
- PackedInts.Writer writer = PackedInts.getWriterNoHeader(vectorsStream, PackedInts.Format.PACKED, fieldNums.size(), bitsRequired, 1);
- for (int fieldNum : fieldNums) {
+ PackedInts.Writer writer = PackedInts.GetWriterNoHeader(vectorsStream, PackedInts.Format.PACKED, fieldNums.Count, bitsRequired, 1);
+ foreach (int fieldNum in fieldNums)
+ {
writer.Add(fieldNum);
}
writer.Finish();
- int[] fns = new int[fieldNums.size()];
+ int[] fns = new int[fieldNums.Count];
int i = 0;
- for (int key : fieldNums) {
+ foreach (int key in fieldNums)
+ {
fns[i++] = key;
}
return fns;
}
- private void flushFields(int totalFields, int[] fieldNums) throws IOException {
- final PackedInts.Writer writer = PackedInts.getWriterNoHeader(vectorsStream, PackedInts.Format.PACKED, totalFields, PackedInts.bitsRequired(fieldNums.length - 1), 1);
- for (DocData dd : pendingDocs) {
- for (FieldData fd : dd.fields) {
- final int fieldNumIndex = Arrays.binarySearch(fieldNums, fd.fieldNum);
- assert fieldNumIndex >= 0;
- writer.add(fieldNumIndex);
+ private void FlushFields(int totalFields, int[] fieldNums)
+ {
+ PackedInts.Writer writer = PackedInts.GetWriterNoHeader(vectorsStream, PackedInts.Format.PACKED, totalFields, PackedInts.BitsRequired(fieldNums.Length - 1), 1);
+ foreach (DocData dd in pendingDocs)
+ {
+ foreach (FieldData fd in dd.fields)
+ {
+ int fieldNumIndex = Array.BinarySearch(fieldNums, fd.fieldNum);
+ //assert fieldNumIndex >= 0;
+ writer.Add(fieldNumIndex);
}
}
- writer.finish();
+ writer.Finish();
}
- private void flushFlags(int totalFields, int[] fieldNums)
+ private void FlushFlags(int totalFields, int[] fieldNums)
{
// check if fields always have the same flags
bool nonChangingFlags = true;
int[] fieldFlags = new int[fieldNums.Length];
Arrays.Fill(fieldFlags, -1);
- outer:
- for (DocData dd : pendingDocs) {
- for (FieldData fd : dd.fields) {
- int fieldNumOff = Arrays.BinarySearch(fieldNums, fd.ieldNum);
- if (fieldFlags[fieldNumOff] == -1) {
- fieldFlags[fieldNumOff] = fd.flags;
- } else if (fieldFlags[fieldNumOff] != fd.flags) {
- nonChangingFlags = false;
- break outer;
- }
+ bool shouldBreakOuter;
+ foreach (DocData dd in pendingDocs)
+ {
+ shouldBreakOuter = false;
+ foreach (FieldData fd in dd.fields)
+ {
+ int fieldNumOff = Array.BinarySearch(fieldNums, fd.fieldNum);
+ if (fieldFlags[fieldNumOff] == -1)
+ {
+ fieldFlags[fieldNumOff] = fd.flags;
+ }
+ else if (fieldFlags[fieldNumOff] != fd.flags)
+ {
+ nonChangingFlags = false;
+ shouldBreakOuter = true;
+ }
}
+
+ if (shouldBreakOuter)
+ break;
}
- if (nonChangingFlags) {
+ if (nonChangingFlags)
+ {
// write one flag per field num
vectorsStream.WriteVInt(0);
- PackedInts.Writer writer = PackedInts.GetWriterNoHeader(vectorsStream, PackedInts.Format.PACKED, fieldFlags.length, FLAGS_BITS, 1);
- for (int flags : fieldFlags) {
- writer.Add(flags);
+ PackedInts.Writer writer = PackedInts.GetWriterNoHeader(vectorsStream, PackedInts.Format.PACKED, fieldFlags.Length, FLAGS_BITS, 1);
+ foreach (int flags in fieldFlags)
+ {
+ writer.Add(flags);
}
writer.Finish();
- } else {
+ }
+ else
+ {
// write one flag for every field instance
vectorsStream.WriteVInt(1);
PackedInts.Writer writer = PackedInts.GetWriterNoHeader(vectorsStream, PackedInts.Format.PACKED, totalFields, FLAGS_BITS, 1);
- for (DocData dd : pendingDocs) {
- for (FieldData fd : dd.fields) {
- writer.add(fd.flags);
- }
+ foreach (DocData dd in pendingDocs)
+ {
+ foreach (FieldData fd in dd.fields)
+ {
+ writer.Add(fd.flags);
+ }
}
writer.Finish();
}
}
- private void flushNumTerms(int totalFields)
+ private void FlushNumTerms(int totalFields)
{
int maxNumTerms = 0;
- for (DocData dd : pendingDocs) {
- for (FieldData fd : dd.fields) {
- maxNumTerms |= fd.numTerms;
+ foreach (DocData dd in pendingDocs)
+ {
+ foreach (FieldData fd in dd.fields)
+ {
+ maxNumTerms |= fd.numTerms;
}
}
-
- int bitsRequired = PackedInts.bitsRequired(maxNumTerms);
+
+ int bitsRequired = PackedInts.BitsRequired(maxNumTerms);
vectorsStream.WriteVInt(bitsRequired);
- PackedInts.Writer writer = PackedInts.getWriterNoHeader(
+ PackedInts.Writer writer = PackedInts.GetWriterNoHeader(
vectorsStream, PackedInts.Format.PACKED, totalFields, bitsRequired, 1);
- for (DocData dd : pendingDocs) {
- for (FieldData fd : dd.fields) {
- writer.add(fd.numTerms);
+ foreach (DocData dd in pendingDocs)
+ {
+ foreach (FieldData fd in dd.fields)
+ {
+ writer.Add(fd.numTerms);
}
}
- writer.finish();
+ writer.Finish();
}
- private void flushTermLengths()
+ private void FlushTermLengths()
{
- writer.reset(vectorsStream);
- for (DocData dd : pendingDocs) {
- for (FieldData fd : dd.fields) {
- for (int i = 0; i < fd.numTerms; ++i) {
- writer.add(fd.prefixLengths[i]);
- }
+ writer.Reset(vectorsStream);
+ foreach (DocData dd in pendingDocs)
+ {
+ foreach (FieldData fd in dd.fields)
+ {
+ for (int i = 0; i < fd.numTerms; ++i)
+ {
+ writer.Add(fd.prefixLengths[i]);
+ }
}
}
- writer.finish();
- writer.reset(vectorsStream);
- for (DocData dd : pendingDocs) {
- for (FieldData fd : dd.fields) {
- for (int i = 0; i < fd.numTerms; ++i) {
- writer.add(fd.suffixLengths[i]);
- }
+ writer.Finish();
+ writer.Reset(vectorsStream);
+ foreach (DocData dd in pendingDocs)
+ {
+ foreach (FieldData fd in dd.fields)
+ {
+ for (int i = 0; i < fd.numTerms; ++i)
+ {
+ writer.Add(fd.suffixLengths[i]);
+ }
}
}
- writer.finish();
+ writer.Finish();
}
- private void flushTermFreqs()
+ private void FlushTermFreqs()
{
- writer.reset(vectorsStream);
- for (DocData dd : pendingDocs) {
- for (FieldData fd : dd.fields) {
- for (int i = 0; i < fd.numTerms; ++i) {
- writer.add(fd.freqs[i] - 1);
- }
+ writer.Reset(vectorsStream);
+ foreach (DocData dd in pendingDocs)
+ {
+ foreach (FieldData fd in dd.fields)
+ {
+ for (int i = 0; i < fd.numTerms; ++i)
+ {
+ writer.Add(fd.freqs[i] - 1);
+ }
}
}
- writer.finish();
+ writer.Finish();
}
- private void flushPositions()
+ private void FlushPositions()
{
- writer.reset(vectorsStream);
- for (DocData dd : pendingDocs) {
- for (FieldData fd : dd.fields) {
- if (fd.hasPositions) {
- int pos = 0;
- for (int i = 0; i < fd.numTerms; ++i) {
- int previousPosition = 0;
- for (int j = 0; j < fd.freqs[i]; ++j) {
- int position = positionsBuf[fd .posStart + pos++];
- writer.add(position - previousPosition);
- previousPosition = position;
- }
+ writer.Reset(vectorsStream);
+ foreach (DocData dd in pendingDocs)
+ {
+ foreach (FieldData fd in dd.fields)
+ {
+ if (fd.hasPositions)
+ {
+ int pos = 0;
+ for (int i = 0; i < fd.numTerms; ++i)
+ {
+ int previousPosition = 0;
+ for (int j = 0; j < fd.freqs[i]; ++j)
+ {
+ int position = positionsBuf[fd.posStart + pos++];
+ writer.Add(position - previousPosition);
+ previousPosition = position;
+ }
+ }
}
}
- }
}
- writer.finish();
+ writer.Finish();
}
- private void flushOffsets(int[] fieldNums)
+ private void FlushOffsets(int[] fieldNums)
{
bool hasOffsets = false;
- long[] sumPos = new long[fieldNums.length];
- long[] sumOffsets = new long[fieldNums.length];
- for (DocData dd : pendingDocs) {
- for (FieldData fd : dd.fields) {
- hasOffsets |= fd.hasOffsets;
- if (fd.hasOffsets && fd.hasPositions) {
- int fieldNumOff = Arrays.binarySearch(fieldNums, fd.fieldNum);
- int pos = 0;
- for (int i = 0; i < fd.numTerms; ++i) {
- int previousPos = 0;
- int previousOff = 0;
- for (int j = 0; j < fd.freqs[i]; ++j) {
- int position = positionsBuf[fd.posStart + pos];
- int startOffset = startOffsetsBuf[fd.offStart + pos];
- sumPos[fieldNumOff] += position - previousPos;
- sumOffsets[fieldNumOff] += startOffset - previousOff;
- previousPos = position;
- previousOff = startOffset;
- ++pos;
- }
+ long[] sumPos = new long[fieldNums.Length];
+ long[] sumOffsets = new long[fieldNums.Length];
+ foreach (DocData dd in pendingDocs)
+ {
+ foreach (FieldData fd in dd.fields)
+ {
+ hasOffsets |= fd.hasOffsets;
+ if (fd.hasOffsets && fd.hasPositions)
+ {
+ int fieldNumOff = Array.BinarySearch(fieldNums, fd.fieldNum);
+ int pos = 0;
+ for (int i = 0; i < fd.numTerms; ++i)
+ {
+ int previousPos = 0;
+ int previousOff = 0;
+ for (int j = 0; j < fd.freqs[i]; ++j)
+ {
+ int position = positionsBuf[fd.posStart + pos];
+ int startOffset = startOffsetsBuf[fd.offStart + pos];
+ sumPos[fieldNumOff] += position - previousPos;
+ sumOffsets[fieldNumOff] += startOffset - previousOff;
+ previousPos = position;
+ previousOff = startOffset;
+ ++pos;
+ }
+ }
}
}
- }
}
- if (!hasOffsets) {
+ if (!hasOffsets)
+ {
// nothing to do
return;
}
- float[] charsPerTerm = new float[fieldNums.length];
- for (int i = 0; i < fieldNums.length; ++i) {
- charsPerTerm[i] = (sumPos[i] <= 0 || sumOffsets[i] <= 0) ? 0 : (float) ((double) sumOffsets[i] / sumPos[i]);
+ float[] charsPerTerm = new float[fieldNums.Length];
+ for (int i = 0; i < fieldNums.Length; ++i)
+ {
+ charsPerTerm[i] = (sumPos[i] <= 0 || sumOffsets[i] <= 0) ? 0 : (float)((double)sumOffsets[i] / sumPos[i]);
}
// start offsets
- for (int i = 0; i < fieldNums.length; ++i) {
- vectorsStream.writeInt(Float.floatToRawIntBits(charsPerTerm[i]));
- }
-
- writer.reset(vectorsStream);
- for (DocData dd : pendingDocs) {
- for (FieldData fd : dd.fields) {
- if ((fd.flags & OFFSETS) != 0) {
- int fieldNumOff = Arrays.binarySearch(fieldNums, fd.fieldNum);
- float cpt = charsPerTerm[fieldNumOff];
- int pos = 0;
- for (int i = 0; i < fd.numTerms; ++i) {
- int previousPos = 0;
- int previousOff = 0;
- for (int j = 0; j < fd.freqs[i]; ++j) {
- final int position = fd.hasPositions ? positionsBuf[fd.posStart + pos] : 0;
- final int startOffset = startOffsetsBuf[fd.offStart + pos];
- writer.add(startOffset - previousOff - (int) (cpt * (position - previousPos)));
- previousPos = position;
- previousOff = startOffset;
- ++pos;
- }
+ for (int i = 0; i < fieldNums.Length; ++i)
+ {
+ vectorsStream.WriteInt(Number.FloatToIntBits(charsPerTerm[i]));
+ }
+
+ writer.Reset(vectorsStream);
+ foreach (DocData dd in pendingDocs)
+ {
+ foreach (FieldData fd in dd.fields)
+ {
+ if ((fd.flags & OFFSETS) != 0)
+ {
+ int fieldNumOff = Array.BinarySearch(fieldNums, fd.fieldNum);
+ float cpt = charsPerTerm[fieldNumOff];
+ int pos = 0;
+ for (int i = 0; i < fd.numTerms; ++i)
+ {
+ int previousPos = 0;
+ int previousOff = 0;
+ for (int j = 0; j < fd.freqs[i]; ++j)
+ {
+ int position = fd.hasPositions ? positionsBuf[fd.posStart + pos] : 0;
+ int startOffset = startOffsetsBuf[fd.offStart + pos];
+ writer.Add(startOffset - previousOff - (int)(cpt * (position - previousPos)));
+ previousPos = position;
+ previousOff = startOffset;
+ ++pos;
+ }
+ }
}
}
- }
}
- writer.finish();
+ writer.Finish();
// lengths
- writer.reset(vectorsStream);
- for (DocData dd : pendingDocs) {
- for (FieldData fd : dd.fields) {
- if ((fd.flags & OFFSETS) != 0) {
- int pos = 0;
- for (int i = 0; i < fd.numTerms; ++i) {
- for (int j = 0; j < fd.freqs[i]; ++j) {
- writer.add(lengthsBuf[fd.offStart + pos++] - fd.prefixLengths[i] - fd.suffixLengths[i]);
- }
+ writer.Reset(vectorsStream);
+ foreach (DocData dd in pendingDocs)
+ {
+ foreach (FieldData fd in dd.fields)
+ {
+ if ((fd.flags & OFFSETS) != 0)
+ {
+ int pos = 0;
+ for (int i = 0; i < fd.numTerms; ++i)
+ {
+ for (int j = 0; j < fd.freqs[i]; ++j)
+ {
+ writer.Add(lengthsBuf[fd.offStart + pos++] - fd.prefixLengths[i] - fd.suffixLengths[i]);
+ }
+ }
}
}
- }
}
- writer.finish();
+ writer.Finish();
}
- private void flushPayloadLengths()
+ private void FlushPayloadLengths()
{
- writer.reset(vectorsStream);
- for (DocData dd : pendingDocs) {
- for (FieldData fd : dd.fields) {
- if (fd.hasPayloads) {
- for (int i = 0; i < fd.totalPositions; ++i) {
- writer.add(payloadLengthsBuf[fd.payStart + i]);
+ writer.Reset(vectorsStream);
+ foreach (DocData dd in pendingDocs)
+ {
+ foreach (FieldData fd in dd.fields)
+ {
+ if (fd.hasPayloads)
+ {
+ for (int i = 0; i < fd.totalPositions; ++i)
+ {
+ writer.Add(payloadLengthsBuf[fd.payStart + i]);
+ }
}
}
- }
}
- writer.finish();
+ writer.Finish();
}
-
-
public override void Abort()
{
- IOUtils.CloseWhileHandlingException(this);
+ IOUtils.CloseWhileHandlingException((IDisposable)this);
IOUtils.DeleteFilesIgnoringExceptions(directory,
IndexFileNames.SegmentFileName(segment, segmentSuffix, VECTORS_EXTENSION),
IndexFileNames.SegmentFileName(segment, segmentSuffix, VECTORS_INDEX_EXTENSION));
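
The FlushFlags rewrite in this hunk is the standard workaround for Java's labeled break (break outer;), which C# does not have: a flag is set in the inner loop and tested after it. A self-contained sketch of the same control flow:

    using System.Collections.Generic;

    static class LabeledBreakDemo
    {
        // Equivalent of Java's "outer: for (...) { for (...) { break outer; } }"
        static bool ContainsSentinel(List<int[]> rows, int sentinel)
        {
            bool shouldBreakOuter = false;
            foreach (int[] row in rows)
            {
                foreach (int v in row)
                {
                    if (v == sentinel)
                    {
                        shouldBreakOuter = true;
                        break;      // exits the inner loop only
                    }
                }
                if (shouldBreakOuter)
                    break;          // exits the outer loop
            }
            return shouldBreakOuter;
        }
    }
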
@@ -619,171 +704,191 @@ namespace Lucene.Net.Codecs.Compressing
public override void Finish(Index.FieldInfos fis, int numDocs)
{
- if (!pendingDocs.isEmpty()) {
- flush();
+ if (pendingDocs.Count > 0)
+ {
+ Flush();
}
- if (numDocs != this.numDocs) {
- throw new RuntimeException("Wrote " + this.numDocs + " docs, finish called with numDocs=" + numDocs);
+ if (numDocs != this.numDocs)
+ {
+ throw new SystemException("Wrote " + this.numDocs + " docs, finish called with numDocs=" + numDocs);
}
- indexWriter.finish(numDocs);
+ indexWriter.Finish(numDocs);
}
public override IComparer<Util.BytesRef> Comparator
{
- get
- {
- return BytesRef.getUTF8SortedAsUnicodeComparator();
+ get
+ {
+ return BytesRef.UTF8SortedAsUnicodeComparer;
}
}
- public void addProx(int numProx, DataInput positions, DataInput offsets)
+ public override void AddProx(int numProx, DataInput positions, DataInput offsets)
{
- if (curField.hasPositions) {
- final int posStart = curField.posStart + curField.totalPositions;
- if (posStart + numProx > positionsBuf.length) {
- positionsBuf = ArrayUtil.grow(positionsBuf, posStart + numProx);
+ if (curField.hasPositions)
+ {
+ int posStart = curField.posStart + curField.totalPositions;
+ if (posStart + numProx > positionsBuf.Length)
+ {
+ positionsBuf = ArrayUtil.Grow(positionsBuf, posStart + numProx);
}
int position = 0;
- if (curField.hasPayloads) {
- final int payStart = curField.payStart + curField.totalPositions;
- if (payStart + numProx > payloadLengthsBuf.length) {
- payloadLengthsBuf = ArrayUtil.grow(payloadLengthsBuf, payStart + numProx);
- }
- for (int i = 0; i < numProx; ++i) {
- final int code = positions.readVInt();
- if ((code & 1) != 0) {
- // This position has a payload
- final int payloadLength = positions.readVInt();
- payloadLengthsBuf[payStart + i] = payloadLength;
- payloadBytes.copyBytes(positions, payloadLength);
- } else {
- payloadLengthsBuf[payStart + i] = 0;
+ if (curField.hasPayloads)
+ {
+ int payStart = curField.payStart + curField.totalPositions;
+ if (payStart + numProx > payloadLengthsBuf.Length)
+ {
+ payloadLengthsBuf = ArrayUtil.Grow(payloadLengthsBuf, payStart + numProx);
+ }
+ for (int i = 0; i < numProx; ++i)
+ {
+ int code = positions.ReadVInt();
+ if ((code & 1) != 0)
+ {
+ // This position has a payload
+ int payloadLength = positions.ReadVInt();
+ payloadLengthsBuf[payStart + i] = payloadLength;
+ payloadBytes.CopyBytes(positions, payloadLength);
+ }
+ else
+ {
+ payloadLengthsBuf[payStart + i] = 0;
+ }
+ position += Number.URShift(code, 1);
+ positionsBuf[posStart + i] = position;
}
- position += code >>> 1;
- positionsBuf[posStart + i] = position;
- }
- } else {
- for (int i = 0; i < numProx; ++i) {
- position += (positions.readVInt() >>> 1);
- positionsBuf[posStart + i] = position;
}
+ else
+ {
+ for (int i = 0; i < numProx; ++i)
+ {
+ position += Number.URShift(positions.ReadVInt(), 1);
+ positionsBuf[posStart + i] = position;
+ }
}
}
- if (curField.hasOffsets) {
+ if (curField.hasOffsets)
+ {
int offStart = curField.offStart + curField.totalPositions;
- if (offStart + numProx > startOffsetsBuf.length) {
- int newLength = ArrayUtil.oversize(offStart + numProx, 4);
- startOffsetsBuf = Arrays.copyOf(startOffsetsBuf, newLength);
- lengthsBuf = Arrays.copyOf(lengthsBuf, newLength);
+ if (offStart + numProx > startOffsetsBuf.Length)
+ {
+ int newLength = ArrayUtil.Oversize(offStart + numProx, 4);
+ startOffsetsBuf = Arrays.CopyOf(startOffsetsBuf, newLength);
+ lengthsBuf = Arrays.CopyOf(lengthsBuf, newLength);
}
-
+
int lastOffset = 0, startOffset, endOffset;
- for (int i = 0; i < numProx; ++i) {
- startOffset = lastOffset + offsets.readVInt();
- endOffset = startOffset + offsets.readVInt();
- lastOffset = endOffset;
- startOffsetsBuf[offStart + i] = startOffset;
- lengthsBuf[offStart + i] = endOffset - startOffset;
+ for (int i = 0; i < numProx; ++i)
+ {
+ startOffset = lastOffset + offsets.ReadVInt();
+ endOffset = startOffset + offsets.ReadVInt();
+ lastOffset = endOffset;
+ startOffsetsBuf[offStart + i] = startOffset;
+ lengthsBuf[offStart + i] = endOffset - startOffset;
}
}
curField.totalPositions += numProx;
}
- public int merge(MergeState mergeState)
+ public override int Merge(MergeState mergeState)
{
int docCount = 0;
int idx = 0;
- for (AtomicReader reader : mergeState.readers)
+ foreach (AtomicReader reader in mergeState.readers)
{
SegmentReader matchingSegmentReader = mergeState.matchingSegmentReaders[idx++];
CompressingTermVectorsReader matchingVectorsReader = null;
if (matchingSegmentReader != null) {
- TermVectorsReader vectorsReader = matchingSegmentReader.getTermVectorsReader();
+ TermVectorsReader vectorsReader = matchingSegmentReader.TermVectorsReader;
// we can only bulk-copy if the matching reader is also a CompressingTermVectorsReader
- if (vectorsReader != null && vectorsReader instanceof CompressingTermVectorsReader) {
+ if (vectorsReader != null && vectorsReader is CompressingTermVectorsReader) {
matchingVectorsReader = (CompressingTermVectorsReader) vectorsReader;
}
}
- int maxDoc = reader.maxDoc();
- Bits liveDocs = reader.getLiveDocs();
+ int maxDoc = reader.MaxDoc;
+ IBits liveDocs = reader.LiveDocs;
if (matchingVectorsReader == null
- || matchingVectorsReader.getCompressionMode() != compressionMode
- || matchingVectorsReader.getChunkSize() != chunkSize
- || matchingVectorsReader.getPackedIntsVersion() != PackedInts.VERSION_CURRENT) {
+ || matchingVectorsReader.CompressionMode != compressionMode
+ || matchingVectorsReader.ChunkSize != chunkSize
+ || matchingVectorsReader.PackedIntsVersion != PackedInts.VERSION_CURRENT) {
// naive merge...
- for (int i = nextLiveDoc(0, liveDocs, maxDoc); i < maxDoc; i = nextLiveDoc(i + 1, liveDocs, maxDoc)) {
- Fields vectors = reader.getTermVectors(i);
- addAllDocVectors(vectors, mergeState);
+ for (int i = NextLiveDoc(0, liveDocs, maxDoc); i < maxDoc; i = NextLiveDoc(i + 1, liveDocs, maxDoc)) {
+ Fields vectors = reader.GetTermVectors(i);
+ AddAllDocVectors(vectors, mergeState);
++docCount;
- mergeState.checkAbort.work(300);
+ mergeState.checkAbort.Work(300);
}
} else {
- CompressingStoredFieldsIndexReader index = matchingVectorsReader.getIndex();
- IndexInput vectorsStream = matchingVectorsReader.getVectorsStream();
- for (int i = nextLiveDoc(0, liveDocs, maxDoc); i < maxDoc; ) {
- if (pendingDocs.isEmpty()
- && (i == 0 || index.getStartPointer(i - 1) < index.getStartPointer(i))) { // start of a chunk
- long startPointer = index.getStartPointer(i);
- vectorsStream.seek(startPointer);
- int docBase = vectorsStream.readVInt();
- int chunkDocs = vectorsStream.readVInt();
- if (docBase + chunkDocs < matchingSegmentReader.maxDoc()
- && nextDeletedDoc(docBase, liveDocs, docBase + chunkDocs) == docBase + chunkDocs) {
- long chunkEnd = index.getStartPointer(docBase + chunkDocs);
- long chunkLength = chunkEnd - vectorsStream.getFilePointer();
- indexWriter.writeIndex(chunkDocs, this.vectorsStream.getFilePointer());
- this.vectorsStream.writeVInt(docCount);
- this.vectorsStream.writeVInt(chunkDocs);
- this.vectorsStream.copyBytes(vectorsStream, chunkLength);
+ CompressingStoredFieldsIndexReader index = matchingVectorsReader.Index;
+ IndexInput vectorsStream = matchingVectorsReader.VectorsStream;
+ for (int i = NextLiveDoc(0, liveDocs, maxDoc); i < maxDoc; ) {
+ if (pendingDocs.Count == 0
+ && (i == 0 || index.GetStartPointer(i - 1) < index.GetStartPointer(i))) { // start of a chunk
+ long startPointer = index.GetStartPointer(i);
+ vectorsStream.Seek(startPointer);
+ int docBase = vectorsStream.ReadVInt();
+ int chunkDocs = vectorsStream.ReadVInt();
+ if (docBase + chunkDocs < matchingSegmentReader.MaxDoc
+ && NextDeletedDoc(docBase, liveDocs, docBase + chunkDocs) == docBase + chunkDocs) {
+ long chunkEnd = index.GetStartPointer(docBase + chunkDocs);
+ long chunkLength = chunkEnd - vectorsStream.FilePointer;
+ indexWriter.WriteIndex(chunkDocs, this.vectorsStream.FilePointer);
+ this.vectorsStream.WriteVInt(docCount);
+ this.vectorsStream.WriteVInt(chunkDocs);
+ this.vectorsStream.CopyBytes(vectorsStream, chunkLength);
docCount += chunkDocs;
this.numDocs += chunkDocs;
- mergeState.checkAbort.work(300 * chunkDocs);
- i = nextLiveDoc(docBase + chunkDocs, liveDocs, maxDoc);
+ mergeState.checkAbort.Work(300 * chunkDocs);
+ i = NextLiveDoc(docBase + chunkDocs, liveDocs, maxDoc);
} else {
- for (; i < docBase + chunkDocs; i = nextLiveDoc(i + 1, liveDocs, maxDoc)) {
- Fields vectors = reader.getTermVectors(i);
- addAllDocVectors(vectors, mergeState);
+ for (; i < docBase + chunkDocs; i = NextLiveDoc(i + 1, liveDocs, maxDoc)) {
+ Fields vectors = reader.GetTermVectors(i);
+ AddAllDocVectors(vectors, mergeState);
++docCount;
- mergeState.checkAbort.work(300);
+ mergeState.checkAbort.Work(300);
}
}
} else {
- Fields vectors = reader.getTermVectors(i);
- addAllDocVectors(vectors, mergeState);
+ Fields vectors = reader.GetTermVectors(i);
+ AddAllDocVectors(vectors, mergeState);
++docCount;
- mergeState.checkAbort.work(300);
- i = nextLiveDoc(i + 1, liveDocs, maxDoc);
+ mergeState.checkAbort.Work(300);
+ i = NextLiveDoc(i + 1, liveDocs, maxDoc);
}
}
}
}
- finish(mergeState.fieldInfos, docCount);
+ Finish(mergeState.fieldInfos, docCount);
return docCount;
}
- private static int nextLiveDoc(int doc, Bits liveDocs, int maxDoc)
+ private static int NextLiveDoc(int doc, IBits liveDocs, int maxDoc)
{
- if (liveDocs == null) {
+ if (liveDocs == null)
+ {
return doc;
}
- while (doc < maxDoc && !liveDocs.get(doc)) {
+ while (doc < maxDoc && !liveDocs[doc])
+ {
++doc;
}
return doc;
}
- private static int nextDeletedDoc(int doc, Bits liveDocs, int maxDoc)
+ private static int NextDeletedDoc(int doc, IBits liveDocs, int maxDoc)
{
- if (liveDocs == null) {
+ if (liveDocs == null)
+ {
return maxDoc;
}
- while (doc < maxDoc && liveDocs.get(doc)) {
+ while (doc < maxDoc && liveDocs[doc])
+ {
++doc;
}
return doc;
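
The AddProx changes above replace Java's unsigned right shift (code >>> 1) with Number.URShift, since C# has no >>> operator. For non-negative VInt-decoded values the two shifts agree; the distinction only matters once the sign bit is set. A sketch of what such a helper amounts to for int (an assumption about Number.URShift's behavior, not a quote of its source):

    static class ShiftDemo
    {
        // Shift as uint so zeros, not copies of the sign bit, enter from the left.
        static int URShift(int value, int bits)
        {
            return (int)((uint)value >> bits);
        }

        static void Main()
        {
            System.Console.WriteLine(URShift(-2, 1)); // 2147483647, matching Java's -2 >>> 1
            System.Console.WriteLine(-2 >> 1);        // -1: C#'s >> on int is arithmetic
        }
    }
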
@@ -791,10 +896,12 @@ namespace Lucene.Net.Codecs.Compressing
protected override void Dispose(bool disposing)
{
- try
+ try
{
IOUtils.Close(vectorsStream, indexWriter);
- } finally {
+ }
+ finally
+ {
vectorsStream = null;
indexWriter = null;
}
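
The Dispose hunk closing this file maps Java's Closeable.close() onto the .NET dispose pattern. A minimal sketch of the shape the port uses, with a hypothetical owned resource standing in for vectorsStream/indexWriter:

    using System;

    public class ResourceHolder : IDisposable
    {
        private IDisposable stream; // hypothetical owned resource

        public void Dispose()
        {
            Dispose(true);
            GC.SuppressFinalize(this);
        }

        protected virtual void Dispose(bool disposing)
        {
            if (!disposing) return;
            try
            {
                if (stream != null) stream.Dispose();
            }
            finally
            {
                stream = null; // cleared even if Dispose throws, as in the diff
            }
        }
    }
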
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/80561f72/src/core/Codecs/Compressing/CompressionMode.cs
----------------------------------------------------------------------
diff --git a/src/core/Codecs/Compressing/CompressionMode.cs b/src/core/Codecs/Compressing/CompressionMode.cs
index 22b5fca..0982fd0 100644
--- a/src/core/Codecs/Compressing/CompressionMode.cs
+++ b/src/core/Codecs/Compressing/CompressionMode.cs
@@ -128,7 +128,7 @@ namespace Lucene.Net.Codecs.Compressing
public sealed class DecompressorLZ4 : Decompressor
{
- public override void decompress(DataInput input, int originalLength, int offset, int length, BytesRef bytes)
+ public override void Decompress(DataInput input, int originalLength, int offset, int length, BytesRef bytes)
{
// add 7 padding bytes, this is not necessary but can help decompression run faster
if (bytes.bytes.Length < originalLength + 7)
@@ -145,7 +145,7 @@ namespace Lucene.Net.Codecs.Compressing
bytes.length = length;
}
- public override Decompressor clone()
+ public override object Clone()
{
return this;
}
@@ -161,7 +161,7 @@ namespace Lucene.Net.Codecs.Compressing
ht = new LZ4.HashTable();
}
- public override void compress(byte[] bytes, int off, int len, DataOutput output)
+ public override void Compress(sbyte[] bytes, int off, int len, DataOutput output)
{
LZ4.Compress(bytes, off, len, output, ht);
}
@@ -178,7 +178,7 @@ namespace Lucene.Net.Codecs.Compressing
ht = new LZ4.HCHashTable();
}
- public override void compress(byte[] bytes, int off, int len, DataOutput output)
+ public override void Compress(sbyte[] bytes, int off, int len, DataOutput output)
{
LZ4.CompressHC(bytes, off, len, output, ht);
}
@@ -252,7 +252,7 @@ namespace Lucene.Net.Codecs.Compressing
bytes.length = length;
}
- public override Decompressor clone()
+ public override object Clone()
{
return new DeflateDecompressor();
}
@@ -271,7 +271,7 @@ namespace Lucene.Net.Codecs.Compressing
compressed = new sbyte[64];
}
- public override void compress(byte[] bytes, int off, int len, DataOutput output)
+ public override void Compress(sbyte[] bytes, int off, int len, DataOutput output)
{
compressor.Reset();
compressor.SetInput(bytes, off, len);
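
Two port conventions show up in this file: byte[] becomes sbyte[] (Java bytes are signed), and Java's covariant clone() return type becomes object Clone(), since C# at the time could not narrow an overridden return type. Callers cast the result back. A minimal sketch of the Clone shape, assuming an ICloneable-style base:

    using System;

    public abstract class Decompressor : ICloneable
    {
        public abstract object Clone(); // cannot be narrowed to Decompressor here
    }

    public sealed class NoopDecompressor : Decompressor
    {
        // Stateless, so sharing the instance is safe, as DecompressorLZ4 does.
        public override object Clone()
        {
            return this;
        }
    }

    static class CloneDemo
    {
        static void Main()
        {
            Decompressor d = new NoopDecompressor();
            Decompressor copy = (Decompressor)d.Clone(); // cast restores the type
            Console.WriteLine(ReferenceEquals(d, copy)); // True
        }
    }
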
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/80561f72/src/core/Codecs/FieldsProducer.cs
----------------------------------------------------------------------
diff --git a/src/core/Codecs/FieldsProducer.cs b/src/core/Codecs/FieldsProducer.cs
index 0f46fc7..873ce08 100644
--- a/src/core/Codecs/FieldsProducer.cs
+++ b/src/core/Codecs/FieldsProducer.cs
@@ -12,11 +12,11 @@ namespace Lucene.Net.Codecs
{
}
- public abstract IEnumerator<string> GetEnumerator();
+ public abstract override IEnumerator<string> GetEnumerator();
- public abstract Terms Terms(string field);
+ public abstract override Terms Terms(string field);
- public abstract int Size { get; }
+ public abstract override int Size { get; }
public void Dispose()
{
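
The abstract override modifier added here is required because the base Fields class already declares these members: a plain abstract redeclaration would hide the inherited abstract member, which is a compile error (the hidden member could never be implemented). abstract override re-declares the member in the override chain while leaving it abstract. A minimal sketch:

    using System.Collections;
    using System.Collections.Generic;

    public abstract class Fields : IEnumerable<string>
    {
        public abstract IEnumerator<string> GetEnumerator();

        IEnumerator IEnumerable.GetEnumerator()
        {
            return GetEnumerator();
        }
    }

    public abstract class FieldsProducer : Fields
    {
        // "abstract" alone would hide Fields.GetEnumerator and fail to compile;
        // "abstract override" keeps it abstract for concrete producers.
        public abstract override IEnumerator<string> GetEnumerator();
    }
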
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/80561f72/src/core/Codecs/Lucene3x/Lucene3xStoredFieldsReader.cs
----------------------------------------------------------------------
diff --git a/src/core/Codecs/Lucene3x/Lucene3xStoredFieldsReader.cs b/src/core/Codecs/Lucene3x/Lucene3xStoredFieldsReader.cs
index 52d3e41..caa2e21 100644
--- a/src/core/Codecs/Lucene3x/Lucene3xStoredFieldsReader.cs
+++ b/src/core/Codecs/Lucene3x/Lucene3xStoredFieldsReader.cs
@@ -198,7 +198,7 @@ namespace Lucene.Net.Codecs.Lucene3x
indexStream.Seek(FORMAT_SIZE + (docID + docStoreOffset) * 8L);
}
- public void VisitDocument(int n, StoredFieldVisitor visitor)
+ public override void VisitDocument(int n, StoredFieldVisitor visitor)
{
SeekIndex(n);
fieldsStream.Seek(indexStream.ReadLong());
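
The one-word VisitDocument fix above matters because an identical signature without override silently hides the base method, so calls made through the base type still dispatch to the base implementation. A minimal sketch of the difference:

    using System;

    class ReaderBase
    {
        public virtual string Visit() { return "base"; }
    }

    class HidingReader : ReaderBase
    {
        public new string Visit() { return "hiding"; } // not polymorphic
    }

    class OverridingReader : ReaderBase
    {
        public override string Visit() { return "override"; } // polymorphic
    }

    static class HidingDemo
    {
        static void Main()
        {
            ReaderBase a = new HidingReader();
            ReaderBase b = new OverridingReader();
            Console.WriteLine(a.Visit()); // "base": the hiding method is bypassed
            Console.WriteLine(b.Visit()); // "override"
        }
    }
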
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/80561f72/src/core/Codecs/Lucene3x/SegmentTermDocs.cs
----------------------------------------------------------------------
diff --git a/src/core/Codecs/Lucene3x/SegmentTermDocs.cs b/src/core/Codecs/Lucene3x/SegmentTermDocs.cs
index 1edd372..8427538 100644
--- a/src/core/Codecs/Lucene3x/SegmentTermDocs.cs
+++ b/src/core/Codecs/Lucene3x/SegmentTermDocs.cs
@@ -21,7 +21,7 @@ namespace Lucene.Net.Codecs.Lucene3x
protected IBits liveDocs;
protected IndexInput freqStream;
protected int count;
- protected int df;
+ protected internal int df;
internal int doc = 0;
internal int freq;
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/80561f72/src/core/Codecs/MultiLevelSkipListReader.cs
----------------------------------------------------------------------
diff --git a/src/core/Codecs/MultiLevelSkipListReader.cs b/src/core/Codecs/MultiLevelSkipListReader.cs
index f212f72..47bec11 100644
--- a/src/core/Codecs/MultiLevelSkipListReader.cs
+++ b/src/core/Codecs/MultiLevelSkipListReader.cs
@@ -276,7 +276,7 @@ namespace Lucene.Net.Codecs
return data[pos++];
}
- public override void ReadBytes(byte[] b, int offset, int len, bool useBuffer)
+ public override void ReadBytes(byte[] b, int offset, int len)
{
Array.Copy(data, pos, b, offset, len);
pos += len;
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/80561f72/src/core/Codecs/PostingsWriterBase.cs
----------------------------------------------------------------------
diff --git a/src/core/Codecs/PostingsWriterBase.cs b/src/core/Codecs/PostingsWriterBase.cs
index 8cead37..aa863a8 100644
--- a/src/core/Codecs/PostingsWriterBase.cs
+++ b/src/core/Codecs/PostingsWriterBase.cs
@@ -7,7 +7,7 @@ using System.Text;
namespace Lucene.Net.Codecs
{
- public class PostingsWriterBase : PostingsConsumer, IDisposable
+ public abstract class PostingsWriterBase : PostingsConsumer, IDisposable
{
protected PostingsWriterBase()
{
@@ -30,5 +30,11 @@ namespace Lucene.Net.Codecs
}
protected abstract void Dispose(bool disposing);
+
+ public abstract override void StartDoc(int docID, int freq);
+
+ public abstract override void AddPosition(int position, Util.BytesRef payload, int startOffset, int endOffset);
+
+ public abstract override void FinishDoc();
}
}
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/80561f72/src/core/Document/Document.cs
----------------------------------------------------------------------
diff --git a/src/core/Document/Document.cs b/src/core/Document/Document.cs
index 608cfe6..8ea9490 100644
--- a/src/core/Document/Document.cs
+++ b/src/core/Document/Document.cs
@@ -50,6 +50,16 @@ namespace Lucene.Net.Documents
public Document()
{
}
+
+ public IEnumerator<IIndexableField> GetEnumerator()
+ {
+ return fields.GetEnumerator();
+ }
+
+ System.Collections.IEnumerator System.Collections.IEnumerable.GetEnumerator()
+ {
+ return GetEnumerator();
+ }
public void Add(IIndexableField field)
{
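
The Document change above is the idiomatic C# replacement for Java's Iterable<IndexableField>: a public generic GetEnumerator plus an explicit non-generic one that forwards to it. A self-contained sketch of the same shape, with illustrative types:

    using System.Collections;
    using System.Collections.Generic;

    public class FieldBag : IEnumerable<string>
    {
        private readonly List<string> fields = new List<string>();

        public void Add(string field) { fields.Add(field); }

        public IEnumerator<string> GetEnumerator()
        {
            return fields.GetEnumerator();
        }

        // Satisfied explicitly; foreach and LINQ bind to the generic one.
        IEnumerator IEnumerable.GetEnumerator()
        {
            return GetEnumerator();
        }
    }
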
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/80561f72/src/core/Document/Field.cs
----------------------------------------------------------------------
diff --git a/src/core/Document/Field.cs b/src/core/Document/Field.cs
index 1548dc9..9cc069f 100644
--- a/src/core/Document/Field.cs
+++ b/src/core/Document/Field.cs
@@ -292,7 +292,7 @@ namespace Lucene.Net.Documents
fieldsData = value;
}
- public void SetIntValue(int value)
+ public virtual void SetIntValue(int value)
{
if (!(fieldsData is int))
{
@@ -388,7 +388,7 @@ namespace Lucene.Net.Documents
return result.ToString();
}
- public FieldType FieldTypeValue
+ public IIndexableFieldType FieldTypeValue
{
get { return type; }
}
@@ -400,7 +400,7 @@ namespace Lucene.Net.Documents
return null;
}
- FieldType.NumericType? numericType = FieldTypeValue.NumericTypeValue;
+ FieldType.NumericType? numericType = ((FieldType)FieldTypeValue).NumericTypeValue;
if (numericType != null)
{
@@ -511,7 +511,7 @@ namespace Lucene.Net.Documents
}
}
- public override void Dispose()
+ public void Dispose()
{
pos = size; // this prevents NPE when reading after close!
s = null;
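
Widening FieldTypeValue to the IIndexableFieldType interface lets Field satisfy the general contract, at the cost of a downcast wherever FieldType-specific members such as NumericTypeValue are needed, as the ((FieldType)FieldTypeValue) cast above shows. A minimal sketch of that trade-off, with hypothetical names:

    public interface IFieldType
    {
        bool Indexed { get; }
    }

    public class RichFieldType : IFieldType
    {
        public bool Indexed { get; set; }
        public string NumericType { get; set; } // not on the interface
    }

    public class FieldSketch
    {
        private readonly RichFieldType type = new RichFieldType();

        public IFieldType FieldTypeValue
        {
            get { return type; } // exposed as the interface
        }

        public string NumericTypeOrNull()
        {
            return ((RichFieldType)FieldTypeValue).NumericType; // cast back for extras
        }
    }
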
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/80561f72/src/core/Document/FieldType.cs
----------------------------------------------------------------------
diff --git a/src/core/Document/FieldType.cs b/src/core/Document/FieldType.cs
index 06394f0..f9410ea 100644
--- a/src/core/Document/FieldType.cs
+++ b/src/core/Document/FieldType.cs
@@ -255,7 +255,7 @@ namespace Lucene.Net.Documents
return result.ToString();
}
- public override FieldInfo.DocValuesType DocValueType
+ public FieldInfo.DocValuesType DocValueType
{
get { return docValueType; }
set
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/80561f72/src/core/Index/AtomicReader.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/AtomicReader.cs b/src/core/Index/AtomicReader.cs
index 6ecf68b..5981765 100644
--- a/src/core/Index/AtomicReader.cs
+++ b/src/core/Index/AtomicReader.cs
@@ -18,7 +18,7 @@ namespace Lucene.Net.Index
this.readerContext = new AtomicReaderContext(this);
}
- public sealed override AtomicReaderContext Context
+ public sealed override IndexReaderContext Context
{
get
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/80561f72/src/core/Index/AtomicReaderContext.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/AtomicReaderContext.cs b/src/core/Index/AtomicReaderContext.cs
index 37462a7..4a804b3 100644
--- a/src/core/Index/AtomicReaderContext.cs
+++ b/src/core/Index/AtomicReaderContext.cs
@@ -49,7 +49,7 @@ namespace Lucene.Net.Index
}
}
- public override AtomicReader Reader
+ public override IndexReader Reader
{
get
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/80561f72/src/core/Index/BaseCompositeReader.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/BaseCompositeReader.cs b/src/core/Index/BaseCompositeReader.cs
index f2af247..81f8322 100644
--- a/src/core/Index/BaseCompositeReader.cs
+++ b/src/core/Index/BaseCompositeReader.cs
@@ -164,11 +164,12 @@ namespace Lucene.Net.Index
return this.starts[readerIndex];
}
- protected override IList<R> GetSequentialSubReaders()
+ protected internal override IList<IndexReader> GetSequentialSubReaders()
{
- return subReadersList;
+ // TODO: .NET Port: does the new instance here cause problems?
+ return subReadersList.Cast<IndexReader>().ToList();
}
- protected internal abstract void DoClose();
+ protected override abstract void DoClose();
}
}
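
GetSequentialSubReaders loses its R type parameter here because IList<T> is invariant in C#: an IList<SegmentReader> is not an IList<IndexReader>, so the port copies into a new list (the TODO rightly flags the allocation). A sketch of the invariance problem, with illustrative types:

    using System.Collections.Generic;
    using System.Linq;

    class IndexReaderSketch { }
    class SegmentReaderSketch : IndexReaderSketch { }

    static class VarianceDemo
    {
        static IList<IndexReaderSketch> AsBaseList(IList<SegmentReaderSketch> subs)
        {
            // return subs;                           // does not compile: IList<T> is invariant
            // return (IList<IndexReaderSketch>)subs; // compiles, throws at runtime
            return subs.Cast<IndexReaderSketch>().ToList(); // copy, as the port does
        }

        static void Main()
        {
            var readers = AsBaseList(new List<SegmentReaderSketch> { new SegmentReaderSketch() });
            System.Console.WriteLine(readers.Count); // 1
        }
    }
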
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/80561f72/src/core/Index/BinaryDocValuesWriter.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/BinaryDocValuesWriter.cs b/src/core/Index/BinaryDocValuesWriter.cs
index 37926c5..f34dd48 100644
--- a/src/core/Index/BinaryDocValuesWriter.cs
+++ b/src/core/Index/BinaryDocValuesWriter.cs
@@ -58,12 +58,16 @@ namespace Lucene.Net.Index
dvConsumer.AddBinaryField(fieldInfo, GetBytesIterator(maxDoc));
}
+ internal override void Abort()
+ {
+ }
+
private IEnumerable<BytesRef> GetBytesIterator(int maxDocParam)
{
// .NET port: using yield return instead of a custom IEnumerable type
BytesRef value = new BytesRef();
- AppendingLongBuffer.Iterator lengthsIterator = lengths.GetIterator();
+ AppendingLongBuffer.Iterator lengthsIterator = (AppendingLongBuffer.Iterator)lengths.GetIterator();
int size = (int) lengths.Size;
int maxDoc = maxDocParam;
int upto = 0;
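
The ".NET port: using yield return" comment in this file names the usual replacement for a hand-written Java Iterable/Iterator pair: an iterator method whose state machine the compiler generates. A minimal sketch of the pattern, with made-up data:

    using System.Collections.Generic;

    static class IteratorDemo
    {
        // Lazily yields one length per document, defaulting missing docs to 0.
        static IEnumerable<int> Lengths(int[] stored, int maxDoc)
        {
            for (int upto = 0; upto < maxDoc; upto++)
            {
                yield return upto < stored.Length ? stored[upto] : 0;
            }
        }

        static void Main()
        {
            foreach (int len in Lengths(new[] { 3, 7 }, 4))
                System.Console.Write(len + " "); // 3 7 0 0
        }
    }
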
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/80561f72/src/core/Index/ByteSliceReader.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/ByteSliceReader.cs b/src/core/Index/ByteSliceReader.cs
index 7e8274e..cf0a0dc 100644
--- a/src/core/Index/ByteSliceReader.cs
+++ b/src/core/Index/ByteSliceReader.cs
@@ -52,17 +52,17 @@ namespace Lucene.Net.Index
this.endIndex = endIndex;
level = 0;
- bufferUpto = startIndex / DocumentsWriter.BYTE_BLOCK_SIZE;
- bufferOffset = bufferUpto * DocumentsWriter.BYTE_BLOCK_SIZE;
+ bufferUpto = startIndex / ByteBlockPool.BYTE_BLOCK_SIZE;
+ bufferOffset = bufferUpto * ByteBlockPool.BYTE_BLOCK_SIZE;
buffer = pool.buffers[bufferUpto];
- upto = startIndex & DocumentsWriter.BYTE_BLOCK_MASK;
+ upto = startIndex & ByteBlockPool.BYTE_BLOCK_MASK;
int firstSize = ByteBlockPool.LEVEL_SIZE_ARRAY[0];
if (startIndex + firstSize >= endIndex)
{
// There is only this one slice to read
- limit = endIndex & DocumentsWriter.BYTE_BLOCK_MASK;
+ limit = endIndex & ByteBlockPool.BYTE_BLOCK_MASK;
}
else
limit = upto + firstSize - 4;
@@ -115,11 +115,11 @@ namespace Lucene.Net.Index
level = ByteBlockPool.NEXT_LEVEL_ARRAY[level];
int newSize = ByteBlockPool.LEVEL_SIZE_ARRAY[level];
- bufferUpto = nextIndex / DocumentsWriter.BYTE_BLOCK_SIZE;
- bufferOffset = bufferUpto * DocumentsWriter.BYTE_BLOCK_SIZE;
+ bufferUpto = nextIndex / ByteBlockPool.BYTE_BLOCK_SIZE;
+ bufferOffset = bufferUpto * ByteBlockPool.BYTE_BLOCK_SIZE;
buffer = pool.buffers[bufferUpto];
- upto = nextIndex & DocumentsWriter.BYTE_BLOCK_MASK;
+ upto = nextIndex & ByteBlockPool.BYTE_BLOCK_MASK;
if (nextIndex + newSize >= endIndex)
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/80561f72/src/core/Index/ByteSliceWriter.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/ByteSliceWriter.cs b/src/core/Index/ByteSliceWriter.cs
index 1329441..9195a2f 100644
--- a/src/core/Index/ByteSliceWriter.cs
+++ b/src/core/Index/ByteSliceWriter.cs
@@ -42,15 +42,15 @@ namespace Lucene.Net.Index
/// <summary> Set up the writer to write at address.</summary>
public void Init(int address)
{
- slice = pool.buffers[address >> DocumentsWriter.BYTE_BLOCK_SHIFT];
+ slice = pool.buffers[address >> ByteBlockPool.BYTE_BLOCK_SHIFT];
Debug.Assert(slice != null);
- upto = address & DocumentsWriter.BYTE_BLOCK_MASK;
+ upto = address & ByteBlockPool.BYTE_BLOCK_MASK;
offset0 = address;
Debug.Assert(upto < slice.Length);
}
/// <summary>Write byte into byte slice stream </summary>
- public void WriteByte(byte b)
+ public override void WriteByte(byte b)
{
Debug.Assert(slice != null);
if (slice[upto] != 0)
@@ -64,7 +64,7 @@ namespace Lucene.Net.Index
Debug.Assert(upto != slice.Length);
}
- public void WriteBytes(byte[] b, int offset, int len)
+ public override void WriteBytes(byte[] b, int offset, int len)
{
int offsetEnd = offset + len;
while (offset < offsetEnd)
@@ -84,17 +84,17 @@ namespace Lucene.Net.Index
public int Address
{
- get { return upto + (offset0 & DocumentsWriter.BYTE_BLOCK_NOT_MASK); }
+ get { return upto + (offset0 & DocumentsWriterPerThread.BYTE_BLOCK_NOT_MASK); }
}
- public void WriteVInt(int i)
- {
- while ((i & ~0x7F) != 0)
- {
- WriteByte((byte)((i & 0x7f) | 0x80));
- i = Number.URShift(i, 7);
- }
- WriteByte((byte)i);
- }
+ //public void WriteVInt(int i)
+ //{
+ // while ((i & ~0x7F) != 0)
+ // {
+ // WriteByte((byte)((i & 0x7f) | 0x80));
+ // i = Number.URShift(i, 7);
+ // }
+ // WriteByte((byte)i);
+ //}
}
}
\ No newline at end of file
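
The WriteVInt body commented out above duplicated what the base DataOutput already provides, which is why it could be retired. For reference, the VInt format it implements stores 7 payload bits per byte, least significant group first, with the high bit as a continuation flag. A standalone sketch of an encoder in that format (not the port's DataOutput):

    using System.Collections.Generic;

    static class VIntDemo
    {
        static List<byte> WriteVInt(int i)
        {
            var bytes = new List<byte>();
            uint u = (uint)i; // unsigned view, like Java's >>> 7
            while ((u & ~0x7Fu) != 0)
            {
                bytes.Add((byte)((u & 0x7F) | 0x80)); // continuation bit set
                u >>= 7;
            }
            bytes.Add((byte)u); // final byte, high bit clear
            return bytes;
        }

        static void Main()
        {
            foreach (byte b in WriteVInt(300)) // 300 -> AC 02
                System.Console.Write(b.ToString("X2") + " ");
        }
    }
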
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/80561f72/src/core/Index/CompositeReader.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/CompositeReader.cs b/src/core/Index/CompositeReader.cs
index 866e1d1..205c48e 100644
--- a/src/core/Index/CompositeReader.cs
+++ b/src/core/Index/CompositeReader.cs
@@ -51,7 +51,7 @@ namespace Lucene.Net.Index
*/
protected internal abstract IList<IndexReader> GetSequentialSubReaders();
- public override CompositeReaderContext Context
+ public override IndexReaderContext Context
{
get
{
@@ -66,25 +66,25 @@ namespace Lucene.Net.Index
}
}
- public abstract Fields GetTermVectors(int docID);
+ public abstract override Fields GetTermVectors(int docID);
- public abstract int NumDocs { get; }
+ public abstract override int NumDocs { get; }
- public abstract int MaxDoc { get; }
+ public abstract override int MaxDoc { get; }
- public abstract void Document(int docID, StoredFieldVisitor visitor);
+ public abstract override void Document(int docID, StoredFieldVisitor visitor);
- protected abstract void DoClose();
+ protected abstract override void DoClose();
- public abstract int DocFreq(Term term);
+ public abstract override int DocFreq(Term term);
- public abstract long TotalTermFreq(Term term);
+ public abstract override long TotalTermFreq(Term term);
- public abstract long GetSumDocFreq(string field);
+ public abstract override long GetSumDocFreq(string field);
- public abstract int GetDocCount(string field);
+ public abstract override int GetDocCount(string field);
- public abstract long GetSumTotalTermFreq(string field);
+ public abstract override long GetSumTotalTermFreq(string field);
}
}
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/80561f72/src/core/Index/CompositeReaderContext.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/CompositeReaderContext.cs b/src/core/Index/CompositeReaderContext.cs
index ad57d45..12e2f4f 100644
--- a/src/core/Index/CompositeReaderContext.cs
+++ b/src/core/Index/CompositeReaderContext.cs
@@ -62,7 +62,7 @@ namespace Lucene.Net.Index
}
}
- public override CompositeReader Reader
+ public override IndexReader Reader
{
get
{
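
The same covariance limitation, seen from the consumer side: Reader now returns the base IndexReader, so a call site that knows it holds a CompositeReaderContext casts the result back. A hypothetical one-liner, assuming a 'context' variable of that type:

    // Hedged sketch: recovering the concrete reader after the signature change.
    CompositeReader composite = (CompositeReader)context.Reader;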
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/80561f72/src/core/Index/DocValuesProcessor.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/DocValuesProcessor.cs b/src/core/Index/DocValuesProcessor.cs
index d74de92..5b94dce 100644
--- a/src/core/Index/DocValuesProcessor.cs
+++ b/src/core/Index/DocValuesProcessor.cs
@@ -31,7 +31,7 @@ namespace Lucene.Net.Index
public override void AddField(int docID, IIndexableField field, FieldInfo fieldInfo)
{
- FieldInfo.DocValuesType dvType = field.FieldType.DocValueType;
+ FieldInfo.DocValuesType dvType = field.FieldTypeValue.DocValueType;
if (dvType != null)
{
fieldInfo.DocValuesTypeValue = dvType;
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/80561f72/src/core/Index/DocumentsWriter.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/DocumentsWriter.cs b/src/core/Index/DocumentsWriter.cs
index 6c64af9..7d944aa 100644
--- a/src/core/Index/DocumentsWriter.cs
+++ b/src/core/Index/DocumentsWriter.cs
@@ -404,7 +404,7 @@ namespace Lucene.Net.Index
}
}
- internal void Close()
+ public void Dispose()
{
closed = true;
flushControl.SetClosed();
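
Renaming Close to Dispose moves DocumentsWriter toward the standard .NET disposal idiom, presumably so it can implement IDisposable and be driven by 'using' blocks. A minimal stand-in showing the shape the hunk gives the class:

    // Hedged sketch with a stand-in name; only the Dispose shape is taken
    // from the diff above, the rest is illustrative.
    public sealed class DocumentsWriterLike : IDisposable
    {
        private volatile bool closed;

        public bool IsClosed { get { return closed; } }

        public void Dispose()
        {
            closed = true;
            // flushControl.SetClosed(); // as in the diff
        }
    }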
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/80561f72/src/core/Index/FilterDirectoryReader.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/FilterDirectoryReader.cs b/src/core/Index/FilterDirectoryReader.cs
index f221082..938e2f9 100644
--- a/src/core/Index/FilterDirectoryReader.cs
+++ b/src/core/Index/FilterDirectoryReader.cs
@@ -55,7 +55,7 @@ namespace Lucene.Net.Index
return instance == null ? null : DoWrapDirectoryReader(instance);
}
- protected override DirectoryReader DoOpenIfChanged()
+ protected internal override DirectoryReader DoOpenIfChanged()
{
return WrapDirectoryReader(instance.DoOpenIfChanged());
}
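
Unlike Java, C# requires an override to repeat the base member's accessibility exactly. DirectoryReader presumably declares DoOpenIfChanged as 'protected internal', so the override must say the same or the compiler rejects it with CS0507. A sketch with stand-in types:

    // Hedged sketch: override accessibility must match the base declaration.
    public abstract class DirectoryReaderLike
    {
        protected internal abstract DirectoryReaderLike DoOpenIfChanged();
    }

    public class FilterLike : DirectoryReaderLike
    {
        // 'protected override' alone would fail with CS0507.
        protected internal override DirectoryReaderLike DoOpenIfChanged()
        {
            return this;
        }
    }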
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/80561f72/src/core/Index/FreqProxTermsWriterPerField.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/FreqProxTermsWriterPerField.cs b/src/core/Index/FreqProxTermsWriterPerField.cs
index 8cf260c..fed6bea 100644
--- a/src/core/Index/FreqProxTermsWriterPerField.cs
+++ b/src/core/Index/FreqProxTermsWriterPerField.cs
@@ -77,7 +77,7 @@ namespace Lucene.Net.Index
internal bool hasPayloads;
- internal override void SkippingLongTerm()
+ public override void SkippingLongTerm()
{
}
@@ -109,7 +109,7 @@ namespace Lucene.Net.Index
}
}
- internal override bool Start(IIndexableField[] fields, int count)
+ public override bool Start(IIndexableField[] fields, int count)
{
for (int i = 0; i < count; i++)
{
@@ -121,7 +121,7 @@ namespace Lucene.Net.Index
return false;
}
- internal override void Start(IIndexableField f)
+ public override void Start(IIndexableField f)
{
if (fieldState.attributeSource.HasAttribute<IPayloadAttribute>())
{
@@ -186,7 +186,7 @@ namespace Lucene.Net.Index
postings.lastOffsets[termID] = startOffset;
}
- internal override void NewTerm(int termID)
+ public override void NewTerm(int termID)
{
// First time we're seeing this term since the last
// flush
@@ -219,7 +219,7 @@ namespace Lucene.Net.Index
fieldState.uniqueTermCount++;
}
- internal override void AddTerm(int termID)
+ public override void AddTerm(int termID)
{
////assert docState.testPoint("FreqProxTermsWriterPerField.addTerm start");
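
The same exact-match rule as in the FilterDirectoryReader hunk above, approached from the other direction: Java permits an override to widen visibility, but C# forbids any change at all, so the base-class declarations were presumably widened to public and every override here follows suit with 'public override'.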
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/80561f72/src/core/Index/IIndexableField.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/IIndexableField.cs b/src/core/Index/IIndexableField.cs
index 2e3a2c3..29ff853 100644
--- a/src/core/Index/IIndexableField.cs
+++ b/src/core/Index/IIndexableField.cs
@@ -12,7 +12,7 @@ namespace Lucene.Net.Index
{
string Name { get; }
- IIndexableFieldType FieldType { get; }
+ IIndexableFieldType FieldTypeValue { get; }
float Boost { get; }
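
This interface rename is the source of the FieldTypeValue change in the DocValuesProcessor hunk above; the 'Value' suffix presumably sidesteps ambiguity with the concrete FieldType class that implementers also deal in, though the commit itself does not say so. A sketch of the renamed member with stand-in types:

    // Hedged sketch: stand-ins for the renamed member, not the full interface.
    public interface IFieldTypeLike { }

    public interface IFieldLike
    {
        string Name { get; }
        IFieldTypeLike FieldTypeValue { get; } // renamed from 'FieldType'
    }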
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/80561f72/src/core/Index/IndexWriter.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/IndexWriter.cs b/src/core/Index/IndexWriter.cs
index aaebb1f..2f4c37b 100644
--- a/src/core/Index/IndexWriter.cs
+++ b/src/core/Index/IndexWriter.cs
@@ -873,7 +873,7 @@ namespace Lucene.Net.Index
infoStream.Message("IW", "now flush at close waitForMerges=" + waitForMerges);
}
- docWriter.Close();
+ docWriter.Dispose();
try
{
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/80561f72/src/core/Index/IndexWriterConfig.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/IndexWriterConfig.cs b/src/core/Index/IndexWriterConfig.cs
index 1ad38d6..3003acc 100644
--- a/src/core/Index/IndexWriterConfig.cs
+++ b/src/core/Index/IndexWriterConfig.cs
@@ -406,32 +406,32 @@ namespace Lucene.Net.Index
return SetInfoStream(new PrintStreamInfoStream(printStream));
}
- public override IndexWriterConfig SetMaxBufferedDeleteTerms(int maxBufferedDeleteTerms)
+ public override LiveIndexWriterConfig SetMaxBufferedDeleteTerms(int maxBufferedDeleteTerms)
{
return (IndexWriterConfig)base.SetMaxBufferedDeleteTerms(maxBufferedDeleteTerms);
}
- public override IndexWriterConfig SetMaxBufferedDocs(int maxBufferedDocs)
+ public override LiveIndexWriterConfig SetMaxBufferedDocs(int maxBufferedDocs)
{
return (IndexWriterConfig)base.SetMaxBufferedDocs(maxBufferedDocs);
}
- public override IndexWriterConfig SetMergedSegmentWarmer(IndexReaderWarmer mergeSegmentWarmer)
+ public override LiveIndexWriterConfig SetMergedSegmentWarmer(IndexReaderWarmer mergeSegmentWarmer)
{
return (IndexWriterConfig)base.SetMergedSegmentWarmer(mergeSegmentWarmer);
}
- public override IndexWriterConfig SetRAMBufferSizeMB(double ramBufferSizeMB)
+ public override LiveIndexWriterConfig SetRAMBufferSizeMB(double ramBufferSizeMB)
{
return (IndexWriterConfig)base.SetRAMBufferSizeMB(ramBufferSizeMB);
}
- public override IndexWriterConfig SetReaderTermsIndexDivisor(int divisor)
+ public override LiveIndexWriterConfig SetReaderTermsIndexDivisor(int divisor)
{
return (IndexWriterConfig)base.SetReaderTermsIndexDivisor(divisor);
}
- public override IndexWriterConfig SetTermIndexInterval(int interval)
+ public override LiveIndexWriterConfig SetTermIndexInterval(int interval)
{
return (IndexWriterConfig)base.SetTermIndexInterval(interval);
}
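
Missing return-type covariance also reshapes these fluent setters: each override must keep the base return type LiveIndexWriterConfig, the bodies cast back to IndexWriterConfig internally, and a caller that wants the concrete type casts once at the end of a chain. A hypothetical call site (the constructor arguments are assumed, not taken from this diff):

    // Hedged sketch: the chain is typed as LiveIndexWriterConfig, so the
    // caller casts the final result back to the concrete config type.
    IndexWriterConfig config = (IndexWriterConfig)new IndexWriterConfig(version, analyzer)
        .SetMaxBufferedDocs(1000)
        .SetRAMBufferSizeMB(64.0);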
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/80561f72/src/core/Index/KeepOnlyLastCommitDeletionPolicy.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/KeepOnlyLastCommitDeletionPolicy.cs b/src/core/Index/KeepOnlyLastCommitDeletionPolicy.cs
index 0c0f705..f824e6b 100644
--- a/src/core/Index/KeepOnlyLastCommitDeletionPolicy.cs
+++ b/src/core/Index/KeepOnlyLastCommitDeletionPolicy.cs
@@ -33,14 +33,14 @@ namespace Lucene.Net.Index
}
/// <summary> Deletes all commits except the most recent one.</summary>
- public void OnInit<T>(IList<T> commits) where T : IndexCommit
+ public override void OnInit<T>(IList<T> commits)
{
// Note that commits.size() should normally be 1:
OnCommit(commits);
}
/// <summary> Deletes all commits except the most recent one.</summary>
- public void OnCommit<T>(IList<T> commits) where T : IndexCommit
+ public override void OnCommit<T>(IList<T> commits)
{
// Note that commits.size() should normally be 2 (if not
// called by onInit above):
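
In C# an override of a generic method inherits the base method's constraints and must not restate them; writing 'where T : IndexCommit' again alongside 'override' fails with CS0460, which is why the clause disappears from both methods. A self-contained sketch with stand-in types:

    // Hedged sketch: constraints live on the base declaration only.
    using System.Collections.Generic;

    public class IndexCommitLike { }

    public abstract class DeletionPolicyBase
    {
        public abstract void OnCommit<T>(IList<T> commits) where T : IndexCommitLike;
    }

    public class KeepOnlyLastLike : DeletionPolicyBase
    {
        // Restating 'where T : IndexCommitLike' here would be CS0460.
        public override void OnCommit<T>(IList<T> commits)
        {
            // delete all but the most recent commit ...
        }
    }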
http://git-wip-us.apache.org/repos/asf/lucenenet/blob/80561f72/src/core/Index/LogMergePolicy.cs
----------------------------------------------------------------------
diff --git a/src/core/Index/LogMergePolicy.cs b/src/core/Index/LogMergePolicy.cs
index 76e0252..286ebd5 100644
--- a/src/core/Index/LogMergePolicy.cs
+++ b/src/core/Index/LogMergePolicy.cs
@@ -547,7 +547,7 @@ namespace Lucene.Net.Index
/// will return multiple merges, allowing the <see cref="MergeScheduler" />
/// to use concurrency.
/// </summary>
- public override MergeSpecification FindMerges(MergeTrigger mergeTrigger, SegmentInfos infos)
+ public override MergeSpecification FindMerges(MergeTrigger? mergeTrigger, SegmentInfos infos)
{
int numSegments = infos.Count;
if (Verbose)