You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@lucenenet.apache.org by ni...@apache.org on 2017/06/06 00:11:58 UTC

[25/48] lucenenet git commit: Lucene.Net.Util: Fixed XML Documentation comments, types beginning with H-Z

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/268e78d4/src/Lucene.Net/Util/WAH8DocIdSet.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Util/WAH8DocIdSet.cs b/src/Lucene.Net/Util/WAH8DocIdSet.cs
index 2641a41..b5abda8 100644
--- a/src/Lucene.Net/Util/WAH8DocIdSet.cs
+++ b/src/Lucene.Net/Util/WAH8DocIdSet.cs
@@ -29,48 +29,48 @@ namespace Lucene.Net.Util
     using PackedInt32s = Lucene.Net.Util.Packed.PackedInt32s;
 
     /// <summary>
-    /// <seealso cref="DocIdSet"/> implementation based on word-aligned hybrid encoding on
+    /// <see cref="DocIdSet"/> implementation based on word-aligned hybrid encoding on
     /// words of 8 bits.
-    /// <p>this implementation doesn't support random-access but has a fast
-    /// <seealso cref="DocIdSetIterator"/> which can advance in logarithmic time thanks to
-    /// an index.</p>
-    /// <p>The compression scheme is simplistic and should work well with sparse and
+    /// <para>This implementation doesn't support random-access but has a fast
+    /// <see cref="DocIdSetIterator"/> which can advance in logarithmic time thanks to
+    /// an index.</para>
+    /// <para>The compression scheme is simplistic and should work well with sparse and
     /// very dense doc id sets while being only slightly larger than a
-    /// <seealso cref="FixedBitSet"/> for incompressible sets (overhead&lt;2% in the worst
-    /// case) in spite of the index.</p>
-    /// <p><b>Format</b>: The format is byte-aligned. An 8-bits word is either clean,
+    /// <see cref="FixedBitSet"/> for incompressible sets (overhead&lt;2% in the worst
+    /// case) in spite of the index.</para>
+    /// <para><b>Format</b>: The format is byte-aligned. An 8-bits word is either clean,
     /// meaning composed only of zeros or ones, or dirty, meaning that it contains
     /// between 1 and 7 bits set. The idea is to encode sequences of clean words
-    /// using run-length encoding and to leave sequences of dirty words as-is.</p>
-    /// <table>
-    ///   <tr><th>Token</th><th>Clean length+</th><th>Dirty length+</th><th>Dirty words</th></tr>
-    ///   <tr><td>1 byte</td><td>0-n bytes</td><td>0-n bytes</td><td>0-n bytes</td></tr>
-    /// </table>
-    /// <ul>
-    ///   <li><b>Token</b> encodes whether clean means full of zeros or ones in the
-    /// first bit, the number of clean words minus 2 on the next 3 bits and the
-    /// number of dirty words on the last 4 bits. The higher-order bit is a
-    /// continuation bit, meaning that the number is incomplete and needs additional
-    /// bytes to be read.</li>
-    ///   <li><b>Clean length+</b>: If clean length has its higher-order bit set,
-    /// you need to read a <seealso cref="DataInput#readVInt() vint"/>, shift it by 3 bits on
-    /// the left side and add it to the 3 bits which have been read in the token.</li>
-    ///   <li><b>Dirty length+</b> works the same way as <b>Clean length+</b> but
-    /// on 4 bits and for the length of dirty words.</li>
-    ///   <li><b>Dirty words</b> are the dirty words, there are <b>Dirty length</b>
-    /// of them.</li>
-    /// </ul>
-    /// <p>this format cannot encode sequences of less than 2 clean words and 0 dirty
+    /// using run-length encoding and to leave sequences of dirty words as-is.</para>
+    /// <list type="table">
+    ///     <listheader><term>Token</term><term>Clean length+</term><term>Dirty length+</term><term>Dirty words</term></listheader>
+    ///     <item><term>1 byte</term><term>0-n bytes</term><term>0-n bytes</term><term>0-n bytes</term></item>
+    /// </list>
+    /// <list type="bullet">
+    ///     <item><term><b>Token</b></term><description> encodes whether clean means full of zeros or ones in the
+    ///         first bit, the number of clean words minus 2 on the next 3 bits and the
+    ///         number of dirty words on the last 4 bits. The higher-order bit is a
+    ///         continuation bit, meaning that the number is incomplete and needs additional
+    ///         bytes to be read.</description></item>
+    ///     <item><term><b>Clean length+</b>:</term><description> If clean length has its higher-order bit set,
+    ///         you need to read a vint (<see cref="Store.DataInput.ReadVInt32()"/>), shift it by 3 bits on
+    ///         the left side and add it to the 3 bits which have been read in the token.</description></item>
+    ///     <item><term><b>Dirty length+</b></term><description> works the same way as <b>Clean length+</b> but
+    ///         on 4 bits and for the length of dirty words.</description></item>
+    ///     <item><term><b>Dirty words</b></term><description> are the dirty words, there are <b>Dirty length</b>
+    ///         of them.</description></item>
+    /// </list>
+    /// <para>This format cannot encode sequences of less than 2 clean words and 0 dirty
     /// word. The reason is that if you find a single clean word, you should rather
-    /// encode it as a dirty word. this takes the same space as starting a new
+    /// encode it as a dirty word. This takes the same space as starting a new
     /// sequence (since you need one byte for the token) but will be lighter to
     /// decode. There is however an exception for the first sequence. Since the first
     /// sequence may start directly with a dirty word, the clean length is encoded
-    /// directly, without subtracting 2.</p>
-    /// <p>There is an additional restriction on the format: the sequence of dirty
-    /// words is not allowed to contain two consecutive clean words. this restriction
+    /// directly, without subtracting 2.</para>
+    /// <para>There is an additional restriction on the format: the sequence of dirty
+    /// words is not allowed to contain two consecutive clean words. This restriction
     /// exists to make sure no space is wasted and to make sure iterators can read
-    /// the next doc ID by reading at most 2 dirty words.</p>
+    /// the next doc ID by reading at most 2 dirty words.</para>
     /// @lucene.experimental
     /// </summary>
     public sealed class WAH8DocIdSet : DocIdSet
@@ -110,14 +110,14 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Same as <seealso cref="#intersect(Collection, int)"/> with the default index interval. </summary>
+        /// Same as <see cref="Intersect(ICollection{WAH8DocIdSet}, int)"/> with the default index interval. </summary>
         public static WAH8DocIdSet Intersect(ICollection<WAH8DocIdSet> docIdSets)
         {
             return Intersect(docIdSets, DEFAULT_INDEX_INTERVAL);
         }
 
         /// <summary>
-        /// Compute the intersection of the provided sets. this method is much faster than
+        /// Compute the intersection of the provided sets. This method is much faster than
         /// computing the intersection manually since it operates directly at the byte level.
         /// </summary>
         public static WAH8DocIdSet Intersect(ICollection<WAH8DocIdSet> docIdSets, int indexInterval)
@@ -184,14 +184,14 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Same as <seealso cref="#union(Collection, int)"/> with the default index interval. </summary>
+        /// Same as <see cref="Union(ICollection{WAH8DocIdSet}, int)"/> with the default index interval. </summary>
         public static WAH8DocIdSet Union(ICollection<WAH8DocIdSet> docIdSets)
         {
             return Union(docIdSets, DEFAULT_INDEX_INTERVAL);
         }
 
         /// <summary>
-        /// Compute the union of the provided sets. this method is much faster than
+        /// Compute the union of the provided sets. This method is much faster than
         /// computing the union manually since it operates directly at the byte level.
         /// </summary>
         public static WAH8DocIdSet Union(ICollection<WAH8DocIdSet> docIdSets, int indexInterval)
@@ -292,12 +292,12 @@ namespace Lucene.Net.Util
 
             /// <summary>
             /// Set the index interval. Smaller index intervals improve performance of
-            ///  <seealso cref="DocIdSetIterator#advance(int)"/> but make the <seealso cref="DocIdSet"/>
-            ///  larger. An index interval <code>i</code> makes the index add an overhead
-            ///  which is at most <code>4/i</code>, but likely much less.The default index
-            ///  interval is <code>8</code>, meaning the index has an overhead of at most
-            ///  50%. To disable indexing, you can pass <see cref="int.MaxValue"/> as an
-            ///  index interval.
+            /// <see cref="DocIdSetIterator.Advance(int)"/> but make the <see cref="DocIdSet"/>
+            /// larger. An index interval <c>i</c> makes the index add an overhead
+            /// which is at most <c>4/i</c>, but likely much less. The default index
+            /// interval is <c>8</c>, meaning the index has an overhead of at most
+            /// 50%. To disable indexing, you can pass <see cref="int.MaxValue"/> as an
+            /// index interval.
             /// </summary>
             public virtual object SetIndexInterval(int indexInterval)
             {
@@ -454,7 +454,7 @@ namespace Lucene.Net.Util
             }
 
             /// <summary>
-            /// Build a new <seealso cref="WAH8DocIdSet"/>. </summary>
+            /// Build a new <see cref="WAH8DocIdSet"/>. </summary>
             public virtual WAH8DocIdSet Build()
             {
                 if (cardinality == 0)
@@ -509,7 +509,7 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// A builder for <seealso cref="WAH8DocIdSet"/>s. </summary>
+        /// A builder for <see cref="WAH8DocIdSet"/>s. </summary>
         public sealed class Builder : WordBuilder
         {
             private int lastDocID;
@@ -554,7 +554,7 @@ namespace Lucene.Net.Util
             }
 
             /// <summary>
-            /// Add the content of the provided <seealso cref="DocIdSetIterator"/>. </summary>
+            /// Add the content of the provided <see cref="DocIdSetIterator"/>. </summary>
             public Builder Add(DocIdSetIterator disi)
             {
                 for (int doc = disi.NextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = disi.NextDoc())
@@ -893,7 +893,7 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Return the number of documents in this <seealso cref="DocIdSet"/> in constant time. </summary>
+        /// Return the number of documents in this <see cref="DocIdSet"/> in constant time. </summary>
         public int Cardinality()
         {
             return cardinality;

http://git-wip-us.apache.org/repos/asf/lucenenet/blob/268e78d4/src/Lucene.Net/Util/WeakIdentityMap.cs
----------------------------------------------------------------------
diff --git a/src/Lucene.Net/Util/WeakIdentityMap.cs b/src/Lucene.Net/Util/WeakIdentityMap.cs
index 2999d94..a1ba475 100644
--- a/src/Lucene.Net/Util/WeakIdentityMap.cs
+++ b/src/Lucene.Net/Util/WeakIdentityMap.cs
@@ -24,38 +24,38 @@ namespace Lucene.Net.Util
 	 */
 
     /// <summary>
-    /// Implements a combination of <seealso cref="java.util.WeakHashMap"/> and
-    /// <seealso cref="java.util.IdentityHashMap"/>.
-    /// Useful for caches that need to key off of a {@code ==} comparison
-    /// instead of a {@code .equals}.
+    /// Implements a combination of <c>java.util.WeakHashMap</c> and
+    /// <c>java.util.IdentityHashMap</c>.
+    /// Useful for caches that need to key off of a <c>==</c> comparison
+    /// instead of a <c>.Equals(object)</c>.
     ///
-    /// <p>this class is not a general-purpose <seealso cref="java.util.Map"/>
+    /// <para/>This class is not a general-purpose <see cref="IDictionary{TKey, TValue}"/>
     /// implementation! It intentionally violates
-    /// Map's general contract, which mandates the use of the equals method
-    /// when comparing objects. this class is designed for use only in the
+    /// <see cref="IDictionary{TKey, TValue}"/>'s general contract, which mandates the use of the <see cref="object.Equals(object)"/> method
+    /// when comparing objects. This class is designed for use only in the
     /// rare cases wherein reference-equality semantics are required.
     ///
-    /// <p>this implementation was forked from <a href="http://cxf.apache.org/">Apache CXF</a>
-    /// but modified to <b>not</b> implement the <seealso cref="java.util.Map"/> interface and
+    /// <para/>This implementation was forked from <a href="http://cxf.apache.org/">Apache CXF</a>
+    /// but modified to <b>not</b> implement the <see cref="IDictionary{TKey, TValue}"/> interface and
     /// without any set views on it, as those are error-prone and inefficient,
-    /// if not implemented carefully. The map only contains <seealso cref="Iterator"/> implementations
-    /// on the values and not-GCed keys. Lucene's implementation also supports {@code null}
+    /// if not implemented carefully. The map only contains <see cref="IEnumerable{T}.GetEnumerator()"/> implementations
+    /// on the values and not-GCed keys. Lucene's implementation also supports <c>null</c>
     /// keys, but those are never weak!
     ///
-    /// <p><a name="reapInfo" />The map supports two modes of operation:
-    /// <ul>
-    ///  <li>{@code reapOnRead = true}: this behaves identical to a <seealso cref="java.util.WeakHashMap"/>
-    ///  where it also cleans up the reference queue on every read operation (<seealso cref="#get(Object)"/>,
-    ///  <seealso cref="#containsKey(Object)"/>, <seealso cref="#size()"/>, <seealso cref="#valueIterator()"/>), freeing map entries
-    ///  of already GCed keys.</li>
-    ///  <li>{@code reapOnRead = false}: this mode does not call <seealso cref="#reap()"/> on every read
-    ///  operation. In this case, the reference queue is only cleaned up on write operations
-    ///  (like <seealso cref="#put(Object, Object)"/>). this is ideal for maps with few entries where
-    ///  the keys are unlikely be garbage collected, but there are lots of <seealso cref="#get(Object)"/>
-    ///  operations. The code can still call <seealso cref="#reap()"/> to manually clean up the queue without
-    ///  doing a write operation.</li>
-    /// </ul>
-    ///
+    /// <para/><a name="reapInfo" />The map supports two modes of operation:
+    /// <list type="bullet">
+    ///     <item><term><c>reapOnRead = true</c>:</term><description> This behaves identical to a <c>java.util.WeakHashMap</c>
+    ///         where it also cleans up the reference queue on every read operation (<see cref="Get(object)"/>,
+    ///         <see cref="ContainsKey(object)"/>, <see cref="Count"/>, <see cref="GetValueEnumerator()"/>), freeing map entries
+    ///         of already GCed keys.</description></item>
+    ///     <item><term><c>reapOnRead = false</c>:</term><description> This mode does not call <see cref="Reap()"/> on every read
+    ///         operation. In this case, the reference queue is only cleaned up on write operations
+    ///         (like <see cref="Put(TKey, TValue)"/>). This is ideal for maps with few entries where
+    ///         the keys are unlikely to be garbage collected, but there are lots of <see cref="Get(object)"/>
+    ///         operations. The code can still call <see cref="Reap()"/> to manually clean up the queue without
+    ///         doing a write operation.</description></item>
+    /// </list>
+    /// <para/>
     /// @lucene.internal
     /// </summary>
     public sealed class WeakIdentityMap<TKey, TValue>
@@ -66,7 +66,7 @@ namespace Lucene.Net.Util
         private readonly bool reapOnRead;
 
         /// <summary>
-        /// Creates a new {@code WeakIdentityMap} based on a non-synchronized <seealso cref="HashMap"/>.
+        /// Creates a new <see cref="WeakIdentityMap{TKey, TValue}"/> based on a non-synchronized <see cref="Dictionary{TKey, TValue}"/>.
         /// The map <a href="#reapInfo">cleans up the reference queue on every read operation</a>.
         /// </summary>
         public static WeakIdentityMap<TKey, TValue> NewHashMap()
@@ -75,7 +75,7 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Creates a new {@code WeakIdentityMap} based on a non-synchronized <seealso cref="HashMap"/>. </summary>
+        /// Creates a new <see cref="WeakIdentityMap{TKey, TValue}"/> based on a non-synchronized <see cref="Dictionary{TKey, TValue}"/>. </summary>
         /// <param name="reapOnRead"> controls if the map <a href="#reapInfo">cleans up the reference queue on every read operation</a>. </param>
         public static WeakIdentityMap<TKey, TValue> NewHashMap(bool reapOnRead)
         {
@@ -83,7 +83,7 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Creates a new {@code WeakIdentityMap} based on a <seealso cref="ConcurrentHashMap"/>.
+        /// Creates a new <see cref="WeakIdentityMap{TKey, TValue}"/> based on a <see cref="ConcurrentDictionary{TKey, TValue}"/>.
         /// The map <a href="#reapInfo">cleans up the reference queue on every read operation</a>.
         /// </summary>
         public static WeakIdentityMap<TKey, TValue> NewConcurrentHashMap()
@@ -92,7 +92,7 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Creates a new {@code WeakIdentityMap} based on a <seealso cref="ConcurrentHashMap"/>. </summary>
+        /// Creates a new <see cref="WeakIdentityMap{TKey, TValue}"/> based on a <see cref="ConcurrentDictionary{TKey, TValue}"/>. </summary>
         /// <param name="reapOnRead"> controls if the map <a href="#reapInfo">cleans up the reference queue on every read operation</a>. </param>
         public static WeakIdentityMap<TKey, TValue> NewConcurrentHashMap(bool reapOnRead)
         {
@@ -116,7 +116,7 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Returns {@code true} if this map contains a mapping for the specified key. </summary>
+        /// Returns <c>true</c> if this map contains a mapping for the specified key. </summary>
         public bool ContainsKey(object key)
         {
             if (reapOnRead)
@@ -157,7 +157,10 @@ namespace Lucene.Net.Util
             return backingStore[new IdentityWeakReference(key)] = value;
         }
 
-        public IEnumerable<TKey> Keys
+        /// <summary>
+        /// Gets an <see cref="ICollection{TKey}"/> object containing the keys of the <see cref="WeakIdentityMap{TKey, TValue}"/>.
+        /// </summary>
+        public IEnumerable<TKey> Keys // LUCENENET TODO: API - change to ICollection<T>
         {
             get
             {
@@ -193,7 +196,10 @@ namespace Lucene.Net.Util
             }
         }
 
-        public IEnumerable<TValue> Values
+        /// <summary>
+        /// Gets an <see cref="ICollection{TValue}"/> object containing the values of the <see cref="WeakIdentityMap{TKey, TValue}"/>.
+        /// </summary>
+        public IEnumerable<TValue> Values // LUCENENET TODO: API - change to ICollection<T>
         {
             get
             {
@@ -203,7 +209,7 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Returns {@code true} if this map contains no key-value mappings. </summary>
+        /// Returns <c>true</c> if this map contains no key-value mappings. </summary>
         public bool IsEmpty
         {
             get
@@ -215,8 +221,8 @@ namespace Lucene.Net.Util
         /// <summary>
         /// Removes the mapping for a key from this weak hash map if it is present.
         /// Returns the value to which this map previously associated the key,
-        /// or {@code null} if the map contained no mapping for the key.
-        /// A return value of {@code null} does not necessarily indicate that
+        /// or <c>null</c> if the map contained no mapping for the key.
+        /// A return value of <c>null</c> does not necessarily indicate that
         /// the map contained no mapping for the key.
         /// </summary>
         public bool Remove(object key)
@@ -226,9 +232,10 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// Returns the number of key-value mappings in this map. this result is a snapshot,
+        /// Returns the number of key-value mappings in this map. This result is a snapshot,
         /// and may not reflect unprocessed entries that will be removed before next
         /// attempted access because they are no longer referenced.
+        /// <para/>
         /// NOTE: This was size() in Lucene.
         /// </summary>
         public int Count
@@ -308,9 +315,9 @@ namespace Lucene.Net.Util
 
         /// <summary>
         /// Returns an iterator over all values of this map.
-        /// this iterator may return values whose key is already
+        /// This iterator may return values whose key is already
         /// garbage collected while iterator is consumed,
-        /// especially if {@code reapOnRead} is {@code false}.
+        /// especially if <see cref="reapOnRead"/> is <c>false</c>.
         /// <para/>
         /// NOTE: This was valueIterator() in Lucene.
         /// </summary>
@@ -324,11 +331,12 @@ namespace Lucene.Net.Util
         }
 
         /// <summary>
-        /// this method manually cleans up the reference queue to remove all garbage
+        /// This method manually cleans up the reference queue to remove all garbage
         /// collected key/value pairs from the map. Calling this method is not needed
-        /// if {@code reapOnRead = true}. Otherwise it might be a good idea
-        /// to call this method when there is spare time (e.g. from a background thread). </summary>
-        /// <seealso cref= <a href="#reapInfo">Information about the <code>reapOnRead</code> setting</a> </seealso>			
+        /// if <c>reapOnRead = true</c>. Otherwise it might be a good idea
+        /// to call this method when there is spare time (e.g. from a background thread). 
+        /// <a href="#reapInfo">Information about the <c>reapOnRead</c> setting</a>		
+        /// </summary>
         public void Reap()
         {
             List<IdentityWeakReference> keysToRemove = null;