You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@lucy.apache.org by nw...@apache.org on 2016/02/26 14:44:36 UTC

svn commit: r1732480 [2/6] - in /lucy/site/trunk/content/docs/test: ./ Lucy/ Lucy/Analysis/ Lucy/Docs/ Lucy/Docs/Cookbook/ Lucy/Docs/Tutorial/ Lucy/Document/ Lucy/Highlight/ Lucy/Index/ Lucy/Object/ Lucy/Plan/ Lucy/Search/ Lucy/Search/Collector/ Lucy/S...

Modified: lucy/site/trunk/content/docs/test/Lucy/Docs/Cookbook/CustomQueryParser.mdtext
URL: http://svn.apache.org/viewvc/lucy/site/trunk/content/docs/test/Lucy/Docs/Cookbook/CustomQueryParser.mdtext?rev=1732480&r1=1732479&r2=1732480&view=diff
==============================================================================
--- lucy/site/trunk/content/docs/test/Lucy/Docs/Cookbook/CustomQueryParser.mdtext (original)
+++ lucy/site/trunk/content/docs/test/Lucy/Docs/Cookbook/CustomQueryParser.mdtext Fri Feb 26 13:44:34 2016
@@ -41,158 +41,185 @@ name="Single-field_parser"
 and it will analyze text using a fixed choice of English EasyAnalyzer.
 We won&#8217;t subclass Lucy::Search::QueryParser just yet.</p>
 
-<pre>    package FlatQueryParser;
-    use Lucy::Search::TermQuery;
-    use Lucy::Search::PhraseQuery;
-    use Lucy::Search::ORQuery;
-    use Carp;
-    
-    sub new { 
-        my $analyzer = Lucy::Analysis::EasyAnalyzer-&#62;new(
-            language =&#62; &#39;en&#39;,
-        );
-        return bless { 
-            field    =&#62; &#39;content&#39;,
-            analyzer =&#62; $analyzer,
-        }, __PACKAGE__;
-    }</pre>
+<pre>package FlatQueryParser;
+use Lucy::Search::TermQuery;
+use Lucy::Search::PhraseQuery;
+use Lucy::Search::ORQuery;
+use Carp;
+
+sub new { 
+    my $analyzer = Lucy::Analysis::EasyAnalyzer-&#62;new(
+        language =&#62; &#39;en&#39;,
+    );
+    return bless { 
+        field    =&#62; &#39;content&#39;,
+        analyzer =&#62; $analyzer,
+    }, __PACKAGE__;
+}</pre>
 
 <p>Some private helper subs for creating TermQuery and PhraseQuery objects will help keep the size of our main parse() subroutine down:</p>
 
-<pre>    sub _make_term_query {
-        my ( $self, $term ) = @_;
-        return Lucy::Search::TermQuery-&#62;new(
-            field =&#62; $self-&#62;{field},
-            term  =&#62; $term,
-        );
-    }
-    
-    sub _make_phrase_query {
-        my ( $self, $terms ) = @_;
-        return Lucy::Search::PhraseQuery-&#62;new(
-            field =&#62; $self-&#62;{field},
-            terms =&#62; $terms,
-        );
-    }</pre>
+<pre>sub _make_term_query {
+    my ( $self, $term ) = @_;
+    return Lucy::Search::TermQuery-&#62;new(
+        field =&#62; $self-&#62;{field},
+        term  =&#62; $term,
+    );
+}
+
+sub _make_phrase_query {
+    my ( $self, $terms ) = @_;
+    return Lucy::Search::PhraseQuery-&#62;new(
+        field =&#62; $self-&#62;{field},
+        terms =&#62; $terms,
+    );
+}</pre>
 
 <p>Our private _tokenize() method treats double-quote delimited material as a single token and splits on whitespace everywhere else.</p>
 
-<pre>    sub _tokenize {
-        my ( $self, $query_string ) = @_;
-        my @tokens;
-        while ( length $query_string ) {
-            if ( $query_string =~ s/^\s+// ) {
-                next;    # skip whitespace
-            }
-            elsif ( $query_string =~ s/^(&#34;[^&#34;]*(?:&#34;|$))// ) {
-                push @tokens, $1;    # double-quoted phrase
-            }
-            else {
-                $query_string =~ s/(\S+)//;
-                push @tokens, $1;    # single word
-            }
+<pre>sub _tokenize {
+    my ( $self, $query_string ) = @_;
+    my @tokens;
+    while ( length $query_string ) {
+        if ( $query_string =~ s/^\s+// ) {
+            next;    # skip whitespace
         }
-        return \@tokens;
-    }</pre>
-
-<p>The main parsing routine creates an array of tokens by calling _tokenize(), runs the tokens through through the EasyAnalyzer, creates TermQuery or PhraseQuery objects according to how many tokens emerge from the EasyAnalyzer&#8217;s split() method, and adds each of the sub-queries to the primary ORQuery.</p>
+        elsif ( $query_string =~ s/^(&#34;[^&#34;]*(?:&#34;|$))// ) {
+            push @tokens, $1;    # double-quoted phrase
+        }
+        else {
+            $query_string =~ s/(\S+)//;
+            push @tokens, $1;    # single word
+        }
+    }
+    return \@tokens;
+}</pre>
 
-<pre>    sub parse {
-        my ( $self, $query_string ) = @_;
-        my $tokens   = $self-&#62;_tokenize($query_string);
-        my $analyzer = $self-&#62;{analyzer};
-        my $or_query = Lucy::Search::ORQuery-&#62;new;
-    
-        for my $token (@$tokens) {
-            if ( $token =~ s/^&#34;// ) {
-                $token =~ s/&#34;$//;
-                my $terms = $analyzer-&#62;split($token);
-                my $query = $self-&#62;_make_phrase_query($terms);
-                $or_query-&#62;add_child($phrase_query);
+<p>The main parsing routine creates an array of tokens by calling _tokenize(),
+runs the tokens through the EasyAnalyzer,
+creates TermQuery or PhraseQuery objects according to how many tokens emerge from the EasyAnalyzer&#8217;s split() method,
+and adds each of the sub-queries to the primary ORQuery.</p>
+
+<pre>sub parse {
+    my ( $self, $query_string ) = @_;
+    my $tokens   = $self-&#62;_tokenize($query_string);
+    my $analyzer = $self-&#62;{analyzer};
+    my $or_query = Lucy::Search::ORQuery-&#62;new;
+
+    for my $token (@$tokens) {
+        if ( $token =~ s/^&#34;// ) {
+            $token =~ s/&#34;$//;
+            my $terms = $analyzer-&#62;split($token);
+            my $query = $self-&#62;_make_phrase_query($terms);
+            $or_query-&#62;add_child($query);
+        }
+        else {
+            my $terms = $analyzer-&#62;split($token);
+            if ( @$terms == 1 ) {
+                my $query = $self-&#62;_make_term_query( $terms-&#62;[0] );
+                $or_query-&#62;add_child($query);
             }
-            else {
-                my $terms = $analyzer-&#62;split($token);
-                if ( @$terms == 1 ) {
-                    my $query = $self-&#62;_make_term_query( $terms-&#62;[0] );
-                    $or_query-&#62;add_child($query);
-                }
-                elsif ( @$terms &#62; 1 ) {
-                    my $query = $self-&#62;_make_phrase_query($terms);
-                    $or_query-&#62;add_child($query);
-                }
+            elsif ( @$terms &#62; 1 ) {
+                my $query = $self-&#62;_make_phrase_query($terms);
+                $or_query-&#62;add_child($query);
             }
         }
-    
-        return $or_query;
-    }</pre>
+    }
+
+    return $or_query;
+}</pre>
 
 <h3><a class='u'
 name="Multi-field_parser"
 >Multi-field parser</a></h3>
 
-<p>Most often, the end user will want their search query to match not only a single &#8216;content&#8217; field, but also &#8216;title&#8217; and so on. To make that happen, we have to turn queries such as this&#8230;</p>
+<p>Most often,
+the end user will want their search query to match not only a single &#8216;content&#8217; field,
+but also &#8216;title&#8217; and so on.
+To make that happen,
+we have to turn queries such as this&#8230;</p>
 
-<pre>    foo AND NOT bar</pre>
+<pre>foo AND NOT bar</pre>
 
 <p>&#8230; into the logical equivalent of this:</p>
 
-<pre>    (title:foo OR content:foo) AND NOT (title:bar OR content:bar)</pre>
-
-<p>Rather than continue with our own from-scratch parser class and write the routines to accomplish that expansion, we&#8217;re now going to subclass Lucy::Search::QueryParser and take advantage of some of its existing methods.</p>
+<pre>(title:foo OR content:foo) AND NOT (title:bar OR content:bar)</pre>
 
-<p>Our first parser implementation had the &#8220;content&#8221; field name and the choice of English EasyAnalyzer hard-coded for simplicity, but we don&#8217;t need to do that once we subclass Lucy::Search::QueryParser. QueryParser&#8217;s constructor &#8211; which we will inherit, allowing us to eliminate our own constructor &#8211; requires a Schema which conveys field and Analyzer information, so we can just defer to that.</p>
+<p>Rather than continue with our own from-scratch parser class and write the routines to accomplish that expansion,
+we&#8217;re now going to subclass Lucy::Search::QueryParser and take advantage of some of its existing methods.</p>
 
-<pre>    package FlatQueryParser;
-    use base qw( Lucy::Search::QueryParser );
-    use Lucy::Search::TermQuery;
-    use Lucy::Search::PhraseQuery;
-    use Lucy::Search::ORQuery;
-    use PrefixQuery;
-    use Carp;
-    
-    # Inherit new()</pre>
-
-<p>We&#8217;re also going to jettison our _make_term_query() and _make_phrase_query() helper subs and chop our parse() subroutine way down. Our revised parse() routine will generate Lucy::Search::LeafQuery objects instead of TermQueries and PhraseQueries:</p>
-
-<pre>    sub parse {
-        my ( $self, $query_string ) = @_;
-        my $tokens = $self-&#62;_tokenize($query_string);
-        my $or_query = Lucy::Search::ORQuery-&#62;new;
-        for my $token (@$tokens) {
-            my $leaf_query = Lucy::Search::LeafQuery-&#62;new( text =&#62; $token );
-            $or_query-&#62;add_child($leaf_query);
-        }
-        return $self-&#62;expand($or_query);
-    }</pre>
+<p>Our first parser implementation had the &#8220;content&#8221; field name and the choice of English EasyAnalyzer hard-coded for simplicity,
+but we don&#8217;t need to do that once we subclass Lucy::Search::QueryParser.
+QueryParser&#8217;s constructor &#8211; which we will inherit,
+allowing us to eliminate our own constructor &#8211; requires a Schema which conveys field and Analyzer information,
+so we can just defer to that.</p>
+
+<pre>package FlatQueryParser;
+use base qw( Lucy::Search::QueryParser );
+use Lucy::Search::TermQuery;
+use Lucy::Search::PhraseQuery;
+use Lucy::Search::ORQuery;
+use PrefixQuery;
+use Carp;
+
+# Inherit new()</pre>
+
+<p>We&#8217;re also going to jettison our _make_term_query() and _make_phrase_query() helper subs and chop our parse() subroutine way down.
+Our revised parse() routine will generate Lucy::Search::LeafQuery objects instead of TermQueries and PhraseQueries:</p>
+
+<pre>sub parse {
+    my ( $self, $query_string ) = @_;
+    my $tokens = $self-&#62;_tokenize($query_string);
+    my $or_query = Lucy::Search::ORQuery-&#62;new;
+    for my $token (@$tokens) {
+        my $leaf_query = Lucy::Search::LeafQuery-&#62;new( text =&#62; $token );
+        $or_query-&#62;add_child($leaf_query);
+    }
+    return $self-&#62;expand($or_query);
+}</pre>
 
-<p>The magic happens in QueryParser&#8217;s expand() method, which walks the ORQuery object we supply to it looking for LeafQuery objects, and calls expand_leaf() for each one it finds. expand_leaf() performs field-specific analysis, decides whether each query should be a TermQuery or a PhraseQuery, and if multiple fields are required, creates an ORQuery which mults out e.g. <code>foo</code> into <code>(title:foo OR content:foo)</code>.</p>
+<p>The magic happens in QueryParser&#8217;s expand() method,
+which walks the ORQuery object we supply to it looking for LeafQuery objects,
+and calls expand_leaf() for each one it finds.
+expand_leaf() performs field-specific analysis,
+decides whether each query should be a TermQuery or a PhraseQuery,
+and if multiple fields are required,
+creates an ORQuery which mults out e.g.
+<code>foo</code> into <code>(title:foo OR content:foo)</code>.</p>
 
 <h3><a class='u'
 name="Extending_the_query_language"
 >Extending the query language</a></h3>
 
-<p>To add support for trailing wildcards to our query language, we need to override expand_leaf() to accommodate PrefixQuery, while deferring to the parent class implementation on TermQuery and PhraseQuery.</p>
-
-<pre>    sub expand_leaf {
-        my ( $self, $leaf_query ) = @_;
-        my $text = $leaf_query-&#62;get_text;
-        if ( $text =~ /\*$/ ) {
-            my $or_query = Lucy::Search::ORQuery-&#62;new;
-            for my $field ( @{ $self-&#62;get_fields } ) {
-                my $prefix_query = PrefixQuery-&#62;new(
-                    field        =&#62; $field,
-                    query_string =&#62; $text,
-                );
-                $or_query-&#62;add_child($prefix_query);
-            }
-            return $or_query;
-        }
-        else {
-            return $self-&#62;SUPER::expand_leaf($leaf_query);
+<p>To add support for trailing wildcards to our query language,
+we need to override expand_leaf() to accommodate PrefixQuery,
+while deferring to the parent class implementation on TermQuery and PhraseQuery.</p>
+
+<pre>sub expand_leaf {
+    my ( $self, $leaf_query ) = @_;
+    my $text = $leaf_query-&#62;get_text;
+    if ( $text =~ /\*$/ ) {
+        my $or_query = Lucy::Search::ORQuery-&#62;new;
+        for my $field ( @{ $self-&#62;get_fields } ) {
+            my $prefix_query = PrefixQuery-&#62;new(
+                field        =&#62; $field,
+                query_string =&#62; $text,
+            );
+            $or_query-&#62;add_child($prefix_query);
         }
-    }</pre>
+        return $or_query;
+    }
+    else {
+        return $self-&#62;SUPER::expand_leaf($leaf_query);
+    }
+}</pre>
 
-<p>Ordinarily, those asterisks would have been stripped when running tokens through the EasyAnalyzer &#8211; query strings containing &#8220;foo*&#8221; would produce TermQueries for the term &#8220;foo&#8221;. Our override intercepts tokens with trailing asterisks and processes them as PrefixQueries before <code>SUPER::expand_leaf</code> can discard them, so that a search for &#8220;foo*&#8221; can match &#8220;food&#8221;, &#8220;foosball&#8221;, and so on.</p>
+<p>Ordinarily,
+those asterisks would have been stripped when running tokens through the EasyAnalyzer &#8211; query strings containing &#8220;foo*&#8221; would produce TermQueries for the term &#8220;foo&#8221;.
+Our override intercepts tokens with trailing asterisks and processes them as PrefixQueries before <code>SUPER::expand_leaf</code> can discard them,
+so that a search for &#8220;foo*&#8221; can match &#8220;food&#8221;,
+&#8220;foosball&#8221;,
+and so on.</p>
 
 <h3><a class='u'
 name="Usage"
@@ -200,13 +227,13 @@ name="Usage"
 
 <p>Insert our custom parser into the search.cgi sample app to get a feel for how it behaves:</p>
 
-<pre>    my $parser = FlatQueryParser-&#62;new( schema =&#62; $searcher-&#62;get_schema );
-    my $query  = $parser-&#62;parse( decode( &#39;UTF-8&#39;, $cgi-&#62;param(&#39;q&#39;) || &#39;&#39; ) );
-    my $hits   = $searcher-&#62;hits(
-        query      =&#62; $query,
-        offset     =&#62; $offset,
-        num_wanted =&#62; $page_size,
-    );
-    ...</pre>
+<pre>my $parser = FlatQueryParser-&#62;new( schema =&#62; $searcher-&#62;get_schema );
+my $query  = $parser-&#62;parse( decode( &#39;UTF-8&#39;, $cgi-&#62;param(&#39;q&#39;) || &#39;&#39; ) );
+my $hits   = $searcher-&#62;hits(
+    query      =&#62; $query,
+    offset     =&#62; $offset,
+    num_wanted =&#62; $page_size,
+);
+...</pre>
 
 </div>

Modified: lucy/site/trunk/content/docs/test/Lucy/Docs/Cookbook/FastUpdates.mdtext
URL: http://svn.apache.org/viewvc/lucy/site/trunk/content/docs/test/Lucy/Docs/Cookbook/FastUpdates.mdtext?rev=1732480&r1=1732479&r2=1732480&view=diff
==============================================================================
--- lucy/site/trunk/content/docs/test/Lucy/Docs/Cookbook/FastUpdates.mdtext (original)
+++ lucy/site/trunk/content/docs/test/Lucy/Docs/Cookbook/FastUpdates.mdtext Fri Feb 26 13:44:34 2016
@@ -56,90 +56,115 @@ name="Procrastinating_and_playing_catch-
 If we subclass IndexManager and override the method so that it always returns an empty array,
 we get consistently quick performance:</p>
 
-<pre>    package NoMergeManager;
-    use base qw( Lucy::Index::IndexManager );
-    sub recycle { [] }
-    
-    package main;
-    my $indexer = Lucy::Index::Indexer-&#62;new(
-        index =&#62; &#39;/path/to/index&#39;,
-        manager =&#62; NoMergeManager-&#62;new,
-    );
-    ...
-    $indexer-&#62;commit;</pre>
-
-<p>However, we can&#8217;t procrastinate forever. Eventually, we&#8217;ll have to run an ordinary, uncontrolled indexing session, potentially triggering a large rewrite of lots of small and/or degraded segments:</p>
-
-<pre>    my $indexer = Lucy::Index::Indexer-&#62;new( 
-        index =&#62; &#39;/path/to/index&#39;, 
-        # manager =&#62; NoMergeManager-&#62;new,
-    );
-    ...
-    $indexer-&#62;commit;</pre>
+<pre>package NoMergeManager;
+use base qw( Lucy::Index::IndexManager );
+sub recycle { [] }
+
+package main;
+my $indexer = Lucy::Index::Indexer-&#62;new(
+    index =&#62; &#39;/path/to/index&#39;,
+    manager =&#62; NoMergeManager-&#62;new,
+);
+...
+$indexer-&#62;commit;</pre>
+
+<p>However,
+we can&#8217;t procrastinate forever.
+Eventually,
+we&#8217;ll have to run an ordinary,
+uncontrolled indexing session,
+potentially triggering a large rewrite of lots of small and/or degraded segments:</p>
+
+<pre>my $indexer = Lucy::Index::Indexer-&#62;new( 
+    index =&#62; &#39;/path/to/index&#39;, 
+    # manager =&#62; NoMergeManager-&#62;new,
+);
+...
+$indexer-&#62;commit;</pre>
 
 <h3><a class='u'
 name="Acceptable_worst-case_update_time,_slower_degradation"
->Acceptable worst-case update time, slower degradation</a></h3>
+>Acceptable worst-case update time,
+slower degradation</a></h3>
 
-<p>Never merging anything at all in the main indexing process is probably overkill. Small segments are relatively cheap to merge; we just need to guard against the big rewrites.</p>
+<p>Never merging anything at all in the main indexing process is probably overkill.
+Small segments are relatively cheap to merge; we just need to guard against the big rewrites.</p>
 
-<p>Setting a ceiling on the number of documents in the segments to be recycled allows us to avoid a mass proliferation of tiny, single-document segments, while still offering decent worst-case update speed:</p>
-
-<pre>    package LightMergeManager;
-    use base qw( Lucy::Index::IndexManager );
-    
-    sub recycle {
-        my $self = shift;
-        my $seg_readers = $self-&#62;SUPER::recycle(@_);
-        @$seg_readers = grep { $_-&#62;doc_max &#60; 10 } @$seg_readers;
-        return $seg_readers;
-    }</pre>
-
-<p>However, we still have to consolidate every once in a while, and while that happens content updates will be locked out.</p>
+<p>Setting a ceiling on the number of documents in the segments to be recycled allows us to avoid a mass proliferation of tiny,
+single-document segments,
+while still offering decent worst-case update speed:</p>
+
+<pre>package LightMergeManager;
+use base qw( Lucy::Index::IndexManager );
+
+sub recycle {
+    my $self = shift;
+    my $seg_readers = $self-&#62;SUPER::recycle(@_);
+    @$seg_readers = grep { $_-&#62;doc_max &#60; 10 } @$seg_readers;
+    return $seg_readers;
+}</pre>
+
+<p>However,
+we still have to consolidate every once in a while,
+and while that happens content updates will be locked out.</p>
 
 <h3><a class='u'
 name="Background_merging"
 >Background merging</a></h3>
 
-<p>If it&#8217;s not acceptable to lock out updates while the index consolidation process runs, the alternative is to move the consolidation process out of band, using <a href="../../../Lucy/Index/BackgroundMerger.html" class="podlinkpod"
+<p>If it&#8217;s not acceptable to lock out updates while the index consolidation process runs,
+the alternative is to move the consolidation process out of band,
+using <a href="../../../Lucy/Index/BackgroundMerger.html" class="podlinkpod"
 >BackgroundMerger</a>.</p>
 
-<p>It&#8217;s never safe to have more than one Indexer attempting to modify the content of an index at the same time, but a BackgroundMerger and an Indexer can operate simultaneously:</p>
+<p>It&#8217;s never safe to have more than one Indexer attempting to modify the content of an index at the same time,
+but a BackgroundMerger and an Indexer can operate simultaneously:</p>
 
-<pre>    # Indexing process.
-    use Scalar::Util qw( blessed );
-    my $retries = 0;
-    while (1) {
-        eval {
-            my $indexer = Lucy::Index::Indexer-&#62;new(
-                    index =&#62; &#39;/path/to/index&#39;,
-                    manager =&#62; LightMergeManager-&#62;new,
-                );
-            $indexer-&#62;add_doc($doc);
-            $indexer-&#62;commit;
-        };
-        last unless $@;
-        if ( blessed($@) and $@-&#62;isa(&#34;Lucy::Store::LockErr&#34;) ) {
-            # Catch LockErr.
-            warn &#34;Couldn&#39;t get lock ($retries retries)&#34;;
-            $retries++;
-        }
-        else {
-            die &#34;Write failed: $@&#34;;
-        }
+<pre># Indexing process.
+use Scalar::Util qw( blessed );
+my $retries = 0;
+while (1) {
+    eval {
+        my $indexer = Lucy::Index::Indexer-&#62;new(
+                index =&#62; &#39;/path/to/index&#39;,
+                manager =&#62; LightMergeManager-&#62;new,
+            );
+        $indexer-&#62;add_doc($doc);
+        $indexer-&#62;commit;
+    };
+    last unless $@;
+    if ( blessed($@) and $@-&#62;isa(&#34;Lucy::Store::LockErr&#34;) ) {
+        # Catch LockErr.
+        warn &#34;Couldn&#39;t get lock ($retries retries)&#34;;
+        $retries++;
     }
-    
-    # Background merge process.
-    my $manager = Lucy::Index::IndexManager-&#62;new;
-    $manager-&#62;set_write_lock_timeout(60_000);
-    my $bg_merger = Lucy::Index::BackgroundMerger-&#62;new(
-        index   =&#62; &#39;/path/to/index&#39;,
-        manager =&#62; $manager,
-    );
-    $bg_merger-&#62;commit;</pre>
-
-<p>The exception handling code becomes useful once you have more than one index modification process happening simultaneously. By default, Indexer tries several times to acquire a write lock over the span of one second, then holds it until <a href="../../../Lucy/Index/Indexer.html#commit" class="podlinkpod"
->commit()</a> completes. BackgroundMerger handles most of its work without the write lock, but it does need it briefly once at the beginning and once again near the end. Under normal loads, the internal retry logic will resolve conflicts, but if it&#8217;s not acceptable to miss an insert, you probably want to catch <a href="../../../Lucy/Store/LockErr.html" class="podlinkpod"
->LockErr</a> exceptions thrown by Indexer. In contrast, a LockErr from BackgroundMerger probably just needs to be logged.</p>
+    else {
+        die &#34;Write failed: $@&#34;;
+    }
+}
+
+# Background merge process.
+my $manager = Lucy::Index::IndexManager-&#62;new;
+$manager-&#62;set_write_lock_timeout(60_000);
+my $bg_merger = Lucy::Index::BackgroundMerger-&#62;new(
+    index   =&#62; &#39;/path/to/index&#39;,
+    manager =&#62; $manager,
+);
+$bg_merger-&#62;commit;</pre>
+
+<p>The exception handling code becomes useful once you have more than one index modification process happening simultaneously.
+By default,
+Indexer tries several times to acquire a write lock over the span of one second,
+then holds it until <a href="../../../Lucy/Index/Indexer.html#commit" class="podlinkpod"
+>commit()</a> completes.
+BackgroundMerger handles most of its work without the write lock,
+but it does need it briefly once at the beginning and once again near the end.
+Under normal loads,
+the internal retry logic will resolve conflicts,
+but if it&#8217;s not acceptable to miss an insert,
+you probably want to catch <a href="../../../Lucy/Store/LockErr.html" class="podlinkpod"
+>LockErr</a> exceptions thrown by Indexer.
+In contrast,
+a LockErr from BackgroundMerger probably just needs to be logged.</p>
 
 </div>

Modified: lucy/site/trunk/content/docs/test/Lucy/Docs/DevGuide.mdtext
URL: http://svn.apache.org/viewvc/lucy/site/trunk/content/docs/test/Lucy/Docs/DevGuide.mdtext?rev=1732480&r1=1732479&r2=1732480&view=diff
==============================================================================
--- lucy/site/trunk/content/docs/test/Lucy/Docs/DevGuide.mdtext (original)
+++ lucy/site/trunk/content/docs/test/Lucy/Docs/DevGuide.mdtext Fri Feb 26 13:44:34 2016
@@ -38,11 +38,17 @@ For more information see the Clownfish d
 but if there&#8217;s one thing you should know about Clownfish OO before you start hacking,
 it&#8217;s that method calls are differentiated from functions by capitalization:</p>
 
-<pre>    Indexer_Add_Doc   &#60;-- Method, typically uses dynamic dispatch.
-    Indexer_add_doc   &#60;-- Function, always a direct invocation.</pre>
+<pre>Indexer_Add_Doc   &#60;-- Method, typically uses dynamic dispatch.
+Indexer_add_doc   &#60;-- Function, always a direct invocation.</pre>
 
-<p>The C files within the Lucy core are where most of Lucy&#8217;s low-level functionality lies. They implement the interface defined by the Clownfish header files.</p>
+<p>The C files within the Lucy core are where most of Lucy&#8217;s low-level functionality lies.
+They implement the interface defined by the Clownfish header files.</p>
 
-<p>The C core is intentionally left incomplete, however; to be usable, it must be bound to a &#8220;host&#8221; language. (In this context, even C is considered a &#8220;host&#8221; which must implement the missing pieces and be &#8220;bound&#8221; to the core.) Some of the binding code is autogenerated by Clownfish on a spec customized for each language. Other pieces are hand-coded in either C (using the host&#8217;s C API) or the host language itself.</p>
+<p>The C core is intentionally left incomplete,
+however; to be usable,
+it must be bound to a &#8220;host&#8221; language.
+(In this context,
+even C is considered a &#8220;host&#8221; which must implement the missing pieces and be &#8220;bound&#8221; to the core.) Some of the binding code is autogenerated by Clownfish on a spec customized for each language.
+Other pieces are hand-coded in either C (using the host&#8217;s C API) or the host language itself.</p>
 
 </div>

Modified: lucy/site/trunk/content/docs/test/Lucy/Docs/DocIDs.mdtext
URL: http://svn.apache.org/viewvc/lucy/site/trunk/content/docs/test/Lucy/Docs/DocIDs.mdtext?rev=1732480&r1=1732479&r2=1732480&view=diff
==============================================================================
--- lucy/site/trunk/content/docs/test/Lucy/Docs/DocIDs.mdtext (original)
+++ lucy/site/trunk/content/docs/test/Lucy/Docs/DocIDs.mdtext Fri Feb 26 13:44:34 2016
@@ -21,18 +21,27 @@ name="Document_ids_are_signed_32-bit_int
 Because 0 is never a valid doc id,
 we can use it as a sentinel value:</p>
 
-<pre>    while ( my $doc_id = $posting_list-&#62;next ) {
-        ...
-    }</pre>
+<pre>while ( my $doc_id = $posting_list-&#62;next ) {
+    ...
+}</pre>
 
 <h3><a class='u'
 name="Document_ids_are_ephemeral"
 >Document ids are ephemeral</a></h3>
 
-<p>The document ids used by Lucy are associated with a single index snapshot. The moment an index is updated, the mapping of document ids to documents is subject to change.</p>
+<p>The document ids used by Lucy are associated with a single index snapshot.
+The moment an index is updated,
+the mapping of document ids to documents is subject to change.</p>
+
+<p>Since IndexReader objects represent a point-in-time view of an index,
+document ids are guaranteed to remain static for the life of the reader.
+However,
+because they are not permanent,
+Lucy document ids cannot be used as foreign keys to locate records in external data sources.
+If you truly need a primary key field,
+you must define it and populate it yourself.</p>
 
-<p>Since IndexReader objects represent a point-in-time view of an index, document ids are guaranteed to remain static for the life of the reader. However, because they are not permanent, Lucy document ids cannot be used as foreign keys to locate records in external data sources. If you truly need a primary key field, you must define it and populate it yourself.</p>
-
-<p>Furthermore, the order of document ids does not tell you anything about the sequence in which documents were added to the index.</p>
+<p>Furthermore,
+the order of document ids does not tell you anything about the sequence in which documents were added to the index.</p>
 
 </div>

Modified: lucy/site/trunk/content/docs/test/Lucy/Docs/FileFormat.mdtext
URL: http://svn.apache.org/viewvc/lucy/site/trunk/content/docs/test/Lucy/Docs/FileFormat.mdtext?rev=1732480&r1=1732479&r2=1732480&view=diff
==============================================================================
--- lucy/site/trunk/content/docs/test/Lucy/Docs/FileFormat.mdtext (original)
+++ lucy/site/trunk/content/docs/test/Lucy/Docs/FileFormat.mdtext Fri Feb 26 13:44:34 2016
@@ -23,40 +23,46 @@ an index is a directory.
 The files inside have a hierarchical relationship: an index is made up of &#8220;segments&#8221;,
 each of which is an independent inverted index with its own subdirectory; each segment is made up of several component parts.</p>
 
-<pre>    [index]--|
-             |--snapshot_XXX.json
-             |--schema_XXX.json
-             |--write.lock
-             |
-             |--seg_1--|
-             |         |--segmeta.json
-             |         |--cfmeta.json
-             |         |--cf.dat-------|
-             |                         |--[lexicon]
-             |                         |--[postings]
-             |                         |--[documents]
-             |                         |--[highlight]
-             |                         |--[deletions]
-             |
-             |--seg_2--|
-             |         |--segmeta.json
-             |         |--cfmeta.json
-             |         |--cf.dat-------|
-             |                         |--[lexicon]
-             |                         |--[postings]
-             |                         |--[documents]
-             |                         |--[highlight]
-             |                         |--[deletions]
-             |
-             |--[...]--| </pre>
+<pre>[index]--|
+         |--snapshot_XXX.json
+         |--schema_XXX.json
+         |--write.lock
+         |
+         |--seg_1--|
+         |         |--segmeta.json
+         |         |--cfmeta.json
+         |         |--cf.dat-------|
+         |                         |--[lexicon]
+         |                         |--[postings]
+         |                         |--[documents]
+         |                         |--[highlight]
+         |                         |--[deletions]
+         |
+         |--seg_2--|
+         |         |--segmeta.json
+         |         |--cfmeta.json
+         |         |--cf.dat-------|
+         |                         |--[lexicon]
+         |                         |--[postings]
+         |                         |--[documents]
+         |                         |--[highlight]
+         |                         |--[deletions]
+         |
+         |--[...]--| </pre>
 
 <h3><a class='u'
 name="Write-once_philosophy"
 >Write-once philosophy</a></h3>
 
-<p>All segment directory names consist of the string &#8220;seg_&#8221; followed by a number in base 36: seg_1, seg_5m, seg_p9s2 and so on, with higher numbers indicating more recent segments. Once a segment is finished and committed, its name is never re-used and its files are never modified.</p>
+<p>All segment directory names consist of the string &#8220;seg_&#8221; followed by a number in base 36: seg_1,
+seg_5m,
+seg_p9s2 and so on,
+with higher numbers indicating more recent segments.
+Once a segment is finished and committed,
+its name is never re-used and its files are never modified.</p>
 
-<p>Old segments become obsolete and can be removed when their data has been consolidated into new segments during the process of segment merging and optimization. A fully-optimized index has only one segment.</p>
+<p>Old segments become obsolete and can be removed when their data has been consolidated into new segments during the process of segment merging and optimization.
+A fully-optimized index has only one segment.</p>
 
 <h3><a class='u'
 name="Top-level_entries"
@@ -68,61 +74,121 @@ name="Top-level_entries"
 name="snapshot_XXX.json"
 >snapshot_XXX.json</a></h4>
 
-<p>A &#8220;snapshot&#8221; file, e.g. <code>snapshot_m7p.json</code>, is list of index files and directories. Because index files, once written, are never modified, the list of entries in a snapshot defines a point-in-time view of the data in an index.</p>
-
-<p>Like segment directories, snapshot files also utilize the unique-base-36-number naming convention; the higher the number, the more recent the file. The appearance of a new snapshot file within the index directory constitutes an index update. While a new segment is being written new files may be added to the index directory, but until a new snapshot file gets written, a Searcher opening the index for reading won&#8217;t know about them.</p>
+<p>A &#8220;snapshot&#8221; file,
+e.g.
+<code>snapshot_m7p.json</code>,
+is a list of index files and directories.
+Because index files,
+once written,
+are never modified,
+the list of entries in a snapshot defines a point-in-time view of the data in an index.</p>
+
+<p>Like segment directories,
+snapshot files also utilize the unique-base-36-number naming convention; the higher the number,
+the more recent the file.
+The appearance of a new snapshot file within the index directory constitutes an index update.
+While a new segment is being written new files may be added to the index directory,
+but until a new snapshot file gets written,
+a Searcher opening the index for reading won&#8217;t know about them.</p>
 
 <h4><a class='u'
 name="schema_XXX.json"
 >schema_XXX.json</a></h4>
 
-<p>The schema file is a Schema object describing the index&#8217;s format, serialized as JSON. It, too, is versioned, and a given snapshot file will reference one and only one schema file.</p>
+<p>The schema file is a Schema object describing the index&#8217;s format,
+serialized as JSON.
+It,
+too,
+is versioned,
+and a given snapshot file will reference one and only one schema file.</p>
 
 <h4><a class='u'
 name="locks"
 >locks</a></h4>
 
-<p>By default, only one indexing process may safely modify the index at any given time. Processes reserve an index by laying claim to the <code>write.lock</code> file within the <code>locks/</code> directory. A smattering of other lock files may be used from time to time, as well.</p>
+<p>By default,
+only one indexing process may safely modify the index at any given time.
+Processes reserve an index by laying claim to the <code>write.lock</code> file within the <code>locks/</code> directory.
+A smattering of other lock files may be used from time to time,
+as well.</p>
 
 <h3><a class='u'
 name="A_segment(8217)s_component_parts"
 >A segment&#8217;s component parts</a></h3>
 
-<p>By default, each segment has up to five logical components: lexicon, postings, document storage, highlight data, and deletions. Binary data from these components gets stored in virtual files within the &#8220;cf.dat&#8221; compound file; metadata is stored in a shared &#8220;segmeta.json&#8221; file.</p>
+<p>By default,
+each segment has up to five logical components: lexicon,
+postings,
+document storage,
+highlight data,
+and deletions.
+Binary data from these components gets stored in virtual files within the &#8220;cf.dat&#8221; compound file; metadata is stored in a shared &#8220;segmeta.json&#8221; file.</p>
 
 <h4><a class='u'
 name="segmeta.json"
 >segmeta.json</a></h4>
 
-<p>The segmeta.json file is a central repository for segment metadata. In addition to information such as document counts and field numbers, it also warehouses arbitrary metadata on behalf of individual index components.</p>
+<p>The segmeta.json file is a central repository for segment metadata.
+In addition to information such as document counts and field numbers,
+it also warehouses arbitrary metadata on behalf of individual index components.</p>
 
 <h4><a class='u'
 name="Lexicon"
 >Lexicon</a></h4>
 
-<p>Each indexed field gets its own lexicon in each segment. The exact files involved depend on the field&#8217;s type, but generally speaking there will be two parts. First, there&#8217;s a primary <code>lexicon-XXX.dat</code> file which houses a complete term list associating terms with corpus frequency statistics, postings file locations, etc. Second, one or more &#8220;lexicon index&#8221; files may be present which contain periodic samples from the primary lexicon file to facilitate fast lookups.</p>
+<p>Each indexed field gets its own lexicon in each segment.
+The exact files involved depend on the field&#8217;s type,
+but generally speaking there will be two parts.
+First,
+there&#8217;s a primary <code>lexicon-XXX.dat</code> file which houses a complete term list associating terms with corpus frequency statistics,
+postings file locations,
+etc.
+Second,
+one or more &#8220;lexicon index&#8221; files may be present which contain periodic samples from the primary lexicon file to facilitate fast lookups.</p>
 
 <h4><a class='u'
 name="Postings"
 >Postings</a></h4>
 
 <p>&#8220;Posting&#8221; is a technical term from the field of <a href="../../Lucy/Docs/IRTheory.html" class="podlinkpod"
->information retrieval</a>, defined as a single instance of a one term indexing one document. If you are looking at the index in the back of a book, and you see that &#8220;freedom&#8221; is referenced on pages 8, 86, and 240, that would be three postings, which taken together form a &#8220;posting list&#8221;. The same terminology applies to an index in electronic form.</p>
-
-<p>Each segment has one postings file per indexed field. When a search is performed for a single term, first that term is looked up in the lexicon. If the term exists in the segment, the record in the lexicon will contain information about which postings file to look at and where to look.</p>
-
-<p>The first thing any posting record tells you is a document id. By iterating over all the postings associated with a term, you can find all the documents that match that term, a process which is analogous to looking up page numbers in a book&#8217;s index. However, each posting record typically contains other information in addition to document id, e.g. the positions at which the term occurs within the field.</p>
+>information retrieval</a>,
+defined as a single instance of one term indexing one document.
+If you are looking at the index in the back of a book,
+and you see that &#8220;freedom&#8221; is referenced on pages 8,
+86,
+and 240,
+that would be three postings,
+which taken together form a &#8220;posting list&#8221;.
+The same terminology applies to an index in electronic form.</p>
+
+<p>Each segment has one postings file per indexed field.
+When a search is performed for a single term,
+first that term is looked up in the lexicon.
+If the term exists in the segment,
+the record in the lexicon will contain information about which postings file to look at and where to look.</p>
+
+<p>The first thing any posting record tells you is a document id.
+By iterating over all the postings associated with a term,
+you can find all the documents that match that term,
+a process which is analogous to looking up page numbers in a book&#8217;s index.
+However,
+each posting record typically contains other information in addition to document id,
+e.g.
+the positions at which the term occurs within the field.</p>
 
 <h4><a class='u'
 name="Documents"
 >Documents</a></h4>
 
-<p>The document storage section is a simple database, organized into two files:</p>
+<p>The document storage section is a simple database,
+organized into two files:</p>
 
 <ul>
 <li><b>documents.dat</b> - Serialized documents.</li>
 
-<li><b>documents.ix</b> - Document storage index, a solid array of 64-bit integers where each integer location corresponds to a document id, and the value at that location points at a file position in the documents.dat file.</li>
+<li><b>documents.ix</b> - Document storage index,
+a solid array of 64-bit integers where each integer location corresponds to a document id,
+and the value at that location points at a file position in the documents.dat file.</li>
 </ul>
 
 <h4><a class='u'
@@ -132,16 +198,21 @@ name="Highlight_data"
 <p>The files which store data used for excerpting and highlighting are organized similarly to the files used to store documents.</p>
 
 <ul>
-<li><b>highlight.dat</b> - Chunks of serialized highlight data, one per doc id.</li>
+<li><b>highlight.dat</b> - Chunks of serialized highlight data,
+one per doc id.</li>
 
-<li><b>highlight.ix</b> - Highlight data index &#8211; as with the <code>documents.ix</code> file, a solid array of 64-bit file pointers.</li>
+<li><b>highlight.ix</b> - Highlight data index &#8211; as with the <code>documents.ix</code> file,
+a solid array of 64-bit file pointers.</li>
 </ul>
 
 <h4><a class='u'
 name="Deletions"
 >Deletions</a></h4>
 
-<p>When a document is &#8220;deleted&#8221; from a segment, it is not actually purged right away; it is merely marked as &#8220;deleted&#8221; via a deletions file. Deletions files contains bit vectors with one bit for each document in the segment; if bit #254 is set then document 254 is deleted, and if that document turns up in a search it will be masked out.</p>
+<p>When a document is &#8220;deleted&#8221; from a segment,
+it is not actually purged right away; it is merely marked as &#8220;deleted&#8221; via a deletions file.
+Deletions files contain bit vectors with one bit for each document in the segment; if bit #254 is set then document 254 is deleted,
+and if that document turns up in a search it will be masked out.</p>
 
 <p>It is only when a segment&#8217;s contents are rewritten to a new segment during the segment-merging process that deleted documents truly go away.</p>
 
@@ -149,24 +220,51 @@ name="Deletions"
 name="Compound_Files"
 >Compound Files</a></h3>
 
-<p>If you peer inside an index directory, you won&#8217;t actually find any files named &#8220;documents.dat&#8221;, &#8220;highlight.ix&#8221;, etc. unless there is an indexing process underway. What you will find instead is one &#8220;cf.dat&#8221; and one &#8220;cfmeta.json&#8221; file per segment.</p>
-
-<p>To minimize the need for file descriptors at search-time, all per-segment binary data files are concatenated together in &#8220;cf.dat&#8221; at the close of each indexing session. Information about where each file begins and ends is stored in <code>cfmeta.json</code>. When the segment is opened for reading, a single file descriptor per &#8220;cf.dat&#8221; file can be shared among several readers.</p>
+<p>If you peer inside an index directory,
+you won&#8217;t actually find any files named &#8220;documents.dat&#8221;,
+&#8220;highlight.ix&#8221;,
+etc.
+unless there is an indexing process underway.
+What you will find instead is one &#8220;cf.dat&#8221; and one &#8220;cfmeta.json&#8221; file per segment.</p>
+
+<p>To minimize the need for file descriptors at search-time,
+all per-segment binary data files are concatenated together in &#8220;cf.dat&#8221; at the close of each indexing session.
+Information about where each file begins and ends is stored in <code>cfmeta.json</code>.
+When the segment is opened for reading,
+a single file descriptor per &#8220;cf.dat&#8221; file can be shared among several readers.</p>
 
 <h3><a class='u'
 name="A_Typical_Search"
 >A Typical Search</a></h3>
 
-<p>Here&#8217;s a simplified narrative, dramatizing how a search for &#8220;freedom&#8221; against a given segment plays out:</p>
+<p>Here&#8217;s a simplified narrative,
+dramatizing how a search for &#8220;freedom&#8221; against a given segment plays out:</p>
 
 <ul>
-<li>The searcher asks the relevant Lexicon Index, &#8220;Do you know anything about &#8216;freedom&#8217;?&#8221; Lexicon Index replies, &#8220;Can&#8217;t say for sure, but if the main Lexicon file does, &#8216;freedom&#8217; is probably somewhere around byte 21008&#8221;.</li>
-
-<li>The main Lexicon tells the searcher &#8220;One moment, let me scan our records&#8230; Yes, we have 2 documents which contain &#8216;freedom&#8217;. You&#8217;ll find them in seg_6/postings-4.dat starting at byte 66991.&#8221;</li>
-
-<li>The Postings file says &#8220;Yep, we have &#8216;freedom&#8217;, all right! Document id 40 has 1 &#8216;freedom&#8217;, and document 44 has 8. If you need to know more, like if any &#8216;freedom&#8217; is part of the phrase &#8216;freedom of speech&#8217;, ask me about positions!</li>
-
-<li>If the searcher is only looking for &#8216;freedom&#8217; in isolation, that&#8217;s where it stops. It now knows enough to assign the documents scores against &#8220;freedom&#8221;, with the 8-freedom document likely ranking higher than the single-freedom document.</li>
+<li>The searcher asks the relevant Lexicon Index,
+&#8220;Do you know anything about &#8216;freedom&#8217;?&#8221; Lexicon Index replies,
+&#8220;Can&#8217;t say for sure,
+but if the main Lexicon file does,
+&#8216;freedom&#8217; is probably somewhere around byte 21008&#8221;.</li>
+
+<li>The main Lexicon tells the searcher &#8220;One moment,
+let me scan our records&#8230; Yes,
+we have 2 documents which contain &#8216;freedom&#8217;.
+You&#8217;ll find them in seg_6/postings-4.dat starting at byte 66991.&#8221;</li>
+
+<li>The Postings file says &#8220;Yep,
+we have &#8216;freedom&#8217;,
+all right!
+Document id 40 has 1 &#8216;freedom&#8217;,
+and document 44 has 8.
+If you need to know more,
+like if any &#8216;freedom&#8217; is part of the phrase &#8216;freedom of speech&#8217;,
+ask me about positions!&#8221;</li>
+
+<li>If the searcher is only looking for &#8216;freedom&#8217; in isolation,
+that&#8217;s where it stops.
+It now knows enough to assign the documents scores against &#8220;freedom&#8221;,
+with the 8-freedom document likely ranking higher than the single-freedom document.</li>
 </ul>
 
 </div>

Modified: lucy/site/trunk/content/docs/test/Lucy/Docs/FileLocking.mdtext
URL: http://svn.apache.org/viewvc/lucy/site/trunk/content/docs/test/Lucy/Docs/FileLocking.mdtext?rev=1732480&r1=1732479&r2=1732480&view=diff
==============================================================================
--- lucy/site/trunk/content/docs/test/Lucy/Docs/FileLocking.mdtext (original)
+++ lucy/site/trunk/content/docs/test/Lucy/Docs/FileLocking.mdtext Fri Feb 26 13:44:34 2016
@@ -65,23 +65,29 @@ LockFactory&#8217;s <a href="lucy:LockFa
 >lucy:LockFactory.Make_Shared_Lock</a> method exists for this reason; supplying an IndexManager instance to IndexReader&#8217;s constructor activates an internal locking mechanism using <a href="lucy:LockFactory.Make_Shared_Lock" class="podlinkurl"
 >lucy:LockFactory.Make_Shared_Lock</a> which prevents concurrent indexing processes from deleting files that are needed by active readers.</p>
 
-<pre>    use Sys::Hostname qw( hostname );
-    my $hostname = hostname() or die &#34;Can&#39;t get unique hostname&#34;;
-    my $manager = Lucy::Index::IndexManager-&#62;new( host =&#62; $hostname );
-    
-    # Index time:
-    my $indexer = Lucy::Index::Indexer-&#62;new(
-        index   =&#62; &#39;/path/to/index&#39;,
-        manager =&#62; $manager,
-    );
-    
-    # Search time:
-    my $reader = Lucy::Index::IndexReader-&#62;open(
-        index   =&#62; &#39;/path/to/index&#39;,
-        manager =&#62; $manager,
-    );
-    my $searcher = Lucy::Search::IndexSearcher-&#62;new( index =&#62; $reader );</pre>
+<pre>use Sys::Hostname qw( hostname );
+my $hostname = hostname() or die &#34;Can&#39;t get unique hostname&#34;;
+my $manager = Lucy::Index::IndexManager-&#62;new( host =&#62; $hostname );
 
-<p>Since shared locks are implemented using lockfiles located in the index directory (as are exclusive locks), reader applications must have write access for read locking to work. Stale lock files from crashed processes are ordinarily cleared away the next time the same machine &#8211; as identified by the <code>host</code> parameter &#8211; opens another IndexReader. (The classic technique of timing out lock files is not feasible because search processes may lie dormant indefinitely.) However, please be aware that if the last thing a given machine does is crash, lock files belonging to it may persist, preventing deletion of obsolete index data.</p>
+# Index time:
+my $indexer = Lucy::Index::Indexer-&#62;new(
+    index   =&#62; &#39;/path/to/index&#39;,
+    manager =&#62; $manager,
+);
+
+# Search time:
+my $reader = Lucy::Index::IndexReader-&#62;open(
+    index   =&#62; &#39;/path/to/index&#39;,
+    manager =&#62; $manager,
+);
+my $searcher = Lucy::Search::IndexSearcher-&#62;new( index =&#62; $reader );</pre>
+
+<p>Since shared locks are implemented using lockfiles located in the index directory (as are exclusive locks),
+reader applications must have write access for read locking to work.
+Stale lock files from crashed processes are ordinarily cleared away the next time the same machine &#8211; as identified by the <code>host</code> parameter &#8211; opens another IndexReader.
+(The classic technique of timing out lock files is not feasible because search processes may lie dormant indefinitely.) However,
+please be aware that if the last thing a given machine does is crash,
+lock files belonging to it may persist,
+preventing deletion of obsolete index data.</p>
 
 </div>

Modified: lucy/site/trunk/content/docs/test/Lucy/Docs/Tutorial.mdtext
URL: http://svn.apache.org/viewvc/lucy/site/trunk/content/docs/test/Lucy/Docs/Tutorial.mdtext?rev=1732480&r1=1732479&r2=1732480&view=diff
==============================================================================
--- lucy/site/trunk/content/docs/test/Lucy/Docs/Tutorial.mdtext (original)
+++ lucy/site/trunk/content/docs/test/Lucy/Docs/Tutorial.mdtext Fri Feb 26 13:44:34 2016
@@ -54,9 +54,9 @@ name="Source_materials"
 <p>The source material used by the tutorial app &#8211; a multi-text-file presentation of the United States constitution &#8211; can be found in the <code>sample</code> directory at the root of the Lucy distribution,
 along with finished indexing and search apps.</p>
 
-<pre>    sample/indexer.pl        # indexing app
-    sample/search.cgi        # search app
-    sample/us_constitution   # corpus</pre>
+<pre>sample/indexer.pl        # indexing app
+sample/search.cgi        # search app
+sample/us_constitution   # corpus</pre>
 
 <h3><a class='u'
 name="Conventions"
@@ -64,7 +64,8 @@ name="Conventions"
 
 <p>The user is expected to be familiar with OO Perl and basic CGI programming.</p>
 
-<p>The code in this tutorial assumes a Unix-flavored operating system and the Apache webserver, but will work with minor modifications on other setups.</p>
+<p>The code in this tutorial assumes a Unix-flavored operating system and the Apache webserver,
+but will work with minor modifications on other setups.</p>
 
 <h3><a class='u'
 name="See_also"

Modified: lucy/site/trunk/content/docs/test/Lucy/Docs/Tutorial/AnalysisTutorial.mdtext
URL: http://svn.apache.org/viewvc/lucy/site/trunk/content/docs/test/Lucy/Docs/Tutorial/AnalysisTutorial.mdtext?rev=1732480&r1=1732479&r2=1732480&view=diff
==============================================================================
--- lucy/site/trunk/content/docs/test/Lucy/Docs/Tutorial/AnalysisTutorial.mdtext (original)
+++ lucy/site/trunk/content/docs/test/Lucy/Docs/Tutorial/AnalysisTutorial.mdtext Fri Feb 26 13:44:34 2016
@@ -16,65 +16,92 @@ name="DESCRIPTION"
 <p>Try swapping out the EasyAnalyzer in our Schema for a <a href="../../../Lucy/Analysis/StandardTokenizer.html" class="podlinkpod"
 >StandardTokenizer</a>:</p>
 
-<pre>    my $tokenizer = Lucy::Analysis::StandardTokenizer-&#62;new;
-    my $type = Lucy::Plan::FullTextType-&#62;new(
-        analyzer =&#62; $tokenizer,
-    );</pre>
-
-<p>Search for <code>senate</code>, <code>Senate</code>, and <code>Senator</code> before and after making the change and re-indexing.</p>
-
-<p>Under EasyAnalyzer, the results are identical for all three searches, but under StandardTokenizer, searches are case-sensitive, and the result sets for <code>Senate</code> and <code>Senator</code> are distinct.</p>
+<pre>my $tokenizer = Lucy::Analysis::StandardTokenizer-&#62;new;
+my $type = Lucy::Plan::FullTextType-&#62;new(
+    analyzer =&#62; $tokenizer,
+);</pre>
+
+<p>Search for <code>senate</code>,
+<code>Senate</code>,
+and <code>Senator</code> before and after making the change and re-indexing.</p>
+
+<p>Under EasyAnalyzer,
+the results are identical for all three searches,
+but under StandardTokenizer,
+searches are case-sensitive,
+and the result sets for <code>Senate</code> and <code>Senator</code> are distinct.</p>
 
 <h3><a class='u'
 name="EasyAnalyzer"
 >EasyAnalyzer</a></h3>
 
 <p>What&#8217;s happening is that <a href="../../../Lucy/Analysis/EasyAnalyzer.html" class="podlinkpod"
->EasyAnalyzer</a> is performing more aggressive processing than StandardTokenizer. In addition to tokenizing, it&#8217;s also converting all text to lower case so that searches are case-insensitive, and using a &#8220;stemming&#8221; algorithm to reduce related words to a common stem (<code>senat</code>, in this case).</p>
-
-<p>EasyAnalyzer is actually multiple Analyzers wrapped up in a single package. In this case, it&#8217;s three-in-one, since specifying a EasyAnalyzer with <code>language =&#62; &#39;en&#39;</code> is equivalent to this snippet creating a <a href="../../../Lucy/Analysis/PolyAnalyzer.html" class="podlinkpod"
+>EasyAnalyzer</a> is performing more aggressive processing than StandardTokenizer.
+In addition to tokenizing,
+it&#8217;s also converting all text to lower case so that searches are case-insensitive,
+and using a &#8220;stemming&#8221; algorithm to reduce related words to a common stem (<code>senat</code>,
+in this case).</p>
+
+<p>EasyAnalyzer is actually multiple Analyzers wrapped up in a single package.
+In this case,
+it&#8217;s three-in-one,
+since specifying a EasyAnalyzer with <code>language =&#62; &#39;en&#39;</code> is equivalent to this snippet creating a <a href="../../../Lucy/Analysis/PolyAnalyzer.html" class="podlinkpod"
 >PolyAnalyzer</a>:</p>
 
-<pre>    my $tokenizer    = Lucy::Analysis::StandardTokenizer-&#62;new;
-    my $normalizer   = Lucy::Analysis::Normalizer-&#62;new;
-    my $stemmer      = Lucy::Analysis::SnowballStemmer-&#62;new( language =&#62; &#39;en&#39; );
-    my $polyanalyzer = Lucy::Analysis::PolyAnalyzer-&#62;new(
-        analyzers =&#62; [ $tokenizer, $normalizer, $stemmer ],
-    );</pre>
-
-<p>You can add or subtract Analyzers from there if you like. Try adding a fourth Analyzer, a SnowballStopFilter for suppressing &#8220;stopwords&#8221; like &#8220;the&#8221;, &#8220;if&#8221;, and &#8220;maybe&#8221;.</p>
-
-<pre>    my $stopfilter = Lucy::Analysis::SnowballStopFilter-&#62;new( 
-        language =&#62; &#39;en&#39;,
-    );
-    my $polyanalyzer = Lucy::Analysis::PolyAnalyzer-&#62;new(
-        analyzers =&#62; [ $tokenizer, $normalizer, $stopfilter, $stemmer ],
-    );</pre>
-
-<p>Also, try removing the SnowballStemmer.</p>
-
-<pre>    my $polyanalyzer = Lucy::Analysis::PolyAnalyzer-&#62;new(
-        analyzers =&#62; [ $tokenizer, $normalizer ],
-    );</pre>
+<pre>my $tokenizer    = Lucy::Analysis::StandardTokenizer-&#62;new;
+my $normalizer   = Lucy::Analysis::Normalizer-&#62;new;
+my $stemmer      = Lucy::Analysis::SnowballStemmer-&#62;new( language =&#62; &#39;en&#39; );
+my $polyanalyzer = Lucy::Analysis::PolyAnalyzer-&#62;new(
+    analyzers =&#62; [ $tokenizer, $normalizer, $stemmer ],
+);</pre>
+
+<p>You can add or subtract Analyzers from there if you like.
+Try adding a fourth Analyzer,
+a SnowballStopFilter for suppressing &#8220;stopwords&#8221; like &#8220;the&#8221;,
+&#8220;if&#8221;,
+and &#8220;maybe&#8221;.</p>
+
+<pre>my $stopfilter = Lucy::Analysis::SnowballStopFilter-&#62;new( 
+    language =&#62; &#39;en&#39;,
+);
+my $polyanalyzer = Lucy::Analysis::PolyAnalyzer-&#62;new(
+    analyzers =&#62; [ $tokenizer, $normalizer, $stopfilter, $stemmer ],
+);</pre>
+
+<p>Also,
+try removing the SnowballStemmer.</p>
+
+<pre>my $polyanalyzer = Lucy::Analysis::PolyAnalyzer-&#62;new(
+    analyzers =&#62; [ $tokenizer, $normalizer ],
+);</pre>
 
-<p>The original choice of a stock English EasyAnalyzer probably still yields the best results for this document collection, but you get the idea: sometimes you want a different Analyzer.</p>
+<p>The original choice of a stock English EasyAnalyzer probably still yields the best results for this document collection,
+but you get the idea: sometimes you want a different Analyzer.</p>
 
 <h3><a class='u'
 name="When_the_best_Analyzer_is_no_Analyzer"
 >When the best Analyzer is no Analyzer</a></h3>
 
-<p>Sometimes you don&#8217;t want an Analyzer at all. That was true for our &#8220;url&#8221; field because we didn&#8217;t need it to be searchable, but it&#8217;s also true for certain types of searchable fields. For instance, &#8220;category&#8221; fields are often set up to match exactly or not at all, as are fields like &#8220;last_name&#8221; (because you may not want to conflate results for &#8220;Humphrey&#8221; and &#8220;Humphries&#8221;).</p>
+<p>Sometimes you don&#8217;t want an Analyzer at all.
+That was true for our &#8220;url&#8221; field because we didn&#8217;t need it to be searchable,
+but it&#8217;s also true for certain types of searchable fields.
+For instance,
+&#8220;category&#8221; fields are often set up to match exactly or not at all,
+as are fields like &#8220;last_name&#8221; (because you may not want to conflate results for &#8220;Humphrey&#8221; and &#8220;Humphries&#8221;).</p>
 
-<p>To specify that there should be no analysis performed at all, use StringType:</p>
+<p>To specify that there should be no analysis performed at all,
+use StringType:</p>
 
-<pre>    my $type = Lucy::Plan::StringType-&#62;new;
-    $schema-&#62;spec_field( name =&#62; &#39;category&#39;, type =&#62; $type );</pre>
+<pre>my $type = Lucy::Plan::StringType-&#62;new;
+$schema-&#62;spec_field( name =&#62; &#39;category&#39;, type =&#62; $type );</pre>
 
 <h3><a class='u'
 name="Highlighting_up_next"
 >Highlighting up next</a></h3>
 
-<p>In our next tutorial chapter, <a href="../../../Lucy/Docs/Tutorial/HighlighterTutorial.html" class="podlinkpod"
->HighlighterTutorial</a>, we&#8217;ll add highlighted excerpts from the &#8220;content&#8221; field to our search results.</p>
+<p>In our next tutorial chapter,
+<a href="../../../Lucy/Docs/Tutorial/HighlighterTutorial.html" class="podlinkpod"
+>HighlighterTutorial</a>,
+we&#8217;ll add highlighted excerpts from the &#8220;content&#8221; field to our search results.</p>
 
 </div>

Modified: lucy/site/trunk/content/docs/test/Lucy/Docs/Tutorial/BeyondSimpleTutorial.mdtext
URL: http://svn.apache.org/viewvc/lucy/site/trunk/content/docs/test/Lucy/Docs/Tutorial/BeyondSimpleTutorial.mdtext?rev=1732480&r1=1732479&r2=1732480&view=diff
==============================================================================
--- lucy/site/trunk/content/docs/test/Lucy/Docs/Tutorial/BeyondSimpleTutorial.mdtext (original)
+++ lucy/site/trunk/content/docs/test/Lucy/Docs/Tutorial/BeyondSimpleTutorial.mdtext Fri Feb 26 13:44:34 2016
@@ -51,89 +51,108 @@ name="Adaptations_to_indexer.pl"
 
 <p>After we load our modules&#8230;</p>
 
-<pre>    use Lucy::Plan::Schema;
-    use Lucy::Plan::FullTextType;
-    use Lucy::Analysis::EasyAnalyzer;
-    use Lucy::Index::Indexer;</pre>
+<pre>use Lucy::Plan::Schema;
+use Lucy::Plan::FullTextType;
+use Lucy::Analysis::EasyAnalyzer;
+use Lucy::Index::Indexer;</pre>
 
 <p>&#8230; the first item we&#8217;re going need is a <a href="../../../Lucy/Plan/Schema.html" class="podlinkpod"
 >Schema</a>.</p>
 
-<p>The primary job of a Schema is to specify what fields are available and how they&#8217;re defined. We&#8217;ll start off with three fields: title, content and url.</p>
-
-<pre>    # Create Schema.
-    my $schema = Lucy::Plan::Schema-&#62;new;
-    my $easyanalyzer = Lucy::Analysis::EasyAnalyzer-&#62;new(
-        language =&#62; &#39;en&#39;,
-    );
-    my $type = Lucy::Plan::FullTextType-&#62;new(
-        analyzer =&#62; $easyanalyzer,
-    );
-    $schema-&#62;spec_field( name =&#62; &#39;title&#39;,   type =&#62; $type );
-    $schema-&#62;spec_field( name =&#62; &#39;content&#39;, type =&#62; $type );
-    $schema-&#62;spec_field( name =&#62; &#39;url&#39;,     type =&#62; $type );</pre>
+<p>The primary job of a Schema is to specify what fields are available and how they&#8217;re defined.
+We&#8217;ll start off with three fields: title,
+content and url.</p>
+
+<pre># Create Schema.
+my $schema = Lucy::Plan::Schema-&#62;new;
+my $easyanalyzer = Lucy::Analysis::EasyAnalyzer-&#62;new(
+    language =&#62; &#39;en&#39;,
+);
+my $type = Lucy::Plan::FullTextType-&#62;new(
+    analyzer =&#62; $easyanalyzer,
+);
+$schema-&#62;spec_field( name =&#62; &#39;title&#39;,   type =&#62; $type );
+$schema-&#62;spec_field( name =&#62; &#39;content&#39;, type =&#62; $type );
+$schema-&#62;spec_field( name =&#62; &#39;url&#39;,     type =&#62; $type );</pre>
 
 <p>All of the fields are spec&#8217;d out using the <a href="../../../Lucy/Plan/FullTextType.html" class="podlinkpod"
->FullTextType</a> FieldType, indicating that they will be searchable as &#8220;full text&#8221; &#8211; which means that they can be searched for individual words. The &#8220;analyzer&#8221;, which is unique to FullTextType fields, is what breaks up the text into searchable tokens.</p>
-
-<p>Next, we&#8217;ll swap our Lucy::Simple object out for an <a href="../../../Lucy/Index/Indexer.html" class="podlinkpod"
->Indexer</a>. The substitution will be straightforward because Simple has merely been serving as a thin wrapper around an inner Indexer, and we&#8217;ll just be peeling away the wrapper.</p>
-
-<p>First, replace the constructor:</p>
-
-<pre>    # Create Indexer.
-    my $indexer = Lucy::Index::Indexer-&#62;new(
-        index    =&#62; $path_to_index,
-        schema   =&#62; $schema,
-        create   =&#62; 1,
-        truncate =&#62; 1,
-    );</pre>
+>FullTextType</a> FieldType,
+indicating that they will be searchable as &#8220;full text&#8221; &#8211; which means that they can be searched for individual words.
+The &#8220;analyzer&#8221;,
+which is unique to FullTextType fields,
+is what breaks up the text into searchable tokens.</p>
+
+<p>Next,
+we&#8217;ll swap our Lucy::Simple object out for an <a href="../../../Lucy/Index/Indexer.html" class="podlinkpod"
+>Indexer</a>.
+The substitution will be straightforward because Simple has merely been serving as a thin wrapper around an inner Indexer,
+and we&#8217;ll just be peeling away the wrapper.</p>
+
+<p>First,
+replace the constructor:</p>
+
+<pre># Create Indexer.
+my $indexer = Lucy::Index::Indexer-&#62;new(
+    index    =&#62; $path_to_index,
+    schema   =&#62; $schema,
+    create   =&#62; 1,
+    truncate =&#62; 1,
+);</pre>
 
-<p>Next, have the <code>indexer</code> object <a href="../../../Lucy/Index/Indexer.html#add_doc" class="podlinkpod"
+<p>Next,
+have the <code>indexer</code> object <a href="../../../Lucy/Index/Indexer.html#add_doc" class="podlinkpod"
 >add_doc()</a> where we were having the <code>lucy</code> object adding the document before:</p>
 
-<pre>    foreach my $filename (@filenames) {
-        my $doc = parse_file($filename);
-        $indexer-&#62;add_doc($doc);
-    }</pre>
+<pre>foreach my $filename (@filenames) {
+    my $doc = parse_file($filename);
+    $indexer-&#62;add_doc($doc);
+}</pre>
+
+<p>There&#8217;s only one extra step required: at the end of the app,
+you must call commit() explicitly to close the indexing session and commit your changes.
+(Lucy::Simple hides this detail,
+calling commit() implicitly when it needs to).</p>
 
-<p>There&#8217;s only one extra step required: at the end of the app, you must call commit() explicitly to close the indexing session and commit your changes. (Lucy::Simple hides this detail, calling commit() implicitly when it needs to).</p>
-
-<pre>    $indexer-&#62;commit;</pre>
+<pre>$indexer-&#62;commit;</pre>
 
 <h3><a class='u'
 name="Adaptations_to_search.cgi"
 >Adaptations to search.cgi</a></h3>
 
-<p>In our search app as in our indexing app, Lucy::Simple has served as a thin wrapper &#8211; this time around <a href="../../../Lucy/Search/IndexSearcher.html" class="podlinkpod"
+<p>In our search app as in our indexing app,
+Lucy::Simple has served as a thin wrapper &#8211; this time around <a href="../../../Lucy/Search/IndexSearcher.html" class="podlinkpod"
 >IndexSearcher</a> and <a href="../../../Lucy/Search/Hits.html" class="podlinkpod"
->Hits</a>. Swapping out Simple for these two classes is also straightforward:</p>
+>Hits</a>.
+Swapping out Simple for these two classes is also straightforward:</p>
+
+<pre>use Lucy::Search::IndexSearcher;
+
+my $searcher = Lucy::Search::IndexSearcher-&#62;new( 
+    index =&#62; $path_to_index,
+);
+my $hits = $searcher-&#62;hits(    # returns a Hits object, not a hit count
+    query      =&#62; $q,
+    offset     =&#62; $offset,
+    num_wanted =&#62; $page_size,
+);
+my $hit_count = $hits-&#62;total_hits;  # get the hit count here
+
+...
 
-<pre>    use Lucy::Search::IndexSearcher;
-    
-    my $searcher = Lucy::Search::IndexSearcher-&#62;new( 
-        index =&#62; $path_to_index,
-    );
-    my $hits = $searcher-&#62;hits(    # returns a Hits object, not a hit count
-        query      =&#62; $q,
-        offset     =&#62; $offset,
-        num_wanted =&#62; $page_size,
-    );
-    my $hit_count = $hits-&#62;total_hits;  # get the hit count here
-    
+while ( my $hit = $hits-&#62;next ) {
     ...
-    
-    while ( my $hit = $hits-&#62;next ) {
-        ...
-    }</pre>
+}</pre>
 
 <h3><a class='u'
 name="Hooray!"
 >Hooray!</a></h3>
 
-<p>Congratulations! Your apps do the same thing as before&#8230; but now they&#8217;ll be easier to customize.</p>
+<p>Congratulations!
+Your apps do the same thing as before&#8230; but now they&#8217;ll be easier to customize.</p>
 
-<p>In our next chapter, <a href="../../../Lucy/Docs/Tutorial/FieldTypeTutorial.html" class="podlinkpod"
->FieldTypeTutorial</a>, we&#8217;ll explore how to assign different behaviors to different fields.</p>
+<p>In our next chapter,
+<a href="../../../Lucy/Docs/Tutorial/FieldTypeTutorial.html" class="podlinkpod"
+>FieldTypeTutorial</a>,
+we&#8217;ll explore how to assign different behaviors to different fields.</p>
 
 </div>

Modified: lucy/site/trunk/content/docs/test/Lucy/Docs/Tutorial/FieldTypeTutorial.mdtext
URL: http://svn.apache.org/viewvc/lucy/site/trunk/content/docs/test/Lucy/Docs/Tutorial/FieldTypeTutorial.mdtext?rev=1732480&r1=1732479&r2=1732480&view=diff
==============================================================================
--- lucy/site/trunk/content/docs/test/Lucy/Docs/Tutorial/FieldTypeTutorial.mdtext (original)
+++ lucy/site/trunk/content/docs/test/Lucy/Docs/Tutorial/FieldTypeTutorial.mdtext Fri Feb 26 13:44:34 2016
@@ -15,49 +15,67 @@ name="DESCRIPTION"
 
 <p>The Schema we used in the last chapter specifies three fields:</p>
 
-<pre>    my $type = Lucy::Plan::FullTextType-&#62;new(
-        analyzer =&#62; $easyanalyzer,
-    );
-    $schema-&#62;spec_field( name =&#62; &#39;title&#39;,   type =&#62; $type );
-    $schema-&#62;spec_field( name =&#62; &#39;content&#39;, type =&#62; $type );
-    $schema-&#62;spec_field( name =&#62; &#39;url&#39;,     type =&#62; $type );</pre>
+<pre>my $type = Lucy::Plan::FullTextType-&#62;new(
+    analyzer =&#62; $easyanalyzer,
+);
+$schema-&#62;spec_field( name =&#62; &#39;title&#39;,   type =&#62; $type );
+$schema-&#62;spec_field( name =&#62; &#39;content&#39;, type =&#62; $type );
+$schema-&#62;spec_field( name =&#62; &#39;url&#39;,     type =&#62; $type );</pre>
+
+<p>Since they are all defined as &#8220;full text&#8221; fields,
+they are all searchable &#8211; including the <code>url</code> field,
+a dubious choice.
+Some URLs contain meaningful information,
+but these don&#8217;t,
+really:</p>
 
-<p>Since they are all defined as &#8220;full text&#8221; fields, they are all searchable &#8211; including the <code>url</code> field, a dubious choice. Some URLs contain meaningful information, but these don&#8217;t, really:</p>
+<pre>http://example.com/us_constitution/amend1.txt</pre>
 
-<pre>    http://example.com/us_constitution/amend1.txt</pre>
-
-<p>We may as well not bother indexing the URL content. To achieve that we need to assign the <code>url</code> field to a different FieldType.</p>
+<p>We may as well not bother indexing the URL content.
+To achieve that we need to assign the <code>url</code> field to a different FieldType.</p>
 
 <h3><a class='u'
 name="StringType"
 >StringType</a></h3>
 
-<p>Instead of FullTextType, we&#8217;ll use a <a href="../../../Lucy/Plan/StringType.html" class="podlinkpod"
->StringType</a>, which doesn&#8217;t use an Analyzer to break up text into individual fields. Furthermore, we&#8217;ll mark this StringType as unindexed, so that its content won&#8217;t be searchable at all.</p>
+<p>Instead of FullTextType,
+we&#8217;ll use a <a href="../../../Lucy/Plan/StringType.html" class="podlinkpod"
+>StringType</a>,
+which doesn&#38;#8217;t use an Analyzer to break up text into individual tokens.
+Furthermore,
+we&#8217;ll mark this StringType as unindexed,
+so that its content won&#8217;t be searchable at all.</p>
 
-<pre>    my $url_type = Lucy::Plan::StringType-&#62;new( indexed =&#62; 0 );
-    $schema-&#62;spec_field( name =&#62; &#39;url&#39;, type =&#62; $url_type );</pre>
+<pre>my $url_type = Lucy::Plan::StringType-&#62;new( indexed =&#62; 0 );
+$schema-&#62;spec_field( name =&#62; &#39;url&#39;, type =&#62; $url_type );</pre>
 
-<p>To observe the change in behavior, try searching for <code>us_constitution</code> both before and after changing the Schema and re-indexing.</p>
+<p>To observe the change in behavior,
+try searching for <code>us_constitution</code> both before and after changing the Schema and re-indexing.</p>
 
 <h3><a class='u'
 name="Toggling_(8216)stored(8217)"
 >Toggling &#8216;stored&#8217;</a></h3>
 
-<p>For a taste of other FieldType possibilities, try turning off <code>stored</code> for one or more fields.</p>
-
-<pre>    my $content_type = Lucy::Plan::FullTextType-&#62;new(
-        analyzer =&#62; $easyanalyzer,
-        stored   =&#62; 0,
-    );</pre>
+<p>For a taste of other FieldType possibilities,
+try turning off <code>stored</code> for one or more fields.</p>
 
-<p>Turning off <code>stored</code> for either <code>title</code> or <code>url</code> mangles our results page, but since we&#8217;re not displaying <code>content</code>, turning it off for <code>content</code> has no effect &#8211; except on index size.</p>
+<pre>my $content_type = Lucy::Plan::FullTextType-&#62;new(
+    analyzer =&#62; $easyanalyzer,
+    stored   =&#62; 0,
+);</pre>
+
+<p>Turning off <code>stored</code> for either <code>title</code> or <code>url</code> mangles our results page,
+but since we&#8217;re not displaying <code>content</code>,
+turning it off for <code>content</code> has no effect &#8211; except on index size.</p>
 
 <h3><a class='u'
 name="Analyzers_up_next"
 >Analyzers up next</a></h3>
 
-<p>Analyzers play a crucial role in the behavior of FullTextType fields. In our next tutorial chapter, <a href="../../../Lucy/Docs/Tutorial/AnalysisTutorial.html" class="podlinkpod"
->AnalysisTutorial</a>, we&#8217;ll see how changing up the Analyzer changes search results.</p>
+<p>Analyzers play a crucial role in the behavior of FullTextType fields.
+In our next tutorial chapter,
+<a href="../../../Lucy/Docs/Tutorial/AnalysisTutorial.html" class="podlinkpod"
+>AnalysisTutorial</a>,
+we&#8217;ll see how changing up the Analyzer changes search results.</p>
 
 </div>

Modified: lucy/site/trunk/content/docs/test/Lucy/Docs/Tutorial/HighlighterTutorial.mdtext
URL: http://svn.apache.org/viewvc/lucy/site/trunk/content/docs/test/Lucy/Docs/Tutorial/HighlighterTutorial.mdtext?rev=1732480&r1=1732479&r2=1732480&view=diff
==============================================================================
--- lucy/site/trunk/content/docs/test/Lucy/Docs/Tutorial/HighlighterTutorial.mdtext (original)
+++ lucy/site/trunk/content/docs/test/Lucy/Docs/Tutorial/HighlighterTutorial.mdtext Fri Feb 26 13:44:34 2016
@@ -25,49 +25,52 @@ name="Adaptations_to_indexer.pl"
 To save resources,
 highlighting is disabled by default and must be turned on for individual fields.</p>
 
-<pre>    my $highlightable = Lucy::Plan::FullTextType-&#62;new(
-        analyzer      =&#62; $easyanalyzer,
-        highlightable =&#62; 1,
-    );
-    $schema-&#62;spec_field( name =&#62; &#39;content&#39;, type =&#62; $highlightable );</pre>
+<pre>my $highlightable = Lucy::Plan::FullTextType-&#62;new(
+    analyzer      =&#62; $easyanalyzer,
+    highlightable =&#62; 1,
+);
+$schema-&#62;spec_field( name =&#62; &#39;content&#39;, type =&#62; $highlightable );</pre>
 
 <h3><a class='u'
 name="Adaptations_to_search.cgi"
 >Adaptations to search.cgi</a></h3>
 
-<p>To add highlighting and excerpting to the search.cgi sample app, create a <code>$highlighter</code> object outside the hits iterating loop&#8230;</p>
+<p>To add highlighting and excerpting to the search.cgi sample app,
+create a <code>$highlighter</code> object outside the hits iterating loop&#8230;</p>
 
-<pre>    my $highlighter = Lucy::Highlight::Highlighter-&#62;new(
-        searcher =&#62; $searcher,
-        query    =&#62; $q,
-        field    =&#62; &#39;content&#39;
-    );</pre>
+<pre>my $highlighter = Lucy::Highlight::Highlighter-&#62;new(
+    searcher =&#62; $searcher,
+    query    =&#62; $q,
+    field    =&#62; &#39;content&#39;
+);</pre>
 
 <p>&#8230; then modify the loop and the per-hit display to generate and include the excerpt.</p>
 
-<pre>    # Create result list.
-    my $report = &#39;&#39;;
-    while ( my $hit = $hits-&#62;next ) {
-        my $score   = sprintf( &#34;%0.3f&#34;, $hit-&#62;get_score );
-        my $excerpt = $highlighter-&#62;create_excerpt($hit);
-        $report .= qq|
-            &#60;p&#62;
-              &#60;a href=&#34;$hit-&#62;{url}&#34;&#62;&#60;strong&#62;$hit-&#62;{title}&#60;/strong&#62;&#60;/a&#62;
-              &#60;em&#62;$score&#60;/em&#62;
-              &#60;br /&#62;
-              $excerpt
-              &#60;br /&#62;
-              &#60;span class=&#34;excerptURL&#34;&#62;$hit-&#62;{url}&#60;/span&#62;
-            &#60;/p&#62;
-        |;
-    }</pre>
+<pre># Create result list.
+my $report = &#39;&#39;;
+while ( my $hit = $hits-&#62;next ) {
+    my $score   = sprintf( &#34;%0.3f&#34;, $hit-&#62;get_score );
+    my $excerpt = $highlighter-&#62;create_excerpt($hit);
+    $report .= qq|
+        &#60;p&#62;
+          &#60;a href=&#34;$hit-&#62;{url}&#34;&#62;&#60;strong&#62;$hit-&#62;{title}&#60;/strong&#62;&#60;/a&#62;
+          &#60;em&#62;$score&#60;/em&#62;
+          &#60;br /&#62;
+          $excerpt
+          &#60;br /&#62;
+          &#60;span class=&#34;excerptURL&#34;&#62;$hit-&#62;{url}&#60;/span&#62;
+        &#60;/p&#62;
+    |;
+}</pre>
 
 <h3><a class='u'
 name="Next_chapter:_Query_objects"
 >Next chapter: Query objects</a></h3>
 
-<p>Our next tutorial chapter, <a href="../../../Lucy/Docs/Tutorial/QueryObjectsTutorial.html" class="podlinkpod"
->QueryObjectsTutorial</a>, illustrates how to build an &#8220;advanced search&#8221; interface using <a href="../../../Lucy/Search/Query.html" class="podlinkpod"
+<p>Our next tutorial chapter,
+<a href="../../../Lucy/Docs/Tutorial/QueryObjectsTutorial.html" class="podlinkpod"
+>QueryObjectsTutorial</a>,
+illustrates how to build an &#8220;advanced search&#8221; interface using <a href="../../../Lucy/Search/Query.html" class="podlinkpod"
 >Query</a> objects instead of query strings.</p>
 
 </div>

Modified: lucy/site/trunk/content/docs/test/Lucy/Docs/Tutorial/QueryObjectsTutorial.mdtext
URL: http://svn.apache.org/viewvc/lucy/site/trunk/content/docs/test/Lucy/Docs/Tutorial/QueryObjectsTutorial.mdtext?rev=1732480&r1=1732479&r2=1732480&view=diff
==============================================================================
--- lucy/site/trunk/content/docs/test/Lucy/Docs/Tutorial/QueryObjectsTutorial.mdtext (original)
+++ lucy/site/trunk/content/docs/test/Lucy/Docs/Tutorial/QueryObjectsTutorial.mdtext Fri Feb 26 13:44:34 2016
@@ -42,22 +42,25 @@ It needs to be indexed,
 but since we won&#8217;t display its value,
 it doesn&#8217;t need to be stored.</p>
 
-<pre>    my $cat_type = Lucy::Plan::StringType-&#62;new( stored =&#62; 0 );
-    $schema-&#62;spec_field( name =&#62; &#39;category&#39;, type =&#62; $cat_type );</pre>
+<pre>my $cat_type = Lucy::Plan::StringType-&#62;new( stored =&#62; 0 );
+$schema-&#62;spec_field( name =&#62; &#39;category&#39;, type =&#62; $cat_type );</pre>
 
-<p>There will be three possible values: &#8220;article&#8221;, &#8220;amendment&#8221;, and &#8220;preamble&#8221;, which we&#8217;ll hack out of the source file&#8217;s name during our <code>parse_file</code> subroutine:</p>
-
-<pre>    my $category
-        = $filename =~ /art/      ? &#39;article&#39;
-        : $filename =~ /amend/    ? &#39;amendment&#39;
-        : $filename =~ /preamble/ ? &#39;preamble&#39;
-        :                           die &#34;Can&#39;t derive category for $filename&#34;;
-    return {
-        title    =&#62; $title,
-        content  =&#62; $bodytext,
-        url      =&#62; &#34;/us_constitution/$filename&#34;,
-        category =&#62; $category,
-    };</pre>
+<p>There will be three possible values: &#8220;article&#8221;,
+&#8220;amendment&#8221;,
+and &#8220;preamble&#8221;,
+which we&#8217;ll hack out of the source file&#8217;s name during our <code>parse_file</code> subroutine:</p>
+
+<pre>my $category
+    = $filename =~ /art/      ? &#39;article&#39;
+    : $filename =~ /amend/    ? &#39;amendment&#39;
+    : $filename =~ /preamble/ ? &#39;preamble&#39;
+    :                           die &#34;Can&#39;t derive category for $filename&#34;;
+return {
+    title    =&#62; $title,
+    content  =&#62; $bodytext,
+    url      =&#62; &#34;/us_constitution/$filename&#34;,
+    category =&#62; $category,
+};</pre>
 
 <h3><a class='u'
 name="Adaptations_to_search.cgi"
@@ -65,65 +68,72 @@ name="Adaptations_to_search.cgi"
 
 <p>The &#8220;category&#8221; constraint will be added to our search interface using an HTML &#8220;select&#8221; element (this routine will need to be integrated into the HTML generation section of search.cgi):</p>
 
-<pre>    # Build up the HTML &#34;select&#34; object for the &#34;category&#34; field.
-    sub generate_category_select {
-        my $cat = shift;
-        my $select = qq|
-          &#60;select name=&#34;category&#34;&#62;
-            &#60;option value=&#34;&#34;&#62;All Sections&#60;/option&#62;
-            &#60;option value=&#34;article&#34;&#62;Articles&#60;/option&#62;
-            &#60;option value=&#34;amendment&#34;&#62;Amendments&#60;/option&#62;
-          &#60;/select&#62;|;
-        if ($cat) {
-            $select =~ s/&#34;$cat&#34;/&#34;$cat&#34; selected/;
-        }
-        return $select;
-    }</pre>
+<pre># Build up the HTML &#34;select&#34; object for the &#34;category&#34; field.
+sub generate_category_select {
+    my $cat = shift;
+    my $select = qq|
+      &#60;select name=&#34;category&#34;&#62;
+        &#60;option value=&#34;&#34;&#62;All Sections&#60;/option&#62;
+        &#60;option value=&#34;article&#34;&#62;Articles&#60;/option&#62;
+        &#60;option value=&#34;amendment&#34;&#62;Amendments&#60;/option&#62;
+      &#60;/select&#62;|;
+    if ($cat) {
+        $select =~ s/&#34;$cat&#34;/&#34;$cat&#34; selected/;
+    }
+    return $select;
+}</pre>
 
 <p>We&#8217;ll start off by loading our new modules and extracting our new CGI parameter.</p>
 
-<pre>    use Lucy::Search::QueryParser;
-    use Lucy::Search::TermQuery;
-    use Lucy::Search::ANDQuery;
-    
-    ... 
-    
-    my $category = decode( &#34;UTF-8&#34;, $cgi-&#62;param(&#39;category&#39;) || &#39;&#39; );</pre>
-
-<p>QueryParser&#8217;s constructor requires a &#8220;schema&#8221; argument. We can get that from our IndexSearcher:</p>
-
-<pre>    # Create an IndexSearcher and a QueryParser.
-    my $searcher = Lucy::Search::IndexSearcher-&#62;new( 
-        index =&#62; $path_to_index, 
+<pre>use Lucy::Search::QueryParser;
+use Lucy::Search::TermQuery;
+use Lucy::Search::ANDQuery;
+
+... 
+
+my $category = decode( &#34;UTF-8&#34;, $cgi-&#62;param(&#39;category&#39;) || &#39;&#39; );</pre>
+
+<p>QueryParser&#8217;s constructor requires a &#8220;schema&#8221; argument.
+We can get that from our IndexSearcher:</p>
+
+<pre># Create an IndexSearcher and a QueryParser.
+my $searcher = Lucy::Search::IndexSearcher-&#62;new( 
+    index =&#62; $path_to_index, 
+);
+my $qparser  = Lucy::Search::QueryParser-&#62;new( 
+    schema =&#62; $searcher-&#62;get_schema,
+);</pre>
+
+<p>Previously,
+we have been handing raw query strings to IndexSearcher.
+Behind the scenes,
+IndexSearcher has been using a QueryParser to turn those query strings into Query objects.
+Now,
+we will bring QueryParser into the foreground and parse the strings explicitly.</p>
+
+<pre>my $query = $qparser-&#62;parse($q);</pre>
+
+<p>If the user has specified a category,
+we&#8217;ll use an ANDQuery to join our parsed query together with a TermQuery representing the category.</p>
+
+<pre>if ($category) {
+    my $category_query = Lucy::Search::TermQuery-&#62;new(
+        field =&#62; &#39;category&#39;, 
+        term  =&#62; $category,
     );
-    my $qparser  = Lucy::Search::QueryParser-&#62;new( 
-        schema =&#62; $searcher-&#62;get_schema,
-    );</pre>
-
-<p>Previously, we have been handing raw query strings to IndexSearcher. Behind the scenes, IndexSearcher has been using a QueryParser to turn those query strings into Query objects. Now, we will bring QueryParser into the foreground and parse the strings explicitly.</p>
-
-<pre>    my $query = $qparser-&#62;parse($q);</pre>
-
-<p>If the user has specified a category, we&#8217;ll use an ANDQuery to join our parsed query together with a TermQuery representing the category.</p>
-
-<pre>    if ($category) {
-        my $category_query = Lucy::Search::TermQuery-&#62;new(
-            field =&#62; &#39;category&#39;, 
-            term  =&#62; $category,
-        );
-        $query = Lucy::Search::ANDQuery-&#62;new(
-            children =&#62; [ $query, $category_query ]
-        );
-    }</pre>
+    $query = Lucy::Search::ANDQuery-&#62;new(
+        children =&#62; [ $query, $category_query ]
+    );
+}</pre>
 
 <p>Now when we execute the query&#8230;</p>
 
-<pre>    # Execute the Query and get a Hits object.
-    my $hits = $searcher-&#62;hits(
-        query      =&#62; $query,
-        offset     =&#62; $offset,
-        num_wanted =&#62; $page_size,
-    );</pre>
+<pre># Execute the Query and get a Hits object.
+my $hits = $searcher-&#62;hits(
+    query      =&#62; $query,
+    offset     =&#62; $offset,
+    num_wanted =&#62; $page_size,
+);</pre>
 
 <p>&#8230; we&#8217;ll get a result set which is the intersection of the parsed query and the category query.</p>
 
@@ -131,38 +141,42 @@ name="Adaptations_to_search.cgi"
 name="Using_TermQuery_with_full_text_fields"
 >Using TermQuery with full text fields</a></h3>
 
-<p>When querying full text fields, the easiest way is to create query objects using QueryParser. But sometimes you want to create TermQuery for a single term in a FullTextType field directly. In this case, we have to run the search term through the field&#8217;s analyzer to make sure it gets normalized in the same way as the field&#8217;s content.</p>
-
-<pre>    sub make_term_query {
-        my ($field, $term) = @_;
-    
-        my $token;
-        my $type = $schema-&#62;fetch_type($field);
-    
-        if ( $type-&#62;isa(&#39;Lucy::Plan::FullTextType&#39;) ) {
-            # Run the term through the full text analysis chain.
-            my $analyzer = $type-&#62;get_analyzer;
-            my $tokens   = $analyzer-&#62;split($term);
-    
-            if ( @$tokens != 1 ) {
-                # If the term expands to more than one token, or no
-                # tokens at all, it will never match a token in the
-                # full text field.
-                return Lucy::Search::NoMatchQuery-&#62;new;
-            }
-    
-            $token = $tokens-&#62;[0];
+<p>When querying full text fields,
+the easiest way is to create query objects using QueryParser.
+But sometimes you want to create a TermQuery for a single term in a FullTextType field directly.
+In this case,
+we have to run the search term through the field&#8217;s analyzer to make sure it gets normalized in the same way as the field&#8217;s content.</p>
+
+<pre>sub make_term_query {
+    my ($field, $term) = @_;
+
+    my $token;
+    my $type = $schema-&#62;fetch_type($field);
+
+    if ( $type-&#62;isa(&#39;Lucy::Plan::FullTextType&#39;) ) {
+        # Run the term through the full text analysis chain.
+        my $analyzer = $type-&#62;get_analyzer;
+        my $tokens   = $analyzer-&#62;split($term);
+
+        if ( @$tokens != 1 ) {
+            # If the term expands to more than one token, or no
+            # tokens at all, it will never match a token in the
+            # full text field.
+            return Lucy::Search::NoMatchQuery-&#62;new;
         }
-        else {
-            # Exact match for other types.
-            $token = $term;
-        }
-    
-        return Lucy::Search::TermQuery-&#62;new(
-            field =&#62; $field,
-            term  =&#62; $token,
-        );
-    }</pre>
+
+        $token = $tokens-&#62;[0];
+    }
+    else {
+        # Exact match for other types.
+        $token = $term;
+    }
+
+    return Lucy::Search::TermQuery-&#62;new(
+        field =&#62; $field,
+        term  =&#62; $token,
+    );
+}</pre>
 
 <h3><a class='u'
 name="Congratulations!"
@@ -174,11 +188,15 @@ name="Congratulations!"
 name="See_Also"
 >See Also</a></h3>
 
-<p>For additional thematic documentation, see the Apache Lucy <a href="../../../Lucy/Docs/Cookbook.html" class="podlinkpod"
+<p>For additional thematic documentation,
+see the Apache Lucy <a href="../../../Lucy/Docs/Cookbook.html" class="podlinkpod"
 >Cookbook</a>.</p>
 
-<p>ANDQuery has a companion class, <a href="../../../Lucy/Search/ORQuery.html" class="podlinkpod"
->ORQuery</a>, and a close relative, <a href="../../../Lucy/Search/RequiredOptionalQuery.html" class="podlinkpod"
+<p>ANDQuery has a companion class,
+<a href="../../../Lucy/Search/ORQuery.html" class="podlinkpod"
+>ORQuery</a>,
+and a close relative,
+<a href="../../../Lucy/Search/RequiredOptionalQuery.html" class="podlinkpod"
 >RequiredOptionalQuery</a>.</p>
 
 </div>