You are viewing a plain-text version of this content; the canonical (HTML) version is available at the archive's original link.
Posted to commits@lucene.apache.org by dw...@apache.org on 2018/09/06 08:22:26 UTC
lucene-solr:branch_7x: Revert "LUCENE-8481: Javadocs should no longer
reference RAMDirectory."
Repository: lucene-solr
Updated Branches:
refs/heads/branch_7x 3cd58d130 -> fc8d9eba1
Revert "LUCENE-8481: Javadocs should no longer reference RAMDirectory."
This reverts commit 3cd58d130e403f11cbbd0cd2673a6a58da361854.
Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/fc8d9eba
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/fc8d9eba
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/fc8d9eba
Branch: refs/heads/branch_7x
Commit: fc8d9eba1ebf779e3cbda487bae854f7b17549b0
Parents: 3cd58d1
Author: Dawid Weiss <dw...@apache.org>
Authored: Thu Sep 6 10:22:04 2018 +0200
Committer: Dawid Weiss <dw...@apache.org>
Committed: Thu Sep 6 10:22:04 2018 +0200
----------------------------------------------------------------------
.../apache/lucene/collation/package-info.java | 21 +++---
lucene/analysis/icu/src/java/overview.html | 21 +++---
lucene/core/src/java/overview.html | 25 ++++---
.../src/test/org/apache/lucene/TestDemo.java | 69 +++++++++-----------
4 files changed, 65 insertions(+), 71 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/fc8d9eba/lucene/analysis/common/src/java/org/apache/lucene/collation/package-info.java
----------------------------------------------------------------------
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/collation/package-info.java b/lucene/analysis/common/src/java/org/apache/lucene/collation/package-info.java
index 5b83ea5..e56071a 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/collation/package-info.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/collation/package-info.java
@@ -49,14 +49,13 @@
* // "fa" Locale is not supported by Sun JDK 1.4 or 1.5
* Collator collator = Collator.getInstance(new Locale("ar"));
* CollationKeyAnalyzer analyzer = new CollationKeyAnalyzer(collator);
- * Path dirPath = Files.createTempDirectory("tempIndex");
- * Directory dir = FSDirectory.open(dirPath);
- * IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(analyzer));
+ * RAMDirectory ramDir = new RAMDirectory();
+ * IndexWriter writer = new IndexWriter(ramDir, new IndexWriterConfig(analyzer));
* Document doc = new Document();
* doc.add(new TextField("content", "\u0633\u0627\u0628", Field.Store.YES));
* writer.addDocument(doc);
* writer.close();
- * IndexReader ir = DirectoryReader.open(dir);
+ * IndexReader ir = DirectoryReader.open(ramDir);
* IndexSearcher is = new IndexSearcher(ir);
*
* QueryParser aqp = new QueryParser("content", analyzer);
@@ -76,9 +75,8 @@
* <pre class="prettyprint">
* Analyzer analyzer
* = new CollationKeyAnalyzer(Collator.getInstance(new Locale("da", "dk")));
- * Path dirPath = Files.createTempDirectory("tempIndex");
- * Directory dir = FSDirectory.open(dirPath);
- * IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(analyzer));
+ * RAMDirectory indexStore = new RAMDirectory();
+ * IndexWriter writer = new IndexWriter(indexStore, new IndexWriterConfig(analyzer));
* String[] tracer = new String[] { "A", "B", "C", "D", "E" };
* String[] data = new String[] { "HAT", "HUT", "H\u00C5T", "H\u00D8T", "HOT" };
* String[] sortedTracerOrder = new String[] { "A", "E", "B", "D", "C" };
@@ -89,7 +87,7 @@
* writer.addDocument(doc);
* }
* writer.close();
- * IndexReader ir = DirectoryReader.open(dir);
+ * IndexReader ir = DirectoryReader.open(indexStore);
* IndexSearcher searcher = new IndexSearcher(ir);
* Sort sort = new Sort();
* sort.setSort(new SortField("contents", SortField.STRING));
@@ -106,14 +104,13 @@
* Collator collator = Collator.getInstance(new Locale("tr", "TR"));
* collator.setStrength(Collator.PRIMARY);
* Analyzer analyzer = new CollationKeyAnalyzer(collator);
- * Path dirPath = Files.createTempDirectory("tempIndex");
- * Directory dir = FSDirectory.open(dirPath);
- * IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(analyzer));
+ * RAMDirectory ramDir = new RAMDirectory();
+ * IndexWriter writer = new IndexWriter(ramDir, new IndexWriterConfig(analyzer));
* Document doc = new Document();
* doc.add(new TextField("contents", "DIGY", Field.Store.NO));
* writer.addDocument(doc);
* writer.close();
- * IndexReader ir = DirectoryReader.open(dir);
+ * IndexReader ir = DirectoryReader.open(ramDir);
* IndexSearcher is = new IndexSearcher(ir);
* QueryParser parser = new QueryParser("contents", analyzer);
* Query query = parser.parse("d\u0131gy"); // U+0131: dotless i
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/fc8d9eba/lucene/analysis/icu/src/java/overview.html
----------------------------------------------------------------------
diff --git a/lucene/analysis/icu/src/java/overview.html b/lucene/analysis/icu/src/java/overview.html
index 3af0247..6e0a5d7 100644
--- a/lucene/analysis/icu/src/java/overview.html
+++ b/lucene/analysis/icu/src/java/overview.html
@@ -115,15 +115,14 @@ algorithm.
<pre class="prettyprint">
Collator collator = Collator.getInstance(new ULocale("ar"));
ICUCollationKeyAnalyzer analyzer = new ICUCollationKeyAnalyzer(collator);
- Path indexPath = Files.createTempDirectory("tempIndex");
- Directory dir = FSDirectory.open(indexPath);
- IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(analyzer));
+ RAMDirectory ramDir = new RAMDirectory();
+ IndexWriter writer = new IndexWriter(ramDir, new IndexWriterConfig(analyzer));
Document doc = new Document();
doc.add(new Field("content", "\u0633\u0627\u0628",
Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(doc);
writer.close();
- IndexSearcher is = new IndexSearcher(dir, true);
+ IndexSearcher is = new IndexSearcher(ramDir, true);
QueryParser aqp = new QueryParser("content", analyzer);
aqp.setAnalyzeRangeTerms(true);
@@ -142,9 +141,8 @@ algorithm.
<pre class="prettyprint">
Analyzer analyzer
= new ICUCollationKeyAnalyzer(Collator.getInstance(new ULocale("da", "dk")));
- Path indexPath = Files.createTempDirectory("tempIndex");
- Directory dir = FSDirectory.open(indexPath);
- IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(analyzer));
+ RAMDirectory indexStore = new RAMDirectory();
+ IndexWriter writer = new IndexWriter(indexStore, new IndexWriterConfig(analyzer));
String[] tracer = new String[] { "A", "B", "C", "D", "E" };
String[] data = new String[] { "HAT", "HUT", "H\u00C5T", "H\u00D8T", "HOT" };
String[] sortedTracerOrder = new String[] { "A", "E", "B", "D", "C" };
@@ -155,7 +153,7 @@ algorithm.
writer.addDocument(doc);
}
writer.close();
- IndexSearcher searcher = new IndexSearcher(dir, true);
+ IndexSearcher searcher = new IndexSearcher(indexStore, true);
Sort sort = new Sort();
sort.setSort(new SortField("contents", SortField.STRING));
Query query = new MatchAllDocsQuery();
@@ -171,14 +169,13 @@ algorithm.
Collator collator = Collator.getInstance(new ULocale("tr", "TR"));
collator.setStrength(Collator.PRIMARY);
Analyzer analyzer = new ICUCollationKeyAnalyzer(collator);
- Path indexPath = Files.createTempDirectory("tempIndex");
- Directory dir = FSDirectory.open(indexPath);
- IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(analyzer));
+ RAMDirectory ramDir = new RAMDirectory();
+ IndexWriter writer = new IndexWriter(ramDir, new IndexWriterConfig(analyzer));
Document doc = new Document();
doc.add(new Field("contents", "DIGY", Field.Store.NO, Field.Index.ANALYZED));
writer.addDocument(doc);
writer.close();
- IndexSearcher is = new IndexSearcher(dir, true);
+ IndexSearcher is = new IndexSearcher(ramDir, true);
QueryParser parser = new QueryParser("contents", analyzer);
Query query = parser.parse("d\u0131gy"); // U+0131: dotless i
ScoreDoc[] result = is.search(query, null, 1000).scoreDocs;
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/fc8d9eba/lucene/core/src/java/overview.html
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/overview.html b/lucene/core/src/java/overview.html
index b4f5b81..b7112ac 100644
--- a/lucene/core/src/java/overview.html
+++ b/lucene/core/src/java/overview.html
@@ -24,14 +24,18 @@
Here's a simple example how to use Lucene for indexing and searching (using JUnit
to check if the results are what we expect):</p>
-<!-- code comes from org.apache.lucene.TestDemo.
- See LUCENE-8481 for reasons why it's out of sync with the code.
- -->
+<!-- code comes from org.apache.lucene.TestDemo: -->
+<!-- ======================================================== -->
+<!-- = Java Sourcecode to HTML automatically converted code = -->
+<!-- = Java2Html Converter 5.0 [2006-03-04] by Markus Gebhard markus@jave.de = -->
+<!-- = Further information: http://www.java2html.de = -->
<pre class="prettyprint">
Analyzer analyzer = new StandardAnalyzer();
- Path indexPath = Files.createTempDirectory("tempIndex");
- Directory directory = FSDirectory.open(indexPath)
+ // Store the index in memory:
+ Directory directory = new RAMDirectory();
+ // To store an index on disk, use this instead:
+ //Directory directory = FSDirectory.open("/tmp/testindex");
IndexWriterConfig config = new IndexWriterConfig(analyzer);
IndexWriter iwriter = new IndexWriter(directory, config);
Document doc = new Document();
@@ -46,7 +50,7 @@ to check if the results are what we expect):</p>
// Parse a simple query that searches for "text":
QueryParser parser = new QueryParser("fieldname", analyzer);
Query query = parser.parse("text");
- ScoreDoc[] hits = isearcher.search(query, 10).scoreDocs;
+ ScoreDoc[] hits = isearcher.search(query, null, 1000).scoreDocs;
assertEquals(1, hits.length);
// Iterate through the results:
for (int i = 0; i < hits.length; i++) {
@@ -54,8 +58,8 @@ to check if the results are what we expect):</p>
assertEquals("This is the text to be indexed.", hitDoc.get("fieldname"));
}
ireader.close();
- directory.close();
- IOUtils.rm(indexPath);</pre>
+ directory.close();</pre>
+<!-- = END of automatically generated HTML code = -->
<!-- ======================================================== -->
@@ -108,8 +112,9 @@ query structures from strings or xml.
defines an abstract class for storing persistent data, the {@link org.apache.lucene.store.Directory Directory},
which is a collection of named files written by an {@link org.apache.lucene.store.IndexOutput IndexOutput}
and read by an {@link org.apache.lucene.store.IndexInput IndexInput}.
-Multiple implementations are provided, but {@link org.apache.lucene.store.FSDirectory FSDirectory} is generally
-recommended as it tries to use operating system disk buffer caches efficiently.</li>
+Multiple implementations are provided, including {@link org.apache.lucene.store.FSDirectory FSDirectory},
+which uses a file system directory to store files, and {@link org.apache.lucene.store.RAMDirectory RAMDirectory}
+which implements files as memory-resident data structures.</li>
<li>
<b>{@link org.apache.lucene.util}</b>
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/fc8d9eba/lucene/core/src/test/org/apache/lucene/TestDemo.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/test/org/apache/lucene/TestDemo.java b/lucene/core/src/test/org/apache/lucene/TestDemo.java
index 28a415e..b4778d9 100644
--- a/lucene/core/src/test/org/apache/lucene/TestDemo.java
+++ b/lucene/core/src/test/org/apache/lucene/TestDemo.java
@@ -18,22 +18,17 @@ package org.apache.lucene;
import java.io.IOException;
-import java.nio.file.Files;
-import java.nio.file.Path;
import org.apache.lucene.analysis.Analyzer;
-import org.apache.lucene.analysis.standard.StandardAnalyzer;
+import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.IndexWriter;
-import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.*;
import org.apache.lucene.store.Directory;
-import org.apache.lucene.store.FSDirectory;
-import org.apache.lucene.util.IOUtils;
import org.apache.lucene.util.LuceneTestCase;
/**
@@ -45,40 +40,40 @@ import org.apache.lucene.util.LuceneTestCase;
public class TestDemo extends LuceneTestCase {
public void testDemo() throws IOException {
+ Analyzer analyzer = new MockAnalyzer(random());
+
+ // Store the index in memory:
+ Directory directory = newDirectory();
+ // To store an index on disk, use this instead:
+ // Directory directory = FSDirectory.open(new File("/tmp/testindex"));
+ RandomIndexWriter iwriter = new RandomIndexWriter(random(), directory, analyzer);
+ Document doc = new Document();
String longTerm = "longtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongterm";
String text = "This is the text to be indexed. " + longTerm;
+ doc.add(newTextField("fieldname", text, Field.Store.YES));
+ iwriter.addDocument(doc);
+ iwriter.close();
+
+ // Now search the index:
+ IndexReader ireader = DirectoryReader.open(directory); // read-only=true
+ IndexSearcher isearcher = newSearcher(ireader);
- Path indexPath = Files.createTempDirectory("tempIndex");
- try (Directory dir = FSDirectory.open(indexPath)) {
- Analyzer analyzer = new StandardAnalyzer();
- try (IndexWriter iw = new IndexWriter(dir, new IndexWriterConfig(analyzer))) {
- Document doc = new Document();
- doc.add(newTextField("fieldname", text, Field.Store.YES));
- iw.addDocument(doc);
- }
-
- // Now search the index.
- try (IndexReader reader = DirectoryReader.open(dir)) {
- IndexSearcher searcher = newSearcher(reader);
-
- assertEquals(1, searcher.count(new TermQuery(new Term("fieldname", longTerm))));
-
- Query query = new TermQuery(new Term("fieldname", "text"));
- TopDocs hits = searcher.search(query, 1);
- assertEquals(1, hits.totalHits);
-
- // Iterate through the results.
- for (int i = 0; i < hits.scoreDocs.length; i++) {
- Document hitDoc = searcher.doc(hits.scoreDocs[i].doc);
- assertEquals(text, hitDoc.get("fieldname"));
- }
-
- // Test simple phrase query.
- PhraseQuery phraseQuery = new PhraseQuery("fieldname", "to", "be");
- assertEquals(1, searcher.count(phraseQuery));
- }
+ assertEquals(1, isearcher.search(new TermQuery(new Term("fieldname", longTerm)), 1).totalHits);
+ Query query = new TermQuery(new Term("fieldname", "text"));
+ TopDocs hits = isearcher.search(query, 1);
+ assertEquals(1, hits.totalHits);
+ // Iterate through the results:
+ for (int i = 0; i < hits.scoreDocs.length; i++) {
+ Document hitDoc = isearcher.doc(hits.scoreDocs[i].doc);
+ assertEquals(text, hitDoc.get("fieldname"));
}
- IOUtils.rm(indexPath);
+ // Test simple phrase query
+ PhraseQuery phraseQuery = new PhraseQuery("fieldname", "to", "be");
+ assertEquals(1, isearcher.search(phraseQuery, 1).totalHits);
+
+ ireader.close();
+ directory.close();
+ analyzer.close();
}
}