This article collects typical usage examples of the Lucene.Net.Search.IndexSearcher.Close method in C#. If you have been wondering what Lucene.Net.Search.IndexSearcher.Close does, how to call it, or what it looks like in real code, the curated samples below should help. You can also read further about the containing class, Lucene.Net.Search.IndexSearcher.
Below are 15 code examples of Lucene.Net.Search.IndexSearcher.Close, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C# examples.
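Before the individual examples, here is a minimal sketch of the lifecycle most of them follow: open an IndexSearcher over a Directory, run a query, then release resources with Close. This is an illustrative sketch rather than one of the collected examples; it assumes a 2.x-era Lucene.Net API (Hits, FSDirectory.GetDirectory), and the path and field name are placeholders:

// Minimal sketch: open, search, close (assumes the index already exists)
Directory directory = FSDirectory.GetDirectory("/tmp/testindex", false);
IndexSearcher searcher = new IndexSearcher(directory);
try
{
    Hits hits = searcher.Search(new TermQuery(new Term("fieldname", "text")));
    System.Console.WriteLine("matches: " + hits.Length());
}
finally
{
    // Close() here also closes the internal reader, because the searcher
    // was constructed from a Directory rather than an open IndexReader
    searcher.Close();
    directory.Close();
}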
Example 1: TestDemo_Renamed_Method
public virtual void TestDemo_Renamed_Method()
{
    Analyzer analyzer = new StandardAnalyzer();
    // Store the index in memory:
    Directory directory = new RAMDirectory();
    // To store an index on disk, use this instead (note that the
    // parameter true will overwrite the index in that directory
    // if one exists):
    //Directory directory = FSDirectory.GetDirectory("/tmp/testindex", true);
    IndexWriter iwriter = new IndexWriter(directory, analyzer, true);
    iwriter.SetMaxFieldLength(25000);
    Document doc = new Document();
    System.String text = "This is the text to be indexed.";
    doc.Add(new Field("fieldname", text, Field.Store.YES, Field.Index.TOKENIZED));
    iwriter.AddDocument(doc);
    iwriter.Close();
    // Now search the index:
    IndexSearcher isearcher = new IndexSearcher(directory);
    // Parse a simple query that searches for "text":
    Lucene.Net.QueryParsers.QueryParser parser = new Lucene.Net.QueryParsers.QueryParser("fieldname", analyzer);
    Query query = parser.Parse("text");
    Hits hits = isearcher.Search(query);
    Assert.AreEqual(1, hits.Length());
    // Iterate through the results:
    for (int i = 0; i < hits.Length(); i++)
    {
        Document hitDoc = hits.Doc(i);
        Assert.AreEqual("This is the text to be indexed.", hitDoc.Get("fieldname"));
    }
    isearcher.Close();
    directory.Close();
}
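Note that this demo closes the searcher and directory only on the success path; if an assertion throws, the handles are never released. That is usually acceptable in test code, but in application code the Close() calls belong in a finally block, as in the sketch above.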
Example 2: TestDemo_Renamed
public virtual void TestDemo_Renamed()
{
    Analyzer analyzer = new StandardAnalyzer(Version.LUCENE_CURRENT);
    // Store the index in memory:
    Directory directory = new RAMDirectory();
    // To store an index on disk, use this instead:
    //Directory directory = FSDirectory.Open(new System.IO.FileInfo("/tmp/testindex"));
    IndexWriter iwriter = new IndexWriter(directory, analyzer, true, new IndexWriter.MaxFieldLength(25000));
    Document doc = new Document();
    System.String text = "This is the text to be indexed.";
    doc.Add(new Field("fieldname", text, Field.Store.YES, Field.Index.ANALYZED));
    iwriter.AddDocument(doc);
    iwriter.Close();
    // Now search the index:
    IndexSearcher isearcher = new IndexSearcher(directory, true); // read-only=true
    // Parse a simple query that searches for "text":
    QueryParser parser = new QueryParser("fieldname", analyzer);
    Query query = parser.Parse("text");
    ScoreDoc[] hits = isearcher.Search(query, null, 1000).scoreDocs;
    Assert.AreEqual(1, hits.Length);
    // Iterate through the results:
    for (int i = 0; i < hits.Length; i++)
    {
        Document hitDoc = isearcher.Doc(hits[i].doc);
        Assert.AreEqual("This is the text to be indexed.", hitDoc.Get("fieldname"));
    }
    isearcher.Close();
    directory.Close();
}
Example 3: Main
public static void Main(System.String[] args)
{
    System.String index = @"c:\EmailTest\LuceneDB";
    IndexReader reader = IndexReader.Open(FSDirectory.Open(new System.IO.FileInfo(index)), true); // only searching, so read-only=true
    Searcher searcher = new IndexSearcher(reader);
    if (Stopwatch.IsHighResolution)
    {
        System.Console.WriteLine("We have a high resolution timer with a frequency of {0} ticks/ms", Stopwatch.Frequency / 1000);
    }
    searchFor(searcher, "jeske AND neotonic");
    searchFor(searcher, "noticed AND problems");
    searchFor(searcher, "data AND returned");
    searchFor(searcher, "scott AND hassan");
    searcher.Close();
    reader.Close();
    System.Console.WriteLine("done");
}
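The searchFor helper is not part of this excerpt. A plausible reconstruction (hypothetical; the field name "body" and the hit count are assumptions) would parse the query string and time the search with the Stopwatch checked above:

// Hypothetical helper, reconstructed for illustration only
private static void searchFor(Searcher searcher, System.String queryString)
{
    QueryParser parser = new QueryParser("body", new StandardAnalyzer()); // field name assumed
    Query query = parser.Parse(queryString);
    Stopwatch timer = Stopwatch.StartNew();
    TopDocs hits = searcher.Search(query, null, 20);
    timer.Stop();
    System.Console.WriteLine("found {0} hits in {1} ms for [{2}]",
        hits.totalHits, timer.ElapsedMilliseconds, queryString);
}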
Example 4: PreviouslyIndexed
public static bool PreviouslyIndexed(string url)
{
    string indexFileLocation = indexDir;
    Lucene.Net.Store.Directory dir = Lucene.Net.Store.FSDirectory.GetDirectory(indexFileLocation, false);
    Lucene.Net.Search.IndexSearcher searcher = new Lucene.Net.Search.IndexSearcher(dir);
    Lucene.Net.Search.Hits hits = null;
    try
    {
        Lucene.Net.Search.Query query = new Lucene.Net.Search.TermQuery(new Lucene.Net.Index.Term("url", url));
        hits = searcher.Search(query);
    }
    catch { } // a failed search is treated as "not indexed"
    finally
    {
        searcher.Close();
    }
    return hits != null && hits.Length() > 0;
}
Example 5: SearchProjects
public static List<IndexedItem> SearchProjects(string s)
{
    List<IndexedItem> retVal = new List<IndexedItem>();
    string indexFileLocation = indexDir;
    Lucene.Net.Store.Directory dir = Lucene.Net.Store.FSDirectory.GetDirectory(indexFileLocation, false);
    Lucene.Net.Search.IndexSearcher searcher = new Lucene.Net.Search.IndexSearcher(dir);
    try
    {
        Lucene.Net.Search.Query query = new Lucene.Net.Search.TermQuery(new Lucene.Net.Index.Term("content", s));
        query = query.Combine(new Lucene.Net.Search.Query[] { query, new Lucene.Net.Search.TermQuery(new Lucene.Net.Index.Term("url", fromUrl)) });
        query = query.Combine(new Lucene.Net.Search.Query[] { query, new Lucene.Net.Search.TermQuery(new Lucene.Net.Index.Term("title", s)) });
        // execute the query
        Lucene.Net.Search.Hits hits = searcher.Search(query);
        // iterate over the results
        for (int i = 0; i < hits.Length(); i++)
        {
            Lucene.Net.Documents.Document doc = hits.Doc(i);
            string article = doc.Get("content");
            string title = doc.Get("title");
            string url = doc.Get("url");
            retVal.Add(new IndexedItem { Article = article, Href = url, Title = title });
        }
        foreach (IndexedItem ind in retVal)
        {
            Console.WriteLine(ind.Href);
        }
        retVal = retVal.Distinct().ToList();
    }
    catch { }
    finally
    {
        searcher.Close();
    }
    return retVal;
}
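A side note on the query construction above: merging TermQuery instances with Query.Combine is uncommon. The more idiomatic way to express "match content, title, or url" in this era of Lucene.Net is a BooleanQuery with SHOULD clauses; a minimal sketch using the same field names:

// Sketch of the same intent with BooleanQuery (2.x-era API)
BooleanQuery combined = new BooleanQuery();
combined.Add(new TermQuery(new Term("content", s)), BooleanClause.Occur.SHOULD);
combined.Add(new TermQuery(new Term("title", s)), BooleanClause.Occur.SHOULD);
combined.Add(new TermQuery(new Term("url", fromUrl)), BooleanClause.Occur.SHOULD);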
Example 6: TestRAMDirectoryString
public virtual void TestRAMDirectoryString()
{
    MockRAMDirectory ramDir = new MockRAMDirectory(indexDir.FullName);
    // Check size
    Assert.AreEqual(ramDir.SizeInBytes(), ramDir.GetRecomputedSizeInBytes());
    // open reader to test document count
    IndexReader reader = IndexReader.Open(ramDir);
    Assert.AreEqual(docsToAdd, reader.NumDocs());
    // open searcher to check if all docs are there
    IndexSearcher searcher = new IndexSearcher(reader);
    // search for all documents
    for (int i = 0; i < docsToAdd; i++)
    {
        Document doc = searcher.Doc(i);
        Assert.IsTrue(doc.GetField("content") != null);
    }
    // cleanup
    reader.Close();
    searcher.Close();
}
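Two details worth noticing here: the searcher wraps an externally opened IndexReader, so searcher.Close() does not close that reader and both must be released explicitly; and the reader is closed before the searcher, which is harmless only because no further searches happen after that point.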
Example 7: TestSetBufferSize
public virtual void TestSetBufferSize()
{
    System.IO.DirectoryInfo indexDir = new System.IO.DirectoryInfo(System.IO.Path.Combine(AppSettings.Get("tempDir", ""), "testSetBufferSize"));
    MockFSDirectory dir = new MockFSDirectory(indexDir, NewRandom());
    try
    {
        IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
        writer.UseCompoundFile = false;
        for (int i = 0; i < 37; i++)
        {
            Document doc = new Document();
            doc.Add(new Field("content", "aaa bbb ccc ddd" + i, Field.Store.YES, Field.Index.ANALYZED));
            doc.Add(new Field("id", "" + i, Field.Store.YES, Field.Index.ANALYZED));
            writer.AddDocument(doc);
        }
        writer.Close();
        dir.allIndexInputs.Clear();
        IndexReader reader = IndexReader.Open(dir, false);
        Term aaa = new Term("content", "aaa");
        Term bbb = new Term("content", "bbb");
        Term ccc = new Term("content", "ccc");
        Assert.AreEqual(37, reader.DocFreq(ccc));
        reader.DeleteDocument(0);
        Assert.AreEqual(37, reader.DocFreq(aaa));
        dir.tweakBufferSizes();
        reader.DeleteDocument(4);
        Assert.AreEqual(reader.DocFreq(bbb), 37);
        dir.tweakBufferSizes();
        IndexSearcher searcher = new IndexSearcher(reader);
        ScoreDoc[] hits = searcher.Search(new TermQuery(bbb), null, 1000).ScoreDocs;
        dir.tweakBufferSizes();
        Assert.AreEqual(35, hits.Length);
        dir.tweakBufferSizes();
        hits = searcher.Search(new TermQuery(new Term("id", "33")), null, 1000).ScoreDocs;
        dir.tweakBufferSizes();
        Assert.AreEqual(1, hits.Length);
        hits = searcher.Search(new TermQuery(aaa), null, 1000).ScoreDocs;
        dir.tweakBufferSizes();
        Assert.AreEqual(35, hits.Length);
        searcher.Close();
        reader.Close();
    }
    finally
    {
        _TestUtil.RmDir(indexDir);
    }
}
Example 8: ChangeIndexWithAdds
/* Open pre-lockless index, add docs, do a delete &
 * setNorm, and search */
public virtual void ChangeIndexWithAdds(System.String dirName, bool autoCommit)
{
    System.String origDirName = dirName;
    dirName = FullDir(dirName);
    Directory dir = FSDirectory.Open(new System.IO.FileInfo(dirName));
    // open writer
    IndexWriter writer = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), false);
    // add 10 docs
    for (int i = 0; i < 10; i++)
    {
        AddDoc(writer, 35 + i);
    }
    // make sure writer sees right total -- writer seems not to know about deletes in .del?
    int expected;
    if (Compare(origDirName, "24") < 0)
    {
        expected = 45;
    }
    else
    {
        expected = 46;
    }
    Assert.AreEqual(expected, writer.DocCount(), "wrong doc count");
    writer.Close();
    // make sure searching sees right # hits
    IndexSearcher searcher = new IndexSearcher(dir);
    ScoreDoc[] hits = searcher.Search(new TermQuery(new Term("content", "aaa")), null, 1000).scoreDocs;
    Document d = searcher.Doc(hits[0].doc);
    Assert.AreEqual("21", d.Get("id"), "wrong first document");
    TestHits(hits, 44, searcher.GetIndexReader());
    searcher.Close();
    // make sure we can do delete & setNorm against this
    // pre-lockless segment:
    IndexReader reader = IndexReader.Open(dir);
    Term searchTerm = new Term("id", "6");
    int delCount = reader.DeleteDocuments(searchTerm);
    Assert.AreEqual(1, delCount, "wrong delete count");
    reader.SetNorm(22, "content", (float) 2.0);
    reader.Close();
    // make sure they "took":
    searcher = new IndexSearcher(dir);
    hits = searcher.Search(new TermQuery(new Term("content", "aaa")), null, 1000).scoreDocs;
    Assert.AreEqual(43, hits.Length, "wrong number of hits");
    d = searcher.Doc(hits[0].doc);
    Assert.AreEqual("22", d.Get("id"), "wrong first document");
    TestHits(hits, 43, searcher.GetIndexReader());
    searcher.Close();
    // optimize
    writer = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), false);
    writer.Optimize();
    writer.Close();
    searcher = new IndexSearcher(dir);
    hits = searcher.Search(new TermQuery(new Term("content", "aaa")), null, 1000).scoreDocs;
    Assert.AreEqual(43, hits.Length, "wrong number of hits");
    d = searcher.Doc(hits[0].doc);
    TestHits(hits, 43, searcher.GetIndexReader());
    Assert.AreEqual("22", d.Get("id"), "wrong first document");
    searcher.Close();
    dir.Close();
}
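This test drives the legacy pattern of deleting documents and setting norms directly through an IndexReader. Notice that the searcher is closed and reopened after each batch of modifications: an IndexSearcher sees only the point-in-time state of the index it was opened on, so reopening is what makes the delete and setNorm visible to the subsequent searches.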
Example 9: ApplyDeletes
// Apply buffered delete terms, queries and docIDs to the
// provided reader
private bool ApplyDeletes(IndexReader reader, int docIDStart)
{
    lock (this)
    {
        int docEnd = docIDStart + reader.MaxDoc();
        bool any = false;
        // Delete by term
        IEnumerator<KeyValuePair<object, object>> iter = deletesFlushed.terms.GetEnumerator();
        while (iter.MoveNext())
        {
            KeyValuePair<object, object> entry = (KeyValuePair<object, object>) iter.Current;
            Term term = (Term) entry.Key;
            TermDocs docs = reader.TermDocs(term);
            if (docs != null)
            {
                int limit = ((BufferedDeletes.Num) entry.Value).GetNum();
                try
                {
                    while (docs.Next())
                    {
                        int docID = docs.Doc();
                        if (docIDStart + docID >= limit)
                            break;
                        reader.DeleteDocument(docID);
                        any = true;
                    }
                }
                finally
                {
                    docs.Close();
                }
            }
        }
        // Delete by docID
        IEnumerator<object> iter2 = deletesFlushed.docIDs.GetEnumerator();
        while (iter2.MoveNext())
        {
            int docID = (int) iter2.Current;
            if (docID >= docIDStart && docID < docEnd)
            {
                reader.DeleteDocument(docID - docIDStart);
                any = true;
            }
        }
        // Delete by query
        IndexSearcher searcher = new IndexSearcher(reader);
        iter = deletesFlushed.queries.GetEnumerator();
        while (iter.MoveNext())
        {
            KeyValuePair<object, object> entry = (KeyValuePair<object, object>) iter.Current;
            Query query = (Query) entry.Key;
            int limit = (int) entry.Value;
            Weight weight = query.Weight(searcher);
            Scorer scorer = weight.Scorer(reader);
            while (scorer.Next())
            {
                int docID = scorer.Doc();
                if (docIDStart + docID >= limit)
                    break;
                reader.DeleteDocument(docID);
                any = true;
            }
        }
        searcher.Close();
        return any;
    }
}
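Example 12 below is a later revision of this same internal method, and comparing the two is instructive: the newer version reuses a single TermDocs via Seek instead of opening one per term, asserts that delete terms arrive in sorted order, and null-checks the Scorer before iterating, all of which this earlier version lacks.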
Example 10: TestGetValuesForIndexedDocument
public virtual void TestGetValuesForIndexedDocument()
{
    RAMDirectory dir = new RAMDirectory();
    IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(), true);
    writer.AddDocument(MakeDocumentWithFields());
    writer.Close();
    Searcher searcher = new IndexSearcher(dir);
    // search for something that does exist
    Query query = new TermQuery(new Term("keyword", "test1"));
    // ensure that queries return expected results without DateFilter first
    Hits hits = searcher.Search(query);
    Assert.AreEqual(1, hits.Length());
    try
    {
        DoAssert(hits.Doc(0), true);
    }
    catch (System.Exception e)
    {
        System.Console.Error.WriteLine(e.StackTrace);
        System.Console.Error.Write("\n");
    }
    finally
    {
        searcher.Close();
    }
}
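Unlike most of the demo snippets, this one puts searcher.Close() in a finally block, so the searcher is released even if DoAssert throws; only the RAMDirectory is left for the garbage collector.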
Example 11: TestSetBufferSize
public virtual void TestSetBufferSize()
{
    System.IO.FileInfo indexDir = new System.IO.FileInfo(System.IO.Path.Combine(SupportClass.AppSettings.Get("tempDir", ""), "testSetBufferSize"));
    MockFSDirectory dir = new MockFSDirectory(indexDir);
    try
    {
        IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true);
        writer.SetUseCompoundFile(false);
        for (int i = 0; i < 37; i++)
        {
            Document doc = new Document();
            doc.Add(new Field("content", "aaa bbb ccc ddd" + i, Field.Store.YES, Field.Index.TOKENIZED));
            doc.Add(new Field("id", "" + i, Field.Store.YES, Field.Index.TOKENIZED));
            writer.AddDocument(doc);
        }
        writer.Close();
        dir.allIndexInputs.Clear();
        IndexReader reader = IndexReader.Open(dir);
        Term aaa = new Term("content", "aaa");
        Term bbb = new Term("content", "bbb");
        Term ccc = new Term("content", "ccc");
        Assert.AreEqual(reader.DocFreq(ccc), 37);
        reader.DeleteDocument(0);
        Assert.AreEqual(reader.DocFreq(aaa), 37);
        dir.TweakBufferSizes();
        reader.DeleteDocument(4);
        Assert.AreEqual(reader.DocFreq(bbb), 37);
        dir.TweakBufferSizes();
        IndexSearcher searcher = new IndexSearcher(reader);
        Hits hits = searcher.Search(new TermQuery(bbb));
        dir.TweakBufferSizes();
        Assert.AreEqual(35, hits.Length());
        dir.TweakBufferSizes();
        hits = searcher.Search(new TermQuery(new Term("id", "33")));
        dir.TweakBufferSizes();
        Assert.AreEqual(1, hits.Length());
        hits = searcher.Search(new TermQuery(aaa));
        dir.TweakBufferSizes();
        Assert.AreEqual(35, hits.Length());
        searcher.Close();
        reader.Close();
    }
    finally
    {
        _TestUtil.RmDir(indexDir);
    }
}
Example 12: ApplyDeletes
// Apply buffered delete terms, queries and docIDs to the
// provided reader
private bool ApplyDeletes(IndexReader reader, int docIDStart)
{
    lock (this)
    {
        int docEnd = docIDStart + reader.MaxDoc();
        bool any = false;
        System.Diagnostics.Debug.Assert(CheckDeleteTerm(null));
        // Delete by term
        //System.Collections.IEnumerator iter = new System.Collections.Hashtable(deletesFlushed.terms).GetEnumerator();
        System.Collections.IEnumerator iter = deletesFlushed.terms.GetEnumerator();
        TermDocs docs = reader.TermDocs();
        try
        {
            while (iter.MoveNext())
            {
                System.Collections.DictionaryEntry entry = (System.Collections.DictionaryEntry) iter.Current;
                Term term = (Term) entry.Key;
                // LUCENE-2086: we should be iterating a TreeMap,
                // here, so terms better be in order:
                System.Diagnostics.Debug.Assert(CheckDeleteTerm(term));
                docs.Seek(term);
                int limit = ((BufferedDeletes.Num) entry.Value).GetNum();
                while (docs.Next())
                {
                    int docID = docs.Doc();
                    if (docIDStart + docID >= limit)
                        break;
                    reader.DeleteDocument(docID);
                    any = true;
                }
            }
        }
        finally
        {
            docs.Close();
        }
        // Delete by docID
        iter = deletesFlushed.docIDs.GetEnumerator();
        while (iter.MoveNext())
        {
            int docID = ((System.Int32) iter.Current);
            if (docID >= docIDStart && docID < docEnd)
            {
                reader.DeleteDocument(docID - docIDStart);
                any = true;
            }
        }
        // Delete by query
        IndexSearcher searcher = new IndexSearcher(reader);
        iter = new System.Collections.Hashtable(deletesFlushed.queries).GetEnumerator();
        while (iter.MoveNext())
        {
            System.Collections.DictionaryEntry entry = (System.Collections.DictionaryEntry) iter.Current;
            Query query = (Query) entry.Key;
            int limit = ((System.Int32) entry.Value);
            Weight weight = query.Weight(searcher);
            Scorer scorer = weight.Scorer(reader, true, false);
            if (scorer != null)
            {
                while (true)
                {
                    int doc = scorer.NextDoc();
                    if (((long) docIDStart) + doc >= limit)
                        break;
                    reader.DeleteDocument(doc);
                    any = true;
                }
            }
        }
        searcher.Close();
        return any;
    }
}
Example 13: TestNegativePositions
public void TestNegativePositions()
{
    SinkTokenizer tokens = new SinkTokenizer();
    Token t = new Token();
    t.SetTermText("a");
    t.SetPositionIncrement(0);
    tokens.Add(t);
    t.SetTermText("b");
    t.SetPositionIncrement(1);
    tokens.Add(t);
    t.SetTermText("c");
    tokens.Add(t);
    MockRAMDirectory dir = new MockRAMDirectory();
    IndexWriter w = new IndexWriter(dir, false, new WhitespaceAnalyzer(), true);
    Document doc = new Document();
    doc.Add(new Field("field", tokens));
    w.AddDocument(doc);
    w.Close();
    IndexSearcher s = new IndexSearcher(dir);
    PhraseQuery pq = new PhraseQuery();
    pq.Add(new Term("field", "a"));
    pq.Add(new Term("field", "b"));
    pq.Add(new Term("field", "c"));
    Hits hits = s.Search(pq);
    Assert.AreEqual(1, hits.Length());
    Query q = new SpanTermQuery(new Term("field", "a"));
    hits = s.Search(q);
    Assert.AreEqual(1, hits.Length());
    TermPositions tps = s.GetIndexReader().TermPositions(new Term("field", "a"));
    Assert.IsTrue(tps.Next());
    Assert.AreEqual(1, tps.Freq());
    Assert.AreEqual(-1, tps.NextPosition());
    Assert.IsTrue(_TestUtil.CheckIndex(dir));
    s.Close();
    dir.Close();
}
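The point of this test: the first token carries a position increment of 0, and positions start from -1 plus the increment, so the first term is indexed at position -1. That is exactly what the TermPositions assertions verify before the searcher and directory are closed.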
Example 14: TestEnablingNorms
public virtual void TestEnablingNorms()
{
    RAMDirectory dir = new RAMDirectory();
    IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true);
    writer.SetMaxBufferedDocs(10);
    // Enable norms for only 1 doc, pre flush
    for (int j = 0; j < 10; j++)
    {
        Document doc = new Document();
        Field f = new Field("field", "aaa", Field.Store.YES, Field.Index.TOKENIZED);
        if (j != 8)
        {
            f.SetOmitNorms(true);
        }
        doc.Add(f);
        writer.AddDocument(doc);
    }
    writer.Close();
    Term searchTerm = new Term("field", "aaa");
    IndexSearcher searcher = new IndexSearcher(dir);
    Hits hits = searcher.Search(new TermQuery(searchTerm));
    Assert.AreEqual(10, hits.Length());
    searcher.Close();
    writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true);
    writer.SetMaxBufferedDocs(10);
    // Enable norms for only 1 doc, post flush
    for (int j = 0; j < 27; j++)
    {
        Document doc = new Document();
        Field f = new Field("field", "aaa", Field.Store.YES, Field.Index.TOKENIZED);
        if (j != 26)
        {
            f.SetOmitNorms(true);
        }
        doc.Add(f);
        writer.AddDocument(doc);
    }
    writer.Close();
    searcher = new IndexSearcher(dir);
    hits = searcher.Search(new TermQuery(searchTerm));
    Assert.AreEqual(27, hits.Length());
    searcher.Close();
    IndexReader reader = IndexReader.Open(dir);
    reader.Close();
    dir.Close();
}
Example 15: TestDiverseDocs
public virtual void TestDiverseDocs()
{
    RAMDirectory dir = new RAMDirectory();
    IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true);
    writer.SetRAMBufferSizeMB(0.5);
    System.Random rand = new System.Random((System.Int32) 31415);
    for (int i = 0; i < 3; i++)
    {
        // First, docs where every term is unique (heavy on
        // Posting instances)
        for (int j = 0; j < 100; j++)
        {
            Document doc = new Document();
            for (int k = 0; k < 100; k++)
            {
                doc.Add(new Field("field", System.Convert.ToString(rand.Next()), Field.Store.YES, Field.Index.TOKENIZED));
            }
            writer.AddDocument(doc);
        }
        // Next, many single term docs where only one term
        // occurs (heavy on byte blocks)
        for (int j = 0; j < 100; j++)
        {
            Document doc = new Document();
            doc.Add(new Field("field", "aaa aaa aaa aaa aaa aaa aaa aaa aaa aaa", Field.Store.YES, Field.Index.TOKENIZED));
            writer.AddDocument(doc);
        }
        // Next, many single term docs where only one term
        // occurs but the terms are very long (heavy on
        // char[] arrays)
        for (int j = 0; j < 100; j++)
        {
            System.Text.StringBuilder b = new System.Text.StringBuilder();
            System.String x = System.Convert.ToString(j) + ".";
            for (int k = 0; k < 1000; k++)
                b.Append(x);
            System.String longTerm = b.ToString();
            Document doc = new Document();
            doc.Add(new Field("field", longTerm, Field.Store.YES, Field.Index.TOKENIZED));
            writer.AddDocument(doc);
        }
    }
    writer.Close();
    IndexSearcher searcher = new IndexSearcher(dir);
    Hits hits = searcher.Search(new TermQuery(new Term("field", "aaa")));
    Assert.AreEqual(300, hits.Length());
    searcher.Close();
    dir.Close();
}
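All 15 examples above target 2.x/2.9-era Lucene.Net APIs. In later releases (Lucene.Net 3.0.3 and onward) Close() was folded into the IDisposable pattern, so the modern equivalent, assuming one of those versions, is a using block; a minimal sketch:

// Sketch for later Lucene.Net versions, where IndexSearcher
// implements IDisposable and Dispose() replaces Close()
using (IndexSearcher searcher = new IndexSearcher(directory, true))
{
    TopDocs topDocs = searcher.Search(new TermQuery(new Term("fieldname", "text")), 10);
    // ... inspect topDocs.ScoreDocs ...
} // Dispose() runs here, releasing the searcher (and its reader, if owned)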