本文整理汇总了C#中Lucene.Net.Store.RAMDirectory类的典型用法代码示例。如果您正苦于以下问题:C# Lucene.Net.Store.RAMDirectory类的具体用法?C# Lucene.Net.Store.RAMDirectory怎么用?C# Lucene.Net.Store.RAMDirectory使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
Lucene.Net.Store.RAMDirectory类属于Lucene.Net.Store命名空间,在下文中一共展示了Lucene.Net.Store.RAMDirectory类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C#代码示例。
示例1: TestDemo_Renamed_Method
// Demonstrates the minimal index-then-search round trip against an
// in-memory directory: one document is indexed, then found again by a
// single-term query, and its stored field is read back.
public virtual void TestDemo_Renamed_Method()
{
    Analyzer analyzer = new StandardAnalyzer();

    // Keep the whole index in RAM. To persist on disk instead (the
    // 'true' flag would overwrite an existing index there), use e.g.:
    //Directory directory = FSDirectory.getDirectory("/tmp/testindex", true);
    Directory directory = new RAMDirectory();

    // 'true' requests a fresh index, replacing anything already present.
    IndexWriter writer = new IndexWriter(directory, analyzer, true);
    writer.SetMaxFieldLength(25000);

    System.String text = "This is the text to be indexed.";
    Document doc = new Document();
    doc.Add(new Field("fieldname", text, Field.Store.YES, Field.Index.TOKENIZED));
    writer.AddDocument(doc);
    writer.Close();

    // Query the freshly written index for the term "text".
    IndexSearcher searcher = new IndexSearcher(directory);
    Lucene.Net.QueryParsers.QueryParser parser = new Lucene.Net.QueryParsers.QueryParser("fieldname", analyzer);
    Query query = parser.Parse("text");
    Hits hits = searcher.Search(query);
    Assert.AreEqual(1, hits.Length());

    // Every hit must carry the original stored text back out.
    for (int i = 0; i < hits.Length(); i++)
    {
        Assert.AreEqual("This is the text to be indexed.", hits.Doc(i).Get("fieldname"));
    }

    searcher.Close();
    directory.Close();
}
示例2: TestMethod
// Indexes four single-term documents and verifies that the nested
// boolean query ("1" OR "2") AND NOT "9" matches exactly two of them.
public virtual void TestMethod()
{
    RAMDirectory directory = new RAMDirectory();
    System.String[] values = new System.String[]{"1", "2", "3", "4"};
    try
    {
        IndexWriter writer = new IndexWriter(directory, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
        foreach (System.String value in values)
        {
            Document doc = new Document();
            doc.Add(new Field(FIELD, value, Field.Store.YES, Field.Index.NOT_ANALYZED));
            writer.AddDocument(doc);
        }
        writer.Close();

        // Inner disjunction: documents whose FIELD is "1" or "2".
        BooleanQuery disjunction = new BooleanQuery();
        disjunction.Add(new TermQuery(new Term(FIELD, "1")), Occur.SHOULD);
        disjunction.Add(new TermQuery(new Term(FIELD, "2")), Occur.SHOULD);

        // Outer query: the disjunction is required, the term "9" forbidden.
        BooleanQuery query = new BooleanQuery();
        query.Add(disjunction, Occur.MUST);
        query.Add(new TermQuery(new Term(FIELD, "9")), Occur.MUST_NOT);

        IndexSearcher indexSearcher = new IndexSearcher(directory, true);
        ScoreDoc[] hits = indexSearcher.Search(query, null, 1000).ScoreDocs;
        Assert.AreEqual(2, hits.Length, "Number of matched documents");
    }
    catch (System.IO.IOException e)
    {
        // An unexpected I/O failure should fail the test, not error out.
        Assert.Fail(e.Message);
    }
}
示例3: SetUp
// Populates the in-memory index with four fixed documents (a "field"
// text plus a single-letter "sorter" key each), then prepares the
// searcher, a TermQuery for "three", and the static filter under test.
public override void SetUp()
{
    base.SetUp();
    directory = new RAMDirectory();
    IndexWriter writer = new IndexWriter(directory, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);

    // field text / sorter key pairs, indexed in exactly this order.
    System.String[][] rows = new System.String[][]
    {
        new System.String[]{"one two three four five", "b"},
        new System.String[]{"one two three four", "d"},
        new System.String[]{"one two three y", "a"},
        new System.String[]{"one two x", "c"}
    };
    foreach (System.String[] row in rows)
    {
        Document doc = new Document();
        doc.Add(new Field("field", row[0], Field.Store.YES, Field.Index.ANALYZED));
        doc.Add(new Field("sorter", row[1], Field.Store.YES, Field.Index.ANALYZED));
        writer.AddDocument(doc);
    }

    writer.Optimize();
    writer.Close();

    searcher = new IndexSearcher(directory);
    query = new TermQuery(new Term("field", "three"));
    filter = NewStaticFilterB();
}
示例4: MakeIndex
// Builds and returns a small deterministic in-memory index:
// NUM_DOCS documents of NUM_FIELDS fields each, where every field value
// combines a fixed entry from 'data' with a pseudo-randomly chosen one.
// The Random seed is fixed, so the index is reproducible across runs.
private static Directory MakeIndex()
{
    Directory dir = new RAMDirectory();
    try
    {
        // Fixed seed => identical index contents on every run.
        System.Random r = new System.Random((System.Int32) (BASE_SEED + 42));
        Analyzer analyzer = new SimpleAnalyzer();
        IndexWriter writer = new IndexWriter(dir, analyzer, true);
        writer.SetUseCompoundFile(false);
        for (int d = 1; d <= NUM_DOCS; d++)
        {
            Lucene.Net.Documents.Document doc = new Lucene.Net.Documents.Document();
            for (int f = 1; f <= NUM_FIELDS; f++)
            {
                doc.Add(new Field("f" + f, data[f % data.Length] + '#' + data[r.Next(data.Length)], Field.Store.YES, Field.Index.TOKENIZED));
            }
            writer.AddDocument(doc);
        }
        writer.Close();
    }
    catch (System.Exception e)
    {
        // Fix: the wrapper previously carried an empty message (""), which
        // hid the failure context; keep the cause as InnerException but
        // give the wrapper a meaningful message.
        throw new System.SystemException("Failed to build the test index", e);
    }
    return dir;
}
示例5: Main
/*public TestCustomSearcherSort(System.String name):base(name)
{
}*/
/*[STAThread]
public static void Main(System.String[] argv)
{
// TestRunner.run(suite()); // {{Aroush-2.9}} how is this done in NUnit?
}*/
/*public static Test suite()
{
return new TestSuite(typeof(TestCustomSearcherSort));
}*/
// create an index for testing
// Builds a RAMDirectory of INDEX_SIZE documents for the custom-sort tests.
// Field layout per document:
//  - "publicationDate_": present on 4 of every 5 docs (random date string);
//  - "content": the term "test" on every 7th doc (matched by the test query);
//  - "mandant": always present, cycling through "0", "1", "2".
private Directory GetIndex()
{
RAMDirectory indexStore = new RAMDirectory();
IndexWriter writer = new IndexWriter(indexStore, new StandardAnalyzer(Util.Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
RandomGen random = new RandomGen(this, NewRandom());
for (int i = 0; i < INDEX_SIZE; ++i)
{
// don't decrease INDEX_SIZE; if it is too low the problem doesn't show up
Document doc = new Document();
if ((i % 5) != 0)
{
// some documents must not have an entry in the first sort field
doc.Add(new Field("publicationDate_", random.GetLuceneDate(), Field.Store.YES, Field.Index.NOT_ANALYZED));
}
if ((i % 7) == 0)
{
// some documents to match the query (see below)
doc.Add(new Field("content", "test", Field.Store.YES, Field.Index.ANALYZED));
}
// every document has a defined 'mandant' field
doc.Add(new Field("mandant", System.Convert.ToString(i % 3), Field.Store.YES, Field.Index.NOT_ANALYZED));
writer.AddDocument(doc);
}
writer.Optimize();
writer.Close();
return indexStore;
}
示例6: SetUp
// Creates two single-document indexes over separate RAMDirectories and
// opens a reader on each. The two documents share "field1" but otherwise
// carry disjoint field sets (field2/field4 vs. field0/field3); field0
// and field4 are indexed with empty, unstored values.
public override void SetUp()
{
    // Fix: base.SetUp() was previously invoked twice in a row here;
    // the redundant second call has been removed.
    base.SetUp();

    Lucene.Net.Documents.Document doc;

    RAMDirectory rd1 = new RAMDirectory();
    IndexWriter iw1 = new IndexWriter(rd1, new SimpleAnalyzer(), true);
    doc = new Lucene.Net.Documents.Document();
    doc.Add(new Field("field1", "the quick brown fox jumps", Lucene.Net.Documents.Field.Store.YES, Lucene.Net.Documents.Field.Index.TOKENIZED));
    doc.Add(new Field("field2", "the quick brown fox jumps", Lucene.Net.Documents.Field.Store.YES, Lucene.Net.Documents.Field.Index.TOKENIZED));
    doc.Add(new Field("field4", "", Lucene.Net.Documents.Field.Store.NO, Lucene.Net.Documents.Field.Index.TOKENIZED));
    iw1.AddDocument(doc);
    iw1.Close();

    RAMDirectory rd2 = new RAMDirectory();
    IndexWriter iw2 = new IndexWriter(rd2, new SimpleAnalyzer(), true);
    doc = new Lucene.Net.Documents.Document();
    doc.Add(new Field("field0", "", Lucene.Net.Documents.Field.Store.NO, Lucene.Net.Documents.Field.Index.TOKENIZED));
    doc.Add(new Field("field1", "the fox jumps over the lazy dog", Lucene.Net.Documents.Field.Store.YES, Lucene.Net.Documents.Field.Index.TOKENIZED));
    doc.Add(new Field("field3", "the fox jumps over the lazy dog", Lucene.Net.Documents.Field.Store.YES, Lucene.Net.Documents.Field.Index.TOKENIZED));
    iw2.AddDocument(doc);
    iw2.Close();

    this.ir1 = IndexReader.Open(rd1);
    this.ir2 = IndexReader.Open(rd2);
}
示例7: TestSimpleSkip
// Indexes 5000 copies of the single term "a" in field "test", then
// replaces the SegmentTermPositions freq stream with a byte-counting
// wrapper and asserts that each SkipTo target costs no more than the
// expected number of freq-stream bytes (second argument of CheckSkipTo),
// exercising skip entries on levels 0, 1 and 2.
public virtual void TestSimpleSkip()
{
RAMDirectory dir = new RAMDirectory();
IndexWriter writer = new IndexWriter(dir, new PayloadAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
Term term = new Term("test", "a");
for (int i = 0; i < 5000; i++)
{
Document d1 = new Document();
d1.Add(new Field(term.Field(), term.Text(), Field.Store.NO, Field.Index.ANALYZED));
writer.AddDocument(d1);
}
writer.Flush();
writer.Optimize();
writer.Close();
IndexReader reader = SegmentReader.GetOnlySegmentReader(dir);
SegmentTermPositions tp = (SegmentTermPositions) reader.TermPositions();
// Swap in the counting stream (NUnit test hook) so bytes read can be measured.
tp.freqStream_ForNUnit = new CountingStream(this, tp.freqStream_ForNUnit);
// Run twice to verify the byte counts are stable across a re-seek.
for (int i = 0; i < 2; i++)
{
counter = 0;
tp.Seek(term);
CheckSkipTo(tp, 14, 185); // no skips
CheckSkipTo(tp, 17, 190); // one skip on level 0
CheckSkipTo(tp, 287, 200); // one skip on level 1, two on level 0
// this test would fail if we had only one skip level,
// because then more bytes would be read from the freqStream
CheckSkipTo(tp, 4800, 250); // one skip on level 2
}
}
示例8: TestMethod
// Variant of the boolean-query test written against the older Lucene.Net
// API surface (3-arg IndexWriter, UN_TOKENIZED fields, Hits-based search,
// BooleanClause.Occur): ("1" OR "2") AND NOT "9" must match exactly two
// of the four indexed documents.
public virtual void TestMethod()
{
RAMDirectory directory = new RAMDirectory();
System.String[] values = new System.String[]{"1", "2", "3", "4"};
try
{
IndexWriter writer = new IndexWriter(directory, new WhitespaceAnalyzer(), true);
for (int i = 0; i < values.Length; i++)
{
Lucene.Net.Documents.Document doc = new Lucene.Net.Documents.Document();
doc.Add(new Field(FIELD, values[i], Field.Store.YES, Field.Index.UN_TOKENIZED));
writer.AddDocument(doc);
}
writer.Close();
// Inner disjunction: documents whose FIELD is "1" or "2".
BooleanQuery booleanQuery1 = new BooleanQuery();
booleanQuery1.Add(new TermQuery(new Term(FIELD, "1")), BooleanClause.Occur.SHOULD);
booleanQuery1.Add(new TermQuery(new Term(FIELD, "2")), BooleanClause.Occur.SHOULD);
// Outer query: the disjunction is required, the term "9" is forbidden.
BooleanQuery query = new BooleanQuery();
query.Add(booleanQuery1, BooleanClause.Occur.MUST);
query.Add(new TermQuery(new Term(FIELD, "9")), BooleanClause.Occur.MUST_NOT);
IndexSearcher indexSearcher = new IndexSearcher(directory);
Hits hits = indexSearcher.Search(query);
Assert.AreEqual(2, hits.Length(), "Number of matched documents");
}
catch (System.IO.IOException e)
{
// An unexpected I/O failure should fail the test, not error out.
Assert.Fail(e.Message);
}
}
示例9: TestDateCompression
// Checks that the doc-values codec stores date-like longs (a small prime
// base plus random multiples of one day) more compactly than plain
// bit-packing: after adding 50 more docs and force-merging, the directory
// must grow by less than 50 raw packed values would cost.
public virtual void TestDateCompression()
{
Directory dir = new RAMDirectory();
IndexWriterConfig iwc = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random()));
IndexWriter iwriter = new IndexWriter(dir, iwc);
const long @base = 13; // prime
long day = 1000L * 60 * 60 * 24; // one day in milliseconds
Document doc = new Document();
// The same Document/field instance is reused for every AddDocument call;
// only the long value is updated between calls.
NumericDocValuesField dvf = new NumericDocValuesField("dv", 0);
doc.Add(dvf);
for (int i = 0; i < 300; ++i)
{
dvf.LongValue = @base + Random().Next(1000) * day;
iwriter.AddDocument(doc);
}
iwriter.ForceMerge(1);
long size1 = DirSize(dir);
for (int i = 0; i < 50; ++i)
{
dvf.LongValue = @base + Random().Next(1000) * day;
iwriter.AddDocument(doc);
}
iwriter.ForceMerge(1);
long size2 = DirSize(dir);
// make sure the new longs cost less than if they had only been packed
Assert.IsTrue(size2 < size1 + (PackedInts.BitsRequired(day) * 50) / 8);
// NOTE(review): iwriter is never disposed in this snippet — confirm the
// surrounding fixture (or DirSize) handles commit/cleanup of the writer.
}
示例10: TestQuery
// Exercises MatchAllDocsQuery on a three-document index: alone, combined
// with itself, combined with a TermQuery, and after deleting a document.
public virtual void TestQuery()
{
    RAMDirectory dir = new RAMDirectory();
    IndexWriter iw = new IndexWriter(dir, new StandardAnalyzer(), true);
    AddDoc("one", iw);
    AddDoc("two", iw);
    AddDoc("three four", iw);
    iw.Close();

    // Plain match-all sees every document.
    IndexSearcher searcher = new IndexSearcher(dir);
    Hits hits = searcher.Search(new MatchAllDocsQuery());
    Assert.AreEqual(3, hits.Length());

    // Artificial conjunctions to trigger the use of skipTo():
    BooleanQuery bq = new BooleanQuery();
    bq.Add(new MatchAllDocsQuery(), BooleanClause.Occur.MUST);
    bq.Add(new MatchAllDocsQuery(), BooleanClause.Occur.MUST);
    hits = searcher.Search(bq);
    Assert.AreEqual(3, hits.Length());

    bq = new BooleanQuery();
    bq.Add(new MatchAllDocsQuery(), BooleanClause.Occur.MUST);
    bq.Add(new TermQuery(new Term("key", "three")), BooleanClause.Occur.MUST);
    hits = searcher.Search(bq);
    Assert.AreEqual(1, hits.Length());

    // After deleting one document, the match-all count drops to two.
    searcher.GetIndexReader().DeleteDocument(0);
    hits = searcher.Search(new MatchAllDocsQuery());
    Assert.AreEqual(2, hits.Length());

    searcher.Close();
}
示例11: SetUp
// Indexes the four fixed field/sorter documents using the legacy
// Field.Text(name, value) factory (stored + tokenized), then prepares the
// searcher, the "three" TermQuery, and the anonymous filter under test.
// NOTE(review): unlike the other SetUp variants this one is declared
// 'virtual' rather than 'override' and never calls base.SetUp() —
// confirm that matches the base class it belongs to.
public virtual void SetUp()
{
directory = new RAMDirectory();
IndexWriter writer = new IndexWriter(directory, new WhitespaceAnalyzer(), true);
Document doc = new Document();
doc.Add(Field.Text("Field", "one two three four five"));
doc.Add(Field.Text("sorter", "b"));
writer.AddDocument(doc);
doc = new Document();
doc.Add(Field.Text("Field", "one two three four"));
doc.Add(Field.Text("sorter", "d"));
writer.AddDocument(doc);
doc = new Document();
doc.Add(Field.Text("Field", "one two three y"));
doc.Add(Field.Text("sorter", "a"));
writer.AddDocument(doc);
doc = new Document();
doc.Add(Field.Text("Field", "one two x"));
doc.Add(Field.Text("sorter", "c"));
writer.AddDocument(doc);
writer.Optimize();
writer.Close();
searcher = new IndexSearcher(directory);
query = new TermQuery(new Term("Field", "three"));
filter = new AnonymousClassFilter(this);
}
示例12: SetUp
// Indexes the four fixed field/sorter documents (TOKENIZED variant), then
// sets up the searcher, the "three" TermQuery, and the anonymous filter.
public override void SetUp()
{
    directory = new RAMDirectory();
    IndexWriter writer = new IndexWriter(directory, new WhitespaceAnalyzer(), true);

    // field text paired index-wise with its sorter key; added in this order.
    System.String[] texts = new System.String[]{"one two three four five", "one two three four", "one two three y", "one two x"};
    System.String[] sorters = new System.String[]{"b", "d", "a", "c"};
    for (int i = 0; i < texts.Length; i++)
    {
        Lucene.Net.Documents.Document doc = new Lucene.Net.Documents.Document();
        doc.Add(new Field("field", texts[i], Field.Store.YES, Field.Index.TOKENIZED));
        doc.Add(new Field("sorter", sorters[i], Field.Store.YES, Field.Index.TOKENIZED));
        writer.AddDocument(doc);
    }

    writer.Optimize();
    writer.Close();

    searcher = new IndexSearcher(directory);
    query = new TermQuery(new Term("field", "three"));
    filter = new AnonymousClassFilter();
}
示例13: SetUp
// Builds a three-document index for the phrase-query tests: one document
// with "field", a two-part "repeated" field, and a "palindrome" field
// (all analyzed by the custom AnonymousClassAnalyzer), plus two identical
// documents whose "nonexist" field provides phrase material. Finishes by
// opening the searcher and an empty PhraseQuery for the tests to populate.
public override void SetUp()
{
base.SetUp();
directory = new RAMDirectory();
Analyzer analyzer = new AnonymousClassAnalyzer(this);
IndexWriter writer = new IndexWriter(directory, analyzer, true, IndexWriter.MaxFieldLength.LIMITED);
Document doc = new Document();
doc.Add(new Field("field", "one two three four five", Field.Store.YES, Field.Index.ANALYZED));
// "repeated" is added twice so its two values form one logical field.
doc.Add(new Field("repeated", "this is a repeated field - first part", Field.Store.YES, Field.Index.ANALYZED));
IFieldable repeatedField = new Field("repeated", "second part of a repeated field", Field.Store.YES, Field.Index.ANALYZED);
doc.Add(repeatedField);
doc.Add(new Field("palindrome", "one two three two one", Field.Store.YES, Field.Index.ANALYZED));
writer.AddDocument(doc);
doc = new Document();
doc.Add(new Field("nonexist", "phrase exist notexist exist found", Field.Store.YES, Field.Index.ANALYZED));
writer.AddDocument(doc);
doc = new Document();
doc.Add(new Field("nonexist", "phrase exist notexist exist found", Field.Store.YES, Field.Index.ANALYZED));
writer.AddDocument(doc);
writer.Optimize();
writer.Close();
searcher = new IndexSearcher(directory, true);
query = new PhraseQuery();
}
示例14: TestCachingWorks
// Verifies CachingWrapperFilter delegation: the wrapped filter must be
// invoked on the first GetDocIdSet call for a reader, and must NOT be
// invoked again once the result is cached — even after the legacy Bits()
// path has touched the cache in between.
public virtual void TestCachingWorks()
{
Directory dir = new RAMDirectory();
IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
writer.Close(); // an empty index is sufficient for this test
IndexReader reader = IndexReader.Open(dir);
MockFilter filter = new MockFilter();
CachingWrapperFilter cacher = new CachingWrapperFilter(filter);
// first time, nested filter is called
cacher.GetDocIdSet(reader);
Assert.IsTrue(filter.WasCalled(), "first time");
// make sure no exception if cache is holding the wrong bitset
cacher.Bits(reader);
cacher.GetDocIdSet(reader);
// second time, nested filter should not be called
filter.Clear();
cacher.GetDocIdSet(reader);
Assert.IsFalse(filter.WasCalled(), "second time");
reader.Close();
}
示例15: TestAllSegmentsSmall
// Creates four 3-doc segments, then forces a merge under a
// LogDocMergePolicy capped at 3 docs per merged segment and verifies the
// index still collapses down to a single segment.
public virtual void TestAllSegmentsSmall()
{
    Directory dir = new RAMDirectory();

    // First pass: write four equally small segments.
    IndexWriterConfig conf = NewWriterConfig();
    IndexWriter writer = new IndexWriter(dir, conf);
    for (int i = 0; i < 4; i++)
    {
        AddDocs(writer, 3);
    }
    writer.Dispose();

    // Second pass: force-merge with a doc-count-limited merge policy.
    conf = NewWriterConfig();
    LogMergePolicy lmp = new LogDocMergePolicy();
    lmp.MaxMergeDocs = 3;
    conf.SetMergePolicy(lmp);
    writer = new IndexWriter(dir, conf);
    writer.ForceMerge(1);
    writer.Dispose();

    SegmentInfos sis = new SegmentInfos();
    sis.Read(dir);
    Assert.AreEqual(1, sis.Size());
}