本文整理汇总了C#中Lucene.Net.Documents.Field类的典型用法代码示例。如果您正苦于以下问题:C# Lucene.Net.Documents.Field类的具体用法?C# Lucene.Net.Documents.Field怎么用?C# Lucene.Net.Documents.Field使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。
Lucene.Net.Documents.Field类属于Lucene.Net.Documents命名空间，在下文中一共展示了Lucene.Net.Documents.Field类的15个代码示例，这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞，您的评价将有助于系统推荐出更棒的C#代码示例。
示例1: Index
// Builds a tiny four-document index over the single field "field" using a
// whitespace analyzer. Field values are analyzed/indexed but not stored.
// The original repeated the same four-line stanza once per document; the
// bodies are now data-driven, preserving the exact document order.
void Index()
{
    Lucene.Net.Index.IndexWriter wr = new Lucene.Net.Index.IndexWriter(dir, new Lucene.Net.Analysis.WhitespaceAnalyzer(), Lucene.Net.Index.IndexWriter.MaxFieldLength.UNLIMITED);
    // One entry per document, in indexing order.
    string[] contents = { "a b c d", "a b a d", "a b e f", "x y z" };
    foreach (string content in contents)
    {
        Lucene.Net.Documents.Document doc = new Lucene.Net.Documents.Document();
        doc.Add(new Lucene.Net.Documents.Field("field", content, Lucene.Net.Documents.Field.Store.NO, Lucene.Net.Documents.Field.Index.ANALYZED));
        wr.AddDocument(doc);
    }
    wr.Close();
}
示例2: Doc
// Wraps the given fields in a fresh Document, preserving their order.
protected internal static Document Doc(Field[] fields)
{
    Document doc = new Document();
    foreach (Field field in fields)
    {
        doc.Add(field);
    }
    return doc;
}
示例3: TestFlushExceptions
// Verifies that an IOException injected during flush leaves the index in a
// consistent state: after 10 iterations of 20 successful adds plus one add
// followed by a deliberately failing flush, exactly 200 documents remain.
public virtual void TestFlushExceptions()
{
    MockRAMDirectory directory = new MockRAMDirectory();
    FailOnlyOnFlush failure = new FailOnlyOnFlush();
    directory.FailOn(failure);
    IndexWriter writer = new IndexWriter(directory, true, ANALYZER, true);
    ConcurrentMergeScheduler cms = new ConcurrentMergeScheduler();
    writer.SetMergeScheduler(cms);
    writer.SetMaxBufferedDocs(2);
    Document doc = new Document();
    // A single Field instance is reused; its value is mutated per document.
    Field idField = new Field("id", "", Field.Store.YES, Field.Index.NOT_ANALYZED);
    doc.Add(idField);
    for (int i = 0; i < 10; i++)
    {
        for (int j = 0; j < 20; j++)
        {
            idField.SetValue(System.Convert.ToString(i * 20 + j));
            writer.AddDocument(doc);
        }
        // One more add, then arm the failure so the explicit Flush() throws.
        writer.AddDocument(doc);
        failure.SetDoFail();
        try
        {
            writer.Flush();
            Assert.Fail("failed to hit IOException");
        }
        catch (System.IO.IOException) // exception variable was unused (CS0168); dropped
        {
            failure.ClearDoFail();
        }
    }
    writer.Close();
    IndexReader reader = IndexReader.Open(directory);
    // 10 iterations x 20 docs survive; the asserted count pins that the
    // failed flushes did not corrupt or lose the committed documents.
    Assert.AreEqual(200, reader.NumDocs());
    reader.Close();
    directory.Close();
}
示例4: AssignFieldValues
// Materializes one search hit as a SitecoreItem keyed by its URI field,
// copies every stored field of the matched document into the item's field
// map, and appends the item to the output list.
private static void AssignFieldValues(SearchResult result, Field uriField, List<SitecoreItem> items)
{
    var item = new SitecoreItem(new ItemUri(uriField.StringValue()));
    foreach (Field docField in result.Document.GetFields())
    {
        item.Fields[docField.Name()] = docField.StringValue();
    }
    items.Add(item);
}
示例5: MakeDocument
// Builds a single-field Document over field "f" containing docText,
// analyzed but not stored, with norms omitted.
private static Document MakeDocument(System.String docText)
{
    Field body = new Field("f", docText, Field.Store.NO, Field.Index.ANALYZED);
    body.SetOmitNorms(true);

    Document result = new Document();
    result.Add(body);
    return result;
}
示例6: SetUp
// Test fixture setup: indexes 1000 documents with a payload-producing
// analyzer, then opens a searcher over the resulting optimized index.
// Each document carries a no-payload field plus two payload fields.
public override void SetUp()
{
    base.SetUp();
    RAMDirectory dir = new RAMDirectory();
    PayloadAnalyzer payloadAnalyzer = new PayloadAnalyzer(this);
    IndexWriter indexWriter = new IndexWriter(dir, payloadAnalyzer, true, IndexWriter.MaxFieldLength.LIMITED);
    indexWriter.SetSimilarity(similarity);
    for (int docNum = 0; docNum < 1000; docNum++)
    {
        Document doc = new Document();
        Field noPayloadField = new Field(PayloadHelper.NO_PAYLOAD_FIELD, English.IntToEnglish(docNum), Field.Store.YES, Field.Index.ANALYZED);
        //noPayloadField.setBoost(0);
        doc.Add(noPayloadField);
        doc.Add(new Field("field", English.IntToEnglish(docNum), Field.Store.YES, Field.Index.ANALYZED));
        doc.Add(new Field("multiField", English.IntToEnglish(docNum) + " " + English.IntToEnglish(docNum), Field.Store.YES, Field.Index.ANALYZED));
        indexWriter.AddDocument(doc);
    }
    indexWriter.Optimize();
    indexWriter.Close();
    searcher = new IndexSearcher(dir);
    searcher.SetSimilarity(similarity);
}
示例7: AddNoProxDoc
// Adds one document whose two fields both omit term frequencies/positions:
// "content3" is stored and analyzed, "content4" is stored but not indexed.
private void AddNoProxDoc(IndexWriter writer)
{
    Document doc = new Document();

    Field analyzed = new Field("content3", "aaa", Field.Store.YES, Field.Index.ANALYZED);
    analyzed.SetOmitTf(true);
    doc.Add(analyzed);

    Field storedOnly = new Field("content4", "aaa", Field.Store.YES, Field.Index.NO);
    storedOnly.SetOmitTf(true);
    doc.Add(storedOnly);

    writer.AddDocument(doc);
}
示例8: TestMixedRAM
// Verifies that flipping OmitTermFreqAndPositions on a Field instance
// mid-stream marks the field's FieldInfo bit correctly after a forced
// merge: "f1" (never omitted) stays unset, "f2" (omitted from doc 6 on)
// ends up set. Flush/merge settings below are deliberate; do not reorder.
public virtual void TestMixedRAM()
{
Directory ram = new MockRAMDirectory();
Analyzer analyzer = new StandardAnalyzer(Util.Version.LUCENE_CURRENT);
IndexWriter writer = new IndexWriter(ram, analyzer, true, IndexWriter.MaxFieldLength.LIMITED);
// Small buffer + low merge factor force several segments before Optimize.
writer.SetMaxBufferedDocs(10);
writer.MergeFactor = 2;
Document d = new Document();
// this field will have Tf
Field f1 = new Field("f1", "This field has term freqs", Field.Store.NO, Field.Index.ANALYZED);
d.Add(f1);
// this field will NOT have Tf
Field f2 = new Field("f2", "This field has NO Tf in all docs", Field.Store.NO, Field.Index.ANALYZED);
d.Add(f2);
// First 5 docs index f2 WITH term freqs...
for (int i = 0; i < 5; i++)
writer.AddDocument(d);
// ...then the same Field instance is switched to omit them for 20 more.
f2.OmitTermFreqAndPositions = true;
for (int i = 0; i < 20; i++)
writer.AddDocument(d);
// force merge
writer.Optimize();
// flush
writer.Close();
_TestUtil.CheckIndex(ram);
SegmentReader reader = SegmentReader.GetOnlySegmentReader(ram);
FieldInfos fi = reader.FieldInfos();
// After merging, the omit bit is the OR across segments: f1 unset, f2 set.
Assert.IsTrue(!fi.FieldInfo("f1").omitTermFreqAndPositions_ForNUnit, "OmitTermFreqAndPositions field bit should not be set.");
Assert.IsTrue(fi.FieldInfo("f2").omitTermFreqAndPositions_ForNUnit, "OmitTermFreqAndPositions field bit should be set.");
reader.Close();
ram.Close();
}
示例9: AddDoc
// Adds one document with `text` stored and tokenized under FIELD_NAME.
private void AddDoc(IndexWriter writer, System.String text)
{
    Document doc = new Document();
    doc.Add(new Field(FIELD_NAME, text, Field.Store.YES, Field.Index.TOKENIZED));
    writer.AddDocument(doc);
}
示例10: TestTermVectorCorruption3
// Regression test: writing documents that carry term vectors across two
// separate writer sessions (with tiny buffers and serial merging) must not
// corrupt the term-vector files; reading every doc back must not throw.
public virtual void TestTermVectorCorruption3()
{
    Directory dir = new MockRAMDirectory();
    IndexWriter writer = new IndexWriter(dir, false, new StandardAnalyzer());
    writer.SetMaxBufferedDocs(2);
    writer.SetRAMBufferSizeMB(IndexWriter.DISABLE_AUTO_FLUSH);
    writer.SetMergeScheduler(new SerialMergeScheduler());
    writer.SetMergePolicy(new LogDocMergePolicy());
    // NOTE: the original allocated a throwaway Document and immediately
    // overwrote it with a second `new Document()`; the dead allocation is removed.
    Document document = new Document();
    Field storedField = new Field("stored", "stored", Field.Store.YES, Field.Index.NO);
    document.Add(storedField);
    Field termVectorField = new Field("termVector", "termVector", Field.Store.NO, Field.Index.UN_TOKENIZED, Field.TermVector.WITH_POSITIONS_OFFSETS);
    document.Add(termVectorField);
    for (int i = 0; i < 10; i++)
        writer.AddDocument(document);
    writer.Close();
    // Second session appends 6 more copies of the same document, then merges.
    writer = new IndexWriter(dir, false, new StandardAnalyzer());
    writer.SetMaxBufferedDocs(2);
    writer.SetRAMBufferSizeMB(IndexWriter.DISABLE_AUTO_FLUSH);
    writer.SetMergeScheduler(new SerialMergeScheduler());
    writer.SetMergePolicy(new LogDocMergePolicy());
    for (int i = 0; i < 6; i++)
        writer.AddDocument(document);
    writer.Optimize();
    writer.Close();
    // Reading stored fields and term vectors for the first 10 docs must succeed.
    IndexReader reader = IndexReader.Open(dir);
    for (int i = 0; i < 10; i++)
    {
        reader.GetTermFreqVectors(i);
        reader.Document(i);
    }
    reader.Close();
    dir.Close();
}
示例11: TestNoWaitClose
// Stress test for Close(false): kicks off many concurrent merges (merge
// factor dropped from 100 to 3 right before closing) and closes without
// waiting, then asserts the doc count is still exactly as expected after
// each iteration's adds and deletes. Runs once with autoCommit=true and
// once with autoCommit=false. Statement order is load-bearing throughout.
public virtual void TestNoWaitClose()
{
RAMDirectory directory = new MockRAMDirectory();
Document doc = new Document();
// Single reused Field; its value is mutated per added document.
Field idField = new Field("id", "", Field.Store.YES, Field.Index.NOT_ANALYZED);
doc.Add(idField);
for (int pass = 0; pass < 2; pass++)
{
bool autoCommit = pass == 0;
IndexWriter writer = new IndexWriter(directory, autoCommit, ANALYZER, true);
for (int iter = 0; iter < 10; iter++)
{
ConcurrentMergeScheduler cms = new ConcurrentMergeScheduler();
writer.SetMergeScheduler(cms);
writer.SetMaxBufferedDocs(2);
writer.SetMergeFactor(100);
// Add 201 docs with sequential ids.
for (int j = 0; j < 201; j++)
{
idField.SetValue(System.Convert.ToString(iter * 201 + j));
writer.AddDocument(doc);
}
// Delete every 5th id, 20 deletes total => net +182 docs per iteration
// (201 added + 1 extra below - 20 deleted).
int delID = iter * 201;
for (int j = 0; j < 20; j++)
{
writer.DeleteDocuments(new Term("id", System.Convert.ToString(delID)));
delID += 5;
}
// Force a bunch of merge threads to kick off so we
// stress out aborting them on close:
writer.SetMergeFactor(3);
writer.AddDocument(doc);
writer.Flush();
// Close without waiting for in-flight merges to finish.
writer.Close(false);
IndexReader reader = IndexReader.Open(directory);
Assert.AreEqual((1 + iter) * 182, reader.NumDocs());
reader.Close();
// Reopen
writer = new IndexWriter(directory, autoCommit, ANALYZER, false);
}
writer.Close();
}
directory.Close();
}
示例12: TestEnablingNorms
// Verifies that enabling norms for just one document of a field (all other
// docs omit norms) works both when the norm-carrying doc is added pre-flush
// (first pass, doc 8 of 10) and post-flush (second pass, doc 26 of 27):
// searches must still find every document.
public virtual void TestEnablingNorms()
{
RAMDirectory dir = new RAMDirectory();
IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true);
writer.SetMaxBufferedDocs(10);
// Enable norms for only 1 doc, pre flush
for (int j = 0; j < 10; j++)
{
Document doc = new Document();
Field f = new Field("field", "aaa", Field.Store.YES, Field.Index.TOKENIZED);
if (j != 8)
{
// Every doc except #8 omits norms for this field.
f.SetOmitNorms(true);
}
doc.Add(f);
writer.AddDocument(doc);
}
writer.Close();
Term searchTerm = new Term("field", "aaa");
IndexSearcher searcher = new IndexSearcher(dir);
Hits hits = searcher.Search(new TermQuery(searchTerm));
// All 10 docs must match regardless of their norms setting.
Assert.AreEqual(10, hits.Length());
searcher.Close();
// Recreate the index (create=true) for the post-flush variant.
writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true);
writer.SetMaxBufferedDocs(10);
// Enable norms for only 1 doc, post flush
for (int j = 0; j < 27; j++)
{
Document doc = new Document();
Field f = new Field("field", "aaa", Field.Store.YES, Field.Index.TOKENIZED);
if (j != 26)
{
f.SetOmitNorms(true);
}
doc.Add(f);
writer.AddDocument(doc);
}
writer.Close();
searcher = new IndexSearcher(dir);
hits = searcher.Search(new TermQuery(searchTerm));
Assert.AreEqual(27, hits.Length());
searcher.Close();
// NOTE(review): this final open/close looks like a sanity check that the
// index is readable; it performs no assertions — confirm it is intentional.
IndexReader reader = IndexReader.Open(dir);
reader.Close();
dir.Close();
}
示例13: TestTermVectorCorruption2
// Regression test: when the first documents of a segment have no term
// vectors and a later one does, the term-vector files must still line up.
// Runs 4 iterations covering autoCommit=false (iter 0,1) and true (iter 2,3).
public virtual void TestTermVectorCorruption2()
{
Directory dir = new MockRAMDirectory();
for (int iter = 0; iter < 4; iter++)
{
bool autoCommit = 1 == iter / 2;
IndexWriter writer = new IndexWriter(dir, autoCommit, new StandardAnalyzer());
// Tiny buffer, manual flushing, serial merges: keeps segment layout deterministic.
writer.SetMaxBufferedDocs(2);
writer.SetRAMBufferSizeMB(IndexWriter.DISABLE_AUTO_FLUSH);
writer.SetMergeScheduler(new SerialMergeScheduler());
writer.SetMergePolicy(new LogDocMergePolicy());
// Docs 0 and 1: stored field only, no term vectors.
Document document = new Document();
Field storedField = new Field("stored", "stored", Field.Store.YES, Field.Index.NO);
document.Add(storedField);
writer.AddDocument(document);
writer.AddDocument(document);
// Doc 2: adds a field with full term vectors (positions + offsets).
document = new Document();
document.Add(storedField);
Field termVectorField = new Field("termVector", "termVector", Field.Store.NO, Field.Index.UN_TOKENIZED, Field.TermVector.WITH_POSITIONS_OFFSETS);
document.Add(termVectorField);
writer.AddDocument(document);
writer.Optimize();
writer.Close();
IndexReader reader = IndexReader.Open(dir);
// Only doc 2 must report term vectors after the merge.
Assert.IsTrue(reader.GetTermFreqVectors(0) == null);
Assert.IsTrue(reader.GetTermFreqVectors(1) == null);
Assert.IsTrue(reader.GetTermFreqVectors(2) != null);
reader.Close();
}
dir.Close();
}
示例14: TestDeletesNumDocs
// Verifies that near-real-time readers obtained via GetReader() reflect
// deletes immediately: NumDocs drops 2 -> 1 -> 0 as the two docs are
// deleted by id, without closing the writer in between.
public void TestDeletesNumDocs()
{
Directory dir = new MockRAMDirectory();
IndexWriter w = new IndexWriter(dir, new WhitespaceAnalyzer(),
IndexWriter.MaxFieldLength.LIMITED);
Document doc = new Document();
doc.Add(new Field("field", "a b c", Field.Store.NO, Field.Index.ANALYZED));
// Reused id field; its value is changed before each AddDocument call.
Field id = new Field("id", "", Field.Store.NO, Field.Index.NOT_ANALYZED);
doc.Add(id);
id.SetValue("0");
w.AddDocument(doc);
id.SetValue("1");
w.AddDocument(doc);
// NRT reader sees both docs before any deletes.
IndexReader r = w.GetReader();
Assert.AreEqual(2, r.NumDocs());
r.Close();
w.DeleteDocuments(new Term("id", "0"));
r = w.GetReader();
Assert.AreEqual(1, r.NumDocs());
r.Close();
w.DeleteDocuments(new Term("id", "1"));
r = w.GetReader();
Assert.AreEqual(0, r.NumDocs());
r.Close();
w.Close();
dir.Close();
}
示例15: TestBasic
// Indexes 30 docs where field "noTf" omits term freqs/positions and "tf"
// keeps them, with marker terms ("notf"/" tf") alternating by parity, then
// runs several term and boolean queries through counting hit collectors.
// Relies on sibling AnonymousClassCountingHitCollector* classes defined
// elsewhere in this file.
public virtual void TestBasic()
{
Directory dir = new MockRAMDirectory();
Analyzer analyzer = new StandardAnalyzer(Util.Version.LUCENE_CURRENT);
IndexWriter writer = new IndexWriter(dir, analyzer, true, IndexWriter.MaxFieldLength.LIMITED);
// Aggressive merging + tiny buffers exercise multi-segment behavior.
writer.MergeFactor = 2;
writer.SetMaxBufferedDocs(2);
writer.SetSimilarity(new SimpleSimilarity());
System.Text.StringBuilder sb = new System.Text.StringBuilder(265);
System.String term = "term";
for (int i = 0; i < 30; i++)
{
Document d = new Document();
// Content grows by one "term " per doc, so doc i contains i+1 copies.
sb.Append(term).Append(" ");
System.String content = sb.ToString();
// Even docs get plain content in noTf; odd docs also get the " notf" marker.
Field noTf = new Field("noTf", content + (i % 2 == 0?"":" notf"), Field.Store.NO, Field.Index.ANALYZED);
noTf.OmitTermFreqAndPositions = true;
d.Add(noTf);
// Even docs get the " tf" marker in tf; odd docs get plain content.
Field tf = new Field("tf", content + (i % 2 == 0?" tf":""), Field.Store.NO, Field.Index.ANALYZED);
d.Add(tf);
writer.AddDocument(d);
//System.out.println(d);
}
writer.Optimize();
// flush
writer.Close();
_TestUtil.CheckIndex(dir);
/*
* Verify the index
*/
Searcher searcher = new IndexSearcher(dir, true);
searcher.Similarity = new SimpleSimilarity();
Term a = new Term("noTf", term);
Term b = new Term("tf", term);
Term c = new Term("noTf", "notf");
Term d2 = new Term("tf", "tf");
TermQuery q1 = new TermQuery(a);
TermQuery q2 = new TermQuery(b);
TermQuery q3 = new TermQuery(c);
TermQuery q4 = new TermQuery(d2);
searcher.Search(q1, new AnonymousClassCountingHitCollector(this));
//System.out.println(CountingHitCollector.getCount());
searcher.Search(q2, new AnonymousClassCountingHitCollector1(this));
//System.out.println(CountingHitCollector.getCount());
searcher.Search(q3, new AnonymousClassCountingHitCollector2(this));
//System.out.println(CountingHitCollector.getCount());
searcher.Search(q4, new AnonymousClassCountingHitCollector3(this));
//System.out.println(CountingHitCollector.getCount());
// Conjunction of q1 (noTf:term) and q4 (tf:tf) must hit the 15 even docs.
BooleanQuery bq = new BooleanQuery();
bq.Add(q1, Occur.MUST);
bq.Add(q4, Occur.MUST);
searcher.Search(bq, new AnonymousClassCountingHitCollector4(this));
Assert.IsTrue(15 == CountingHitCollector.GetCount());
searcher.Close();
dir.Close();
}