本文整理汇总了C#中Lucene.Net.Index.IndexWriter.AddDocument方法的典型用法代码示例。如果您正苦于以下问题:C# IndexWriter.AddDocument方法的具体用法?C# IndexWriter.AddDocument怎么用?C# IndexWriter.AddDocument使用的例子?那么恭喜您,这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类Lucene.Net.Index.IndexWriter的用法示例。
在下文中一共展示了IndexWriter.AddDocument方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C#代码示例。
示例1: SetUp
public void SetUp()
{
    // Populates the fixture's `store` directory with five single-field
    // documents: two under "aaa", two under "contents", one under "zzz",
    // then merges to one segment.
    // FIX: close the writer even if AddDocument/Optimize throws — otherwise
    // the index write lock stays held and subsequent tests fail to open it.
    var writer = new IndexWriter(store, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.UNLIMITED);
    try
    {
        var entries = new[]
        {
            new { Name = "aaa", Value = "foo" },
            new { Name = "aaa", Value = "foo" },
            new { Name = "contents", Value = "Tom" },
            new { Name = "contents", Value = "Jerry" },
            new { Name = "zzz", Value = "bar" }
        };
        foreach (var entry in entries)
        {
            var doc = new Document();
            doc.Add(new Field(entry.Name, entry.Value, Field.Store.YES, Field.Index.ANALYZED));
            writer.AddDocument(doc);
        }
        writer.Optimize();
    }
    finally
    {
        writer.Close();
    }
}
示例2: TestDateCompression
public virtual void TestDateCompression()
{
    // Indexes timestamp-like numeric doc values (base + random multiple of
    // one day) and checks that 50 additional values cost less on disk than
    // they would if stored fully packed — i.e. the codec exploits the common
    // `day` factor.
    Directory dir = new RAMDirectory();
    IndexWriterConfig iwc = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random()));
    IndexWriter iwriter = new IndexWriter(dir, iwc);
    const long @base = 13; // prime
    long day = 1000L * 60 * 60 * 24;
    // The same Document/field instance is reused across AddDocument calls;
    // only LongValue is mutated between adds.
    Document doc = new Document();
    NumericDocValuesField dvf = new NumericDocValuesField("dv", 0);
    doc.Add(dvf);
    for (int i = 0; i < 300; ++i)
    {
        dvf.LongValue = @base + Random().Next(1000) * day;
        iwriter.AddDocument(doc);
    }
    iwriter.ForceMerge(1);
    long size1 = DirSize(dir);
    for (int i = 0; i < 50; ++i)
    {
        dvf.LongValue = @base + Random().Next(1000) * day;
        iwriter.AddDocument(doc);
    }
    iwriter.ForceMerge(1);
    long size2 = DirSize(dir);
    // make sure the new longs cost less than if they had only been packed
    Assert.IsTrue(size2 < size1 + (PackedInts.BitsRequired(day) * 50) / 8);
    // FIX: the writer and directory were never released — dispose them so the
    // test does not leak resources (consistent with the other tests here).
    iwriter.Dispose();
    dir.Dispose();
}
示例3: TestBackToTheFuture
public virtual void TestBackToTheFuture()
{
    // Opens a reader snapshot (r1) before a delete and a newer one (r2) after
    // it, populates the FieldCache from the NEWER reader first, then checks
    // the OLDER snapshot still sees both terms — the cache entry for the new
    // reader must not bleed "back to the future" into the old one.
    Directory dir = NewDirectory();
    IndexWriter iw = new IndexWriter(dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, null));
    Document doc = new Document();
    doc.Add(NewStringField("foo", "bar", Field.Store.NO));
    iw.AddDocument(doc);
    doc = new Document();
    doc.Add(NewStringField("foo", "baz", Field.Store.NO));
    iw.AddDocument(doc);
    // r1 is opened before the delete, so it sees both documents.
    DirectoryReader r1 = DirectoryReader.Open(iw, true);
    iw.DeleteDocuments(new Term("foo", "baz"));
    // r2 is opened after the delete.
    DirectoryReader r2 = DirectoryReader.Open(iw, true);
    // Prime the cache from the newer snapshot first (order matters here).
    FieldCache.DEFAULT.GetDocTermOrds(GetOnlySegmentReader(r2), "foo");
    SortedSetDocValues v = FieldCache.DEFAULT.GetDocTermOrds(GetOnlySegmentReader(r1), "foo");
    // The old snapshot must still expose both terms ("bar" and "baz") ...
    Assert.AreEqual(2, v.ValueCount);
    // ... and doc 1 (the later-deleted "baz" doc) must still resolve to ord 1.
    v.Document = 1;
    Assert.AreEqual(1, v.NextOrd());
    iw.Dispose();
    r1.Dispose();
    r2.Dispose();
    dir.Dispose();
}
示例4: AddDocuments
private static void AddDocuments(IndexWriter writer) {
    // Adds one searchable document per static page, then one per blog post.
    // Urls are stored verbatim (NOT_ANALYZED) so they can be returned as-is;
    // free-text fields are analyzed for full-text search.
    var pageMetadata = PagesMetadata.Instance;
    var postMetadata = PostsMetadata.Instance;

    foreach (var page in pageMetadata.List) {
        var pageDoc = new Document();
        pageDoc.Add(new Field("Url", "/" + page.Slug, Field.Store.YES, Field.Index.NOT_ANALYZED));
        pageDoc.Add(new Field("Title", page.Title, Field.Store.YES, Field.Index.ANALYZED));
        pageDoc.Add(new Field("Body", new Page(page.Slug, pageMetadata).BodyWithoutHtml, Field.Store.YES, Field.Index.ANALYZED));
        writer.AddDocument(pageDoc);
    }

    foreach (var post in postMetadata.List) {
        var postDoc = new Document();
        postDoc.Add(new Field("Url", "/blog/" + post.Slug, Field.Store.YES, Field.Index.NOT_ANALYZED));
        postDoc.Add(new Field("Title", post.Title, Field.Store.YES, Field.Index.ANALYZED));
        postDoc.Add(new Field("Description", post.ShortDescription, Field.Store.YES, Field.Index.ANALYZED));
        // Date fields are optional: DateTime.MinValue is treated as "not set".
        if (post.PublishDate != DateTime.MinValue) {
            postDoc.Add(new Field("PublishDate", post.PublishDate.ToString("dd MMMM yyyy"), Field.Store.YES, Field.Index.NOT_ANALYZED));
        }
        if (post.LastUpdatedDate != DateTime.MinValue) {
            postDoc.Add(new Field("LastUpdatedDate", post.LastUpdatedDate.ToString("dd MMMM yyyy"), Field.Store.YES, Field.Index.NOT_ANALYZED));
        }
        postDoc.Add(new Field("Author", post.Author, Field.Store.YES, Field.Index.ANALYZED));
        postDoc.Add(new Field("Body", new Post(post.Slug, postMetadata).BodyWithoutHtml, Field.Store.YES, Field.Index.ANALYZED));
        writer.AddDocument(postDoc);
    }
}
示例5: SetUp
/// <summary>
/// Set up a new index in RAM containing three test phrases, analyzed with a
/// shingle wrapper around a whitespace MockAnalyzer (shingle size 2).
/// </summary>
/// <exception cref="Exception"> if an error occurs with index writer or searcher </exception>
public override void SetUp()
{
    base.SetUp();
    analyzer = new ShingleAnalyzerWrapper(new MockAnalyzer(Random(), MockTokenizer.WHITESPACE, false), 2);
    directory = NewDirectory();
    IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig(TEST_VERSION_CURRENT, analyzer));
    // One document per phrase, all under the same "content" field.
    string[] phrases =
    {
        "please divide this sentence into shingles",
        "just another test sentence",
        "a sentence which contains no test"
    };
    foreach (string phrase in phrases)
    {
        Document doc = new Document();
        doc.Add(new TextField("content", phrase, Field.Store.YES));
        writer.AddDocument(doc);
    }
    writer.Dispose();
    reader = DirectoryReader.Open(directory);
    searcher = NewSearcher(reader);
}
示例6: SetUp
public override void SetUp()
{
    base.SetUp();
    store = NewDirectory();
    IndexWriter writer = new IndexWriter(store, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random(), MockTokenizer.WHITESPACE, false)));
    // Five single-field documents: two under "aaa", two under "contents",
    // one under "zzz".
    var entries = new[]
    {
        new { Name = "aaa", Value = "foo" },
        new { Name = "aaa", Value = "foo" },
        new { Name = "contents", Value = "Tom" },
        new { Name = "contents", Value = "Jerry" },
        new { Name = "zzz", Value = "bar" }
    };
    foreach (var entry in entries)
    {
        Document doc = new Document();
        doc.Add(NewTextField(entry.Name, entry.Value, Field.Store.YES));
        writer.AddDocument(doc);
    }
    // Collapse to a single segment before handing the index to the tests.
    writer.ForceMerge(1);
    writer.Dispose();
}
示例7: IndexAndCrashOnCreateOutputSegments2
/// <summary>
/// Index 1 document and commit.
/// Prepare for crashing.
/// Index 1 more document; upon commit, creation of segments_2 will crash.
/// </summary>
private void IndexAndCrashOnCreateOutputSegments2()
{
    Directory realDirectory = FSDirectory.Open(Path);
    CrashAfterCreateOutput crashAfterCreateOutput = new CrashAfterCreateOutput(realDirectory);
    // NOTE: cannot use RandomIndexWriter because it
    // sometimes commits:
    IndexWriter indexWriter = new IndexWriter(crashAfterCreateOutput, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())));
    indexWriter.AddDocument(Document);
    // writes segments_1:
    indexWriter.Commit();
    // Arm the wrapper: creating "segments_2" will now throw.
    crashAfterCreateOutput.GetCrashAfterCreateOutput = "segments_2";
    indexWriter.AddDocument(Document);
    try
    {
        // tries to write segments_2 but hits fake exc:
        indexWriter.Commit();
        Assert.Fail("should have hit CrashingException");
    }
    // FIX: drop the unused exception variable `e` (compiler warning CS0168).
    catch (CrashingException)
    {
        // expected
    }
    // writes segments_3
    indexWriter.Dispose();
    // The crashed commit must not have left a partial segments_2 file behind.
    Assert.IsFalse(SlowFileExists(realDirectory, "segments_2"));
    crashAfterCreateOutput.Dispose();
}
示例8: TestSpanRegex
public void TestSpanRegex()
{
    // A SpanFirstQuery(end=1) over the regex "aut.*" must match only the doc
    // where a matching term appears at position 0 ("auto update"), not the
    // one where it appears at position 1 ("first auto update").
    RAMDirectory directory = new RAMDirectory();
    IndexWriter writer = new IndexWriter(directory, new SimpleAnalyzer(), true, IndexWriter.MaxFieldLength.UNLIMITED);
    // FIX: removed a stale block of commented-out indexing code that added
    // nothing to the test.
    Document doc = new Document();
    doc.Add(new Field("field", "auto update", Field.Store.NO,
        Field.Index.ANALYZED));
    writer.AddDocument(doc);
    doc = new Document();
    doc.Add(new Field("field", "first auto update", Field.Store.NO,
        Field.Index.ANALYZED));
    writer.AddDocument(doc);
    writer.Optimize();
    writer.Close();
    IndexSearcher searcher = new IndexSearcher(directory, true);
    SpanRegexQuery srq = new SpanRegexQuery(new Term("field", "aut.*"));
    SpanFirstQuery sfq = new SpanFirstQuery(srq, 1);
    int numHits = searcher.Search(sfq, null, 1000).TotalHits;
    Assert.AreEqual(1, numHits);
    // FIX: release the searcher (and its underlying reader) — previously
    // leaked.
    searcher.Close();
}
示例9: SetUp
public void SetUp()
{
    // Seeds the fixture's `store` with five single-field documents
    // (two "aaa", two "contents", one "zzz") and merges to one segment.
    IndexWriter writer = new IndexWriter(store, new WhitespaceAnalyzer(), true);
    var entries = new[]
    {
        new { Name = "aaa", Value = "foo" },
        new { Name = "aaa", Value = "foo" },
        new { Name = "contents", Value = "Tom" },
        new { Name = "contents", Value = "Jerry" },
        new { Name = "zzz", Value = "bar" }
    };
    foreach (var entry in entries)
    {
        Document doc = new Document();
        doc.Add(new Field(entry.Name, entry.Value, Field.Store.YES, Field.Index.ANALYZED));
        writer.AddDocument(doc);
    }
    writer.Optimize();
    writer.Close();
}
示例10: TestReadersWriters
public void TestReadersWriters()
{
    // Exercises reader/writer lifetime rules: a reader disposed by its using
    // block rejects Reopen, a disposed writer rejects AddDocument, and the
    // RAMDirectory stays open exactly until its own using block ends.
    Directory dir;
    using(dir = new RAMDirectory())
    {
        Document doc;
        IndexWriter writer;
        IndexReader reader;
        using (writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.UNLIMITED))
        {
            Field field = new Field("name", "value", Field.Store.YES,Field.Index.ANALYZED);
            doc = new Document();
            doc.Add(field);
            writer.AddDocument(doc);
            writer.Commit();
            using (reader = writer.GetReader())
            {
                // Reopen while the reader is live must succeed.
                // NOTE(review): r1 is never disposed — presumably Reopen
                // returns the same instance when nothing changed; confirm.
                IndexReader r1 = reader.Reopen();
            }
            // The using block has disposed `reader`: Reopen must now throw.
            Assert.Throws<AlreadyClosedException>(() => reader.Reopen(), "IndexReader shouldn't be open here");
        }
        // The writer's using block has ended: further adds must throw.
        Assert.Throws<AlreadyClosedException>(() => writer.AddDocument(doc), "IndexWriter shouldn't be open here");
        // Disposing the writer must NOT have closed the directory.
        Assert.IsTrue(dir.isOpen_ForNUnit, "RAMDirectory");
    }
    // The directory's own using block has ended: now it must be closed.
    Assert.IsFalse(dir.isOpen_ForNUnit, "RAMDirectory");
}
示例11: Indexer
public void Indexer()
{
    // Rebuilds the full-text index from scratch (create = true) under the
    // directory path `Dic`, indexing cars (Xe), groups (Nhom), comments
    // (BinhLuan) and blog posts (Blog).
    var directory = FSDirectory.Open(new DirectoryInfo(Dic));
    var analyzer = new StandardAnalyzer(Version.LUCENE_29);
    var writer = new IndexWriter(directory, analyzer, true, IndexWriter.MaxFieldLength.LIMITED);
    // FIX: close the writer even when an indexing step throws — otherwise the
    // index write.lock stays held and later rebuilds fail.
    // FIX: Field.Index.TOKENIZED is the obsolete pre-2.9 name for ANALYZED
    // (identical behavior); use ANALYZED consistently.
    // NOTE(review): the index mode of "RowId"/"ID" is swapped between the
    // first loop and the later ones (NOT_ANALYZED vs analyzed) — this looks
    // accidental; confirm the intended modes before unifying them.
    try
    {
        foreach (var item in XeDal.SelectAll())
        {
            var doc = new Document();
            doc.Add(new Field("Ten", item.Ten, Field.Store.YES, Field.Index.NOT_ANALYZED));
            doc.Add(new Field("NoiDung", string.Format("{0} {1}", item.Ten, item.GioiThieu), Field.Store.YES,
                Field.Index.ANALYZED));
            doc.Add(new Field("RowId", item.RowId.ToString(), Field.Store.YES, Field.Index.NOT_ANALYZED));
            doc.Add(new Field("ID", item.Id.ToString(), Field.Store.YES, Field.Index.ANALYZED));
            doc.Add(new Field("Url", item.XeUrl, Field.Store.YES, Field.Index.NOT_ANALYZED));
            writer.AddDocument(doc);
        }
        foreach (var item in NhomDal.SelectAll())
        {
            var doc = new Document();
            doc.Add(new Field("Ten", item.Ten, Field.Store.YES, Field.Index.NOT_ANALYZED));
            doc.Add(new Field("NoiDung", string.Format("{0} {1} {2}", item.Ten, item.GioiThieu, item.MoTa), Field.Store.YES,
                Field.Index.ANALYZED));
            doc.Add(new Field("RowId", item.RowId.ToString(), Field.Store.YES, Field.Index.ANALYZED));
            doc.Add(new Field("ID", item.Id.ToString(), Field.Store.YES, Field.Index.NOT_ANALYZED));
            doc.Add(new Field("Url", item.Url, Field.Store.YES, Field.Index.NOT_ANALYZED));
            writer.AddDocument(doc);
        }
        foreach (var item in BinhLuanDal.SelectAll())
        {
            // Comments get a synthetic display name: "<user> commented on <time>".
            item.Ten = string.Format("{0} bình luận ngày {1}", item.Username, Lib.TimeDiff(item.NgayTao));
            var doc = new Document();
            doc.Add(new Field("Ten", item.Ten, Field.Store.YES, Field.Index.NOT_ANALYZED));
            doc.Add(new Field("NoiDung", string.Format("{0} {1}", item.Ten, item.NoiDung), Field.Store.YES,
                Field.Index.ANALYZED));
            doc.Add(new Field("RowId", item.RowId.ToString(), Field.Store.YES, Field.Index.ANALYZED));
            doc.Add(new Field("ID", item.Id.ToString(), Field.Store.YES, Field.Index.NOT_ANALYZED));
            doc.Add(new Field("Url", item.Url, Field.Store.YES, Field.Index.NOT_ANALYZED));
            writer.AddDocument(doc);
        }
        foreach (var item in BlogDal.SelectAll())
        {
            var doc = new Document();
            doc.Add(new Field("Ten", item.Ten, Field.Store.YES, Field.Index.NOT_ANALYZED));
            doc.Add(new Field("NoiDung", string.Format("{0} {1}", item.Ten, item.NoiDung), Field.Store.YES,
                Field.Index.ANALYZED));
            doc.Add(new Field("RowId", item.RowId.ToString(), Field.Store.YES, Field.Index.ANALYZED));
            doc.Add(new Field("ID", item.Id.ToString(), Field.Store.YES, Field.Index.NOT_ANALYZED));
            doc.Add(new Field("Url", item.Url, Field.Store.YES, Field.Index.NOT_ANALYZED));
            writer.AddDocument(doc);
        }
        writer.Optimize();
        writer.Commit();
    }
    finally
    {
        writer.Close();
    }
}
示例12: TestFilterIndexReader
public virtual void TestFilterIndexReader()
{
    // Builds a 3-doc index, copies it into a second directory THROUGH a
    // FilterIndexReader subclass (TestReader) via AddIndexes, then checks the
    // filtered view survived the copy: the asserts below require every
    // remaining term to contain 'e' and the postings for "one" to sit on odd
    // doc ids only.
    Directory directory = NewDirectory();
    IndexWriter writer = new IndexWriter(directory, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())));
    Document d1 = new Document();
    d1.Add(NewTextField("default", "one two", Field.Store.YES));
    writer.AddDocument(d1);
    Document d2 = new Document();
    d2.Add(NewTextField("default", "one three", Field.Store.YES));
    writer.AddDocument(d2);
    Document d3 = new Document();
    d3.Add(NewTextField("default", "two four", Field.Store.YES));
    writer.AddDocument(d3);
    writer.Dispose();
    Directory target = NewDirectory();
    // We mess with the postings so this can fail:
    ((BaseDirectoryWrapper)target).CrossCheckTermVectorsOnClose = false;
    writer = new IndexWriter(target, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())));
    IndexReader reader = new TestReader(DirectoryReader.Open(directory));
    writer.AddIndexes(reader);
    writer.Dispose();
    reader.Dispose();
    // Re-open the copied index and verify what the filter let through.
    reader = DirectoryReader.Open(target);
    TermsEnum terms = MultiFields.GetTerms(reader, "default").Iterator(null);
    while (terms.Next() != null)
    {
        // Every surviving term must contain the letter 'e'.
        Assert.IsTrue(terms.Term().Utf8ToString().IndexOf('e') != -1);
    }
    Assert.AreEqual(TermsEnum.SeekStatus.FOUND, terms.SeekCeil(new BytesRef("one")));
    DocsAndPositionsEnum positions = terms.DocsAndPositions(MultiFields.GetLiveDocs(reader), null);
    while (positions.NextDoc() != DocIdSetIterator.NO_MORE_DOCS)
    {
        // Postings for "one" must only appear on odd document ids.
        Assert.IsTrue((positions.DocID() % 2) == 1);
    }
    reader.Dispose();
    directory.Dispose();
    target.Dispose();
}
示例13: TestMixedMerge
public virtual void TestMixedMerge()
{
    // Verifies that when segments with mixed omitNorms settings for the same
    // field are merged, the merged segment ends up with omitNorms set for
    // both fields (omitting wins over keeping).
    Directory ram = NewDirectory();
    Analyzer analyzer = new MockAnalyzer(Random());
    // Small buffer + aggressive merge policy to force many small segments.
    IndexWriter writer = new IndexWriter(ram, NewIndexWriterConfig(TEST_VERSION_CURRENT, analyzer).SetMaxBufferedDocs(3).SetMergePolicy(NewLogMergePolicy(2)));
    Document d = new Document();
    // this field will have norms
    Field f1 = NewTextField("f1", "this field has norms", Field.Store.NO);
    d.Add(f1);
    // this field will NOT have norms
    FieldType customType = new FieldType(TextField.TYPE_NOT_STORED);
    customType.OmitNorms = true;
    Field f2 = NewField("f2", "this field has NO norms in all docs", customType);
    d.Add(f2);
    for (int i = 0; i < 30; i++)
    {
        writer.AddDocument(d);
    }
    // Now add documents with the settings reversed — f1 omits norms and f2
    // has them — and verify the SegmentMerger keeps things consistent.
    d = new Document();
    // Reversed assignment: f1 takes the omit-norms type, f2 the plain one.
    d.Add(NewField("f1", "this field has norms", customType));
    d.Add(NewTextField("f2", "this field has NO norms in all docs", Field.Store.NO));
    for (int i = 0; i < 30; i++)
    {
        writer.AddDocument(d);
    }
    // force merge
    writer.ForceMerge(1);
    // flush
    writer.Dispose();
    // After the merge, BOTH fields must report omitNorms.
    SegmentReader reader = GetOnlySegmentReader(DirectoryReader.Open(ram));
    FieldInfos fi = reader.FieldInfos;
    Assert.IsTrue(fi.FieldInfo("f1").OmitsNorms(), "OmitNorms field bit should be set.");
    Assert.IsTrue(fi.FieldInfo("f2").OmitsNorms(), "OmitNorms field bit should be set.");
    reader.Dispose();
    ram.Dispose();
}
示例14: TestSimpleSkip
public virtual void TestSimpleSkip()
{
    // Indexes 5000 identical one-term documents, then wraps the freq stream
    // in a byte-counting stream and checks that SkipTo reads only the
    // expected number of bytes at each target — proving the multi-level skip
    // lists are actually used.
    RAMDirectory dir = new RAMDirectory();
    IndexWriter writer = new IndexWriter(dir, new PayloadAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
    Term term = new Term("test", "a");
    for (int i = 0; i < 5000; i++)
    {
        Document d1 = new Document();
        d1.Add(new Field(term.Field(), term.Text(), Field.Store.NO, Field.Index.ANALYZED));
        writer.AddDocument(d1);
    }
    writer.Flush();
    writer.Optimize();
    writer.Close();
    IndexReader reader = SegmentReader.GetOnlySegmentReader(dir);
    SegmentTermPositions tp = (SegmentTermPositions) reader.TermPositions();
    // Replace the freq stream with a counting wrapper so CheckSkipTo can
    // measure how many bytes each skip consumes.
    tp.freqStream_ForNUnit = new CountingStream(this, tp.freqStream_ForNUnit);
    // Run twice to confirm the counts are stable across repeated seeks.
    for (int i = 0; i < 2; i++)
    {
        counter = 0;
        tp.Seek(term);
        CheckSkipTo(tp, 14, 185); // no skips
        CheckSkipTo(tp, 17, 190); // one skip on level 0
        CheckSkipTo(tp, 287, 200); // one skip on level 1, two on level 0
        // this test would fail if we had only one skip level,
        // because then more bytes would be read from the freqStream
        CheckSkipTo(tp, 4800, 250); // one skip on level 2
    }
}
示例15: IndexIndicator
private static void IndexIndicator(IndicatorMetadata indicatorMetadata,
    IEnumerable<IndicatorMetadataTextProperty> properties, IndexWriter writer)
{
    // Adds one document per indicator: a stored id plus the concatenated,
    // lower-cased text of the requested metadata properties (index-only).
    Document doc = new Document();
    doc.Add(new Field("id", indicatorMetadata.IndicatorId.ToString(), Field.Store.YES, Field.Index.NOT_ANALYZED));
    var text = indicatorMetadata.Descriptive;
    StringBuilder sb = new StringBuilder();
    foreach (var indicatorMetadataTextProperty in properties)
    {
        var key = indicatorMetadataTextProperty.ColumnName;
        // Skip properties this indicator has no text for.
        if (text.ContainsKey(key))
        {
            sb.Append(text[key]);
            sb.Append(" ");
        }
    }
    // FIX: lower-case with the invariant culture (CA1304/CA1308) so the
    // indexed text does not vary with the server's locale (e.g. the Turkish
    // dotless-i problem with plain ToLower()).
    doc.Add(new Field("IndicatorText",
        sb.ToString().ToLowerInvariant(), Field.Store.NO,
        Field.Index.ANALYZED));
    writer.AddDocument(doc);
}