本文整理汇总了C#中Lucene.Net.Documents.Document.Add方法的典型用法代码示例。如果您正苦于以下问题:C# Lucene.Net.Documents.Document.Add方法的具体用法?C# Lucene.Net.Documents.Document.Add怎么用?C# Lucene.Net.Documents.Document.Add使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类Lucene.Net.Documents.Document
的用法示例。
在下文中一共展示了Lucene.Net.Documents.Document.Add方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C#代码示例。
示例1: Run
// Worker-thread body: stresses concurrent optimize/add/delete on a shared
// IndexWriter. Each outer iteration partially optimizes, adds a batch of
// uniquely-id'd documents, deletes a prefix of that batch, then fully optimizes.
// NOTE(review): depends on enclosing-instance state declared outside this view
// (writerFinal, iFinal, iterFinal, Enclosing_Instance).
override public void Run()
{
try
{
for (int j = 0; j < Lucene.Net.Index.TestThreadedOptimize.NUM_ITER2; j++)
{
// Partial optimize while other threads may be writing.
writerFinal.Optimize(false);
// Add 17*(1+iFinal) documents; the id encodes iteration/thread/batch/ordinal
// so each document is uniquely addressable for deletion below.
for (int k = 0; k < 17 * (1 + iFinal); k++)
{
Document d = new Document();
d.Add(new Field("id", iterFinal + "_" + iFinal + "_" + j + "_" + k, Field.Store.YES, Field.Index.NOT_ANALYZED));
d.Add(new Field("contents", English.IntToEnglish(iFinal + k), Field.Store.NO, Field.Index.ANALYZED));
writerFinal.AddDocument(d);
}
// Delete the first 9*(1+iFinal) documents of the batch just added.
for (int k = 0; k < 9 * (1 + iFinal); k++)
writerFinal.DeleteDocuments(new Term("id", iterFinal + "_" + iFinal + "_" + j + "_" + k));
writerFinal.Optimize();
}
}
catch (System.Exception t)
{
// Record the failure on the owning test and log which thread hit it;
// the harness checks the failed flag instead of rethrowing from the thread.
Enclosing_Instance.setFailed();
System.Console.Out.WriteLine(SupportClass.ThreadClass.Current().Name + ": hit exception");
System.Console.Out.WriteLine(t.StackTrace);
}
}
示例2: SetUp
// Builds a four-document RAM index: each document pairs a shrinking "Field"
// text with a single-letter "sorter" value, then wires up the searcher,
// the "three" term query, and the anonymous filter the tests exercise.
public virtual void SetUp()
{
    directory = new RAMDirectory();
    IndexWriter writer = new IndexWriter(directory, new WhitespaceAnalyzer(), true);

    // (content, sorter) pairs — insertion order fixes the document ids.
    string[][] rows = new string[][]
    {
        new string[] { "one two three four five", "b" },
        new string[] { "one two three four", "d" },
        new string[] { "one two three y", "a" },
        new string[] { "one two x", "c" }
    };
    foreach (string[] row in rows)
    {
        Document doc = new Document();
        doc.Add(Field.Text("Field", row[0]));
        doc.Add(Field.Text("sorter", row[1]));
        writer.AddDocument(doc);
    }

    writer.Optimize();
    writer.Close();

    searcher = new IndexSearcher(directory);
    query = new TermQuery(new Term("Field", "three"));
    filter = new AnonymousClassFilter(this);
}
示例3: Convert
// Translates a domain Document into a Lucene document: every schema-declared
// field is converted via Convert(field, doc) (one schema field may yield
// several Lucene fields), then the schema's name and version are appended as
// stored-only, unindexed metadata fields.
public static LDocument Convert(Document doc, Schema schema)
{
    LDocument result = new LDocument();

    // Schema-declared fields first, preserving schema order.
    foreach (var schemaField in schema.Fields)
    {
        foreach (var luceneField in Convert(schemaField, doc))
        {
            result.Add(luceneField);
        }
    }

    // Metadata identifying the producing schema: stored (true) but not indexed.
    result.Add(new LField(SchemaNameField, schema.Name, ConvertToStore(true, false), ConvertToIndexFlag(false, false)));
    result.Add(new LField(SchemaVersionField, schema.Version, ConvertToStore(true, false), ConvertToIndexFlag(false, false)));

    return result;
}
示例4: SetUp
// Creates two single-document indexes in separate RAM directories and opens
// a read-only reader on each (ir1/ir2); both documents share "field1" but
// differ in their other fields, including deliberately empty ones.
public override void SetUp()
{
    base.SetUp();

    // First index: field1/field2 carry the same text; field4 is empty, unstored.
    RAMDirectory dir1 = new RAMDirectory();
    IndexWriter writer1 = new IndexWriter(dir1, new SimpleAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
    Document document = new Document();
    document.Add(new Field("field1", "the quick brown fox jumps", Field.Store.YES, Field.Index.ANALYZED));
    document.Add(new Field("field2", "the quick brown fox jumps", Field.Store.YES, Field.Index.ANALYZED));
    document.Add(new Field("field4", "", Field.Store.NO, Field.Index.ANALYZED));
    writer1.AddDocument(document);
    writer1.Close();

    // Second index: field1/field3 carry the same text; field0 is empty, unstored.
    RAMDirectory dir2 = new RAMDirectory();
    IndexWriter writer2 = new IndexWriter(dir2, new SimpleAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
    document = new Document();
    document.Add(new Field("field0", "", Field.Store.NO, Field.Index.ANALYZED));
    document.Add(new Field("field1", "the fox jumps over the lazy dog", Field.Store.YES, Field.Index.ANALYZED));
    document.Add(new Field("field3", "the fox jumps over the lazy dog", Field.Store.YES, Field.Index.ANALYZED));
    writer2.AddDocument(document);
    writer2.Close();

    // Read-only readers over each directory for the tests to combine.
    this.ir1 = IndexReader.Open(dir1, true);
    this.ir2 = IndexReader.Open(dir2, true);
}
示例5: Document
/// <summary>Makes a Lucene document for a file.
/// <p>
/// The document has three fields:
/// <ul>
/// <li><code>path</code>--the file's full pathname, as a stored, tokenized field;
/// <li><code>modified</code>--the file's last-modified time as a keyword field,
/// encoded by <code>DateField</code> (searchable, never tokenized); and
/// <li><code>contents</code>--the file's text, supplied through a Reader so it
/// is tokenized but not stored.
/// </ul>
/// </summary>
/// <param name="f">file to index; must exist and be readable</param>
/// <returns>a new Document describing <paramref name="f"/></returns>
public static Document Document(System.IO.FileInfo f)
{
    // make a new, empty document
    Document doc = new Document();

    // "path": stored and tokenized so the path is both searchable and retrievable.
    doc.Add(Field.Text("path", f.FullName));

    // "modified": keyword field — searchable, but no attempt is made to tokenize it.
    // 621355968000000000 ticks == 1970-01-01T00:00:00; dividing by 10000 converts
    // .NET ticks to milliseconds since the Unix epoch for DateField.
    doc.Add(Field.Keyword("modified", DateField.TimeToString(((f.LastWriteTime.Ticks - 621355968000000000) / 10000))));

    // "contents": a Reader-backed field so the file text is tokenized, not stored.
    // FIX: the original built TWO StreamReaders over the same stream just to
    // recombine BaseStream with CurrentEncoding — a single reader is equivalent.
    // The reader is intentionally not disposed here, matching the original;
    // presumably it is consumed/closed downstream when the document is indexed.
    System.IO.FileStream stream = new System.IO.FileStream(f.FullName, System.IO.FileMode.Open, System.IO.FileAccess.Read);
    System.IO.StreamReader reader = new System.IO.StreamReader(stream, System.Text.Encoding.Default);
    doc.Add(Field.Text("contents", reader));

    // return the document
    return doc;
}
示例6: Main
/*public TestCustomSearcherSort(System.String name):base(name)
{
}*/
/*[STAThread]
public static void Main(System.String[] argv)
{
// TestRunner.run(suite()); // {{Aroush-2.9}} how is this done in NUnit?
}*/
/*public static Test suite()
{
return new TestSuite(typeof(TestCustomSearcherSort));
}*/
// create an index for testing
// Builds a RAM-backed index of INDEX_SIZE documents for the sort tests:
//  - every 5th document deliberately omits "publicationDate_" (first sort field),
//  - every 7th document carries content "test" so the test query matches it,
//  - every document gets a "mandant" field cycling through "0".."2".
private Directory GetIndex()
{
    RAMDirectory indexStore = new RAMDirectory();
    IndexWriter writer = new IndexWriter(indexStore, new StandardAnalyzer(Util.Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
    RandomGen random = new RandomGen(this, NewRandom());

    // Don't decrease INDEX_SIZE; if it is too low the problem doesn't show up.
    for (int docNum = 0; docNum < INDEX_SIZE; docNum++)
    {
        Document doc = new Document();

        // Some documents must not have an entry in the first sort field.
        if (docNum % 5 != 0)
        {
            doc.Add(new Field("publicationDate_", random.GetLuceneDate(), Field.Store.YES, Field.Index.NOT_ANALYZED));
        }

        // Some documents must match the query (see below).
        if (docNum % 7 == 0)
        {
            doc.Add(new Field("content", "test", Field.Store.YES, Field.Index.ANALYZED));
        }

        // Every document has a defined 'mandant' field.
        doc.Add(new Field("mandant", System.Convert.ToString(docNum % 3), Field.Store.YES, Field.Index.NOT_ANALYZED));
        writer.AddDocument(doc);
    }

    writer.Optimize();
    writer.Close();
    return indexStore;
}
示例7: SetUp
// Indexes four documents whose "field" text shrinks while "sorter" carries a
// single letter, then prepares the searcher, the "three" term query, and the
// static filter the tests use.
public override void SetUp()
{
    base.SetUp();
    directory = new RAMDirectory();
    IndexWriter writer = new IndexWriter(directory, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);

    // (field text, sorter letter) per document; order fixes the document ids.
    string[][] rows = new string[][]
    {
        new string[] { "one two three four five", "b" },
        new string[] { "one two three four", "d" },
        new string[] { "one two three y", "a" },
        new string[] { "one two x", "c" }
    };
    foreach (string[] row in rows)
    {
        Document doc = new Document();
        doc.Add(new Field("field", row[0], Field.Store.YES, Field.Index.ANALYZED));
        doc.Add(new Field("sorter", row[1], Field.Store.YES, Field.Index.ANALYZED));
        writer.AddDocument(doc);
    }

    writer.Optimize();
    writer.Close();

    searcher = new IndexSearcher(directory);
    query = new TermQuery(new Term("field", "three"));
    filter = NewStaticFilterB();
}
示例8: TestBefore
/// <summary>Verifies that a TermRangeFilter over a DateTools-encoded field
/// reproduces the old DateFilter.Before semantics: a range covering the stored
/// timestamp preserves matches, a range ending before it discards them.</summary>
public virtual void TestBefore()
{
// create an index with one document dated one second in the past
RAMDirectory indexStore = new RAMDirectory();
IndexWriter writer = new IndexWriter(indexStore, new SimpleAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
// NOTE(review): this is milliseconds since 0001-01-01, not the Unix epoch;
// harmless here because the document value and both filter bounds share the scale.
long now = (DateTime.Now.Ticks / TimeSpan.TicksPerMillisecond);
Document doc = new Document();
// add time that is in the past (one second before 'now')
doc.Add(new Field("datefield", DateTools.TimeToString(now - 1000, DateTools.Resolution.MILLISECOND), Field.Store.YES, Field.Index.NOT_ANALYZED));
doc.Add(new Field("body", "Today is a very sunny day in New York City", Field.Store.YES, Field.Index.ANALYZED));
writer.AddDocument(doc);
writer.Optimize();
writer.Close();
IndexSearcher searcher = new IndexSearcher(indexStore, true);
// filter that should preserve matches: (now-2000, now] contains now-1000
//DateFilter df1 = DateFilter.Before("datefield", now);
TermRangeFilter df1 = new TermRangeFilter("datefield", DateTools.TimeToString(now - 2000, DateTools.Resolution.MILLISECOND), DateTools.TimeToString(now, DateTools.Resolution.MILLISECOND), false, true);
// filter that should discard matches: [0, now-2000) excludes now-1000
//DateFilter df2 = DateFilter.Before("datefield", now - 999999);
TermRangeFilter df2 = new TermRangeFilter("datefield", DateTools.TimeToString(0, DateTools.Resolution.MILLISECOND), DateTools.TimeToString(now - 2000, DateTools.Resolution.MILLISECOND), true, false);
// search something that doesn't exist with DateFilter
Query query1 = new TermQuery(new Term("body", "NoMatchForThis"));
// search for something that does exist
Query query2 = new TermQuery(new Term("body", "sunny"));
ScoreDoc[] result;
// ensure that queries return expected results without DateFilter first
result = searcher.Search(query1, null, 1000).ScoreDocs;
Assert.AreEqual(0, result.Length);
result = searcher.Search(query2, null, 1000).ScoreDocs;
Assert.AreEqual(1, result.Length);
// run queries with DateFilter: only (matching query, covering range) may hit
result = searcher.Search(query1, df1, 1000).ScoreDocs;
Assert.AreEqual(0, result.Length);
result = searcher.Search(query1, df2, 1000).ScoreDocs;
Assert.AreEqual(0, result.Length);
result = searcher.Search(query2, df1, 1000).ScoreDocs;
Assert.AreEqual(1, result.Length);
result = searcher.Search(query2, df2, 1000).ScoreDocs;
Assert.AreEqual(0, result.Length);
}
示例9: IndexFile
/// <summary>Indexes a single file: its text (tokenized from a stream reader,
/// not stored) under "contents" and its path (stored, untokenized) under
/// "filepath".</summary>
/// <param name="filePath">path of the file to index; must exist and be readable</param>
public void IndexFile(string filePath)
{
    Lucene.Net.Documents.Document document = new Lucene.Net.Documents.Document();

    // FIX: dispose the reader deterministically — the original leaked the
    // underlying file handle (no Dispose/Close) if AddDocument threw.
    // AddDocument consumes the reader during the call, so disposing after
    // the call returns is safe.
    using (System.IO.StreamReader reader = new System.IO.StreamReader(filePath))
    {
        document.Add(new Lucene.Net.Documents.Field("contents", reader));
        document.Add(new Lucene.Net.Documents.Field("filepath", filePath, Lucene.Net.Documents.Field.Store.YES, Lucene.Net.Documents.Field.Index.NOT_ANALYZED));
        this.IndexWriter.AddDocument(document);
    }
}
示例10: TestBefore
/// <summary>Legacy-API variant of the Before test: a DateFilter whose cutoff is
/// after the stored timestamp preserves matches, one whose cutoff is well before
/// it discards them.</summary>
public virtual void TestBefore()
{
// create an index with one document dated one second in the past
RAMDirectory indexStore = new RAMDirectory();
IndexWriter writer = new IndexWriter(indexStore, new SimpleAnalyzer(), true);
// milliseconds since the Unix epoch (621355968000000000 ticks = 1970-01-01)
long now = (System.DateTime.Now.Ticks - 621355968000000000) / 10000;
Document doc = new Document();
// add time that is in the past (one second before 'now')
doc.Add(Field.Keyword("datefield", DateField.TimeToString(now - 1000)));
doc.Add(Field.Text("body", "Today is a very sunny day in New York City"));
writer.AddDocument(doc);
writer.Optimize();
writer.Close();
IndexSearcher searcher = new IndexSearcher(indexStore);
// filter that should preserve matches (cutoff after the stored time)
DateFilter df1 = DateFilter.Before("datefield", now);
// filter that should discard matches (cutoff well before the stored time)
DateFilter df2 = DateFilter.Before("datefield", now - 999999);
// search something that doesn't exist with DateFilter
Query query1 = new TermQuery(new Term("body", "NoMatchForThis"));
// search for something that does exist
Query query2 = new TermQuery(new Term("body", "sunny"));
Hits result;
// ensure that queries return expected results without DateFilter first
result = searcher.Search(query1);
Assert.AreEqual(0, result.Length());
result = searcher.Search(query2);
Assert.AreEqual(1, result.Length());
// run queries with DateFilter: only (matching query, permissive filter) may hit
result = searcher.Search(query1, df1);
Assert.AreEqual(0, result.Length());
result = searcher.Search(query1, df2);
Assert.AreEqual(0, result.Length());
result = searcher.Search(query2, df1);
Assert.AreEqual(1, result.Length());
result = searcher.Search(query2, df2);
Assert.AreEqual(0, result.Length());
}
示例11: SetUp
// Indexes a single product document — an untokenized part number plus a
// tokenized description — into a fresh RAM directory, then opens a searcher.
public override void SetUp()
{
    base.SetUp();
    directory = new RAMDirectory();
    IndexWriter iw = new IndexWriter(directory, new SimpleAnalyzer(), true);

    Document product = new Document();
    // "partnum" stays untokenized so "Q36" is searchable as an exact keyword.
    product.Add(new Field("partnum", "Q36", Field.Store.YES, Field.Index.UN_TOKENIZED));
    product.Add(new Field("description", "Illidium Space Modulator", Field.Store.YES, Field.Index.TOKENIZED));
    iw.AddDocument(product);
    iw.Close();

    searcher = new IndexSearcher(directory);
}
示例12: SetUp
// Builds an index with one document per entry in docFields: KEY holds the
// document's ordinal as a string, FIELD holds the corresponding text.
// Finishes by opening the searcher the tests run against.
public override void SetUp()
{
    base.SetUp();
    RAMDirectory dir = new RAMDirectory();
    IndexWriter iw = new IndexWriter(dir, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);

    for (int docId = 0; docId < docFields.Length; docId++)
    {
        Document d = new Document();
        // KEY is exact-match only (not analyzed); neither field is stored.
        d.Add(new Field(KEY, "" + docId, Field.Store.NO, Field.Index.NOT_ANALYZED));
        d.Add(new Field(FIELD, docFields[docId], Field.Store.NO, Field.Index.ANALYZED));
        iw.AddDocument(d);
    }

    iw.Close();
    searcher = new IndexSearcher(dir);
}
示例13: TestBasic
/// <summary>Smoke test for SortedSetDocValues faceting: indexes two documents
/// (multi-valued dim "a", single-valued dim "b"), verifies the aggregated
/// top-children counts, then drills down on a=foo AND b=baz.</summary>
public virtual void TestBasic()
{
AssumeTrue("Test requires SortedSetDV support", DefaultCodecSupportsSortedSet());
Directory dir = NewDirectory();
FacetsConfig config = new FacetsConfig();
// dim "a" receives several values on the first document
config.SetMultiValued("a", true);
RandomIndexWriter writer = new RandomIndexWriter(Random(), dir);
Document doc = new Document();
doc.Add(new SortedSetDocValuesFacetField("a", "foo"));
doc.Add(new SortedSetDocValuesFacetField("a", "bar"));
doc.Add(new SortedSetDocValuesFacetField("a", "zoo"));
doc.Add(new SortedSetDocValuesFacetField("b", "baz"));
writer.AddDocument(config.Build(doc));
// randomly split the two documents across segments to vary the test
if (Random().NextBoolean())
{
writer.Commit();
}
doc = new Document();
doc.Add(new SortedSetDocValuesFacetField("a", "foo"));
writer.AddDocument(config.Build(doc));
// NRT open
IndexSearcher searcher = NewSearcher(writer.Reader);
// Per-top-reader state:
SortedSetDocValuesReaderState state = new DefaultSortedSetDocValuesReaderState(searcher.IndexReader);
FacetsCollector c = new FacetsCollector();
searcher.Search(new MatchAllDocsQuery(), c);
SortedSetDocValuesFacetCounts facets = new SortedSetDocValuesFacetCounts(state, c);
// dim "a": foo appears in both docs, bar/zoo once each -> value 4, 3 children
Assert.AreEqual("dim=a path=[] value=4 childCount=3\n foo (2)\n bar (1)\n zoo (1)\n", facets.GetTopChildren(10, "a").ToString());
Assert.AreEqual("dim=b path=[] value=1 childCount=1\n baz (1)\n", facets.GetTopChildren(10, "b").ToString());
// DrillDown: only the first document carries both a=foo and b=baz
DrillDownQuery q = new DrillDownQuery(config);
q.Add("a", "foo");
q.Add("b", "baz");
TopDocs hits = searcher.Search(q, 1);
Assert.AreEqual(1, hits.TotalHits);
IOUtils.Close(writer, searcher.IndexReader, dir);
}
示例14: Run
// Worker-thread body: keeps feeding the same document to the shared writer in
// bursts of 27 until the wall clock (ms since the Unix epoch) passes stopTime.
// NOTE(review): writer and stopTime are enclosing-instance state outside this view.
override public void Run()
{
    Document doc = new Document();
    doc.Add(new Field("content", "aaa", Field.Store.YES, Field.Index.TOKENIZED, Field.TermVector.WITH_POSITIONS_OFFSETS));

    // 621355968000000000 ticks == 1970-01-01; /10000 converts ticks to ms,
    // matching the scale stopTime is presumably expressed in.
    while ((System.DateTime.Now.Ticks - 621355968000000000) / 10000 < stopTime)
    {
        for (int n = 0; n < 27; n++)
        {
            try
            {
                writer.AddDocument(doc);
            }
            catch (System.IO.IOException ioe)
            {
                // Surface index-write failures as runtime errors so the thread dies loudly.
                System.SystemException wrapped = new System.SystemException("addDocument failed", ioe);
                throw wrapped;
            }
        }
        try
        {
            // Brief pause between bursts (10000 ticks = 1 ms).
            System.Threading.Thread.Sleep(new System.TimeSpan((System.Int64) 10000 * 1));
        }
        catch (System.Threading.ThreadInterruptedException)
        {
            // Re-set the interrupt flag and keep looping until stopTime.
            SupportClass.ThreadClass.Current().Interrupt();
        }
    }
}
示例15: CreateRandomTerms
// Populates 'dir' with nDocs documents over nTerms single-character terms
// ('A', 'B', ...). Term t is included in a document with probability
// 1/ceil((nTerms+1-t)^power), so earlier terms are the rarest. Also records
// each Term in the shared 'terms' array for later lookups.
public virtual void CreateRandomTerms(int nDocs, int nTerms, double power, Directory dir)
{
    // rarity[t]: the reciprocal of term t's per-document inclusion probability.
    int[] rarity = new int[nTerms];
    for (int t = 0; t < nTerms; t++)
    {
        int f = (nTerms + 1) - t; // largest for the first term -> rarest below
        rarity[t] = (int) System.Math.Ceiling(System.Math.Pow(f, power));
        terms[t] = new Term("f", System.Convert.ToString((char) ('A' + t)));
    }

    IndexWriter iw = new IndexWriter(dir, new WhitespaceAnalyzer(), true);
    for (int d = 0; d < nDocs; d++)
    {
        Document document = new Document();
        for (int t = 0; t < nTerms; t++)
        {
            // Include term t with probability 1/rarity[t].
            if (r.Next(rarity[t]) == 0)
            {
                document.Add(new Field("f", terms[t].Text(), Field.Store.NO, Field.Index.UN_TOKENIZED));
            }
        }
        iw.AddDocument(document);
    }
    iw.Optimize();
    iw.Close();
}