本文整理汇总了C#中Lucene.Net.Documents.Field.SetValue方法的典型用法代码示例。如果您正苦于以下问题：C# Lucene.Net.Documents.Field.SetValue方法的具体用法？C# Lucene.Net.Documents.Field.SetValue怎么用？C# Lucene.Net.Documents.Field.SetValue使用的例子？那么，这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类Lucene.Net.Documents.Field的用法示例。
在下文中一共展示了Lucene.Net.Documents.Field.SetValue方法的9个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C#代码示例。
示例1: TestNoWaitClose
/// <summary>
/// Stress test for IndexWriter.Close(false): kicks off many concurrent
/// merges, then closes without waiting so in-flight merges must be
/// aborted cleanly. Runs once with autoCommit on and once with it off.
/// </summary>
public virtual void TestNoWaitClose()
{
    RAMDirectory dir = new MockRAMDirectory();
    Document document = new Document();
    Field id = new Field("id", "", Field.Store.YES, Field.Index.NOT_ANALYZED);
    document.Add(id);
    for (int pass = 0; pass < 2; pass++)
    {
        // First pass exercises autoCommit=true, second autoCommit=false.
        bool autoCommit = (pass == 0);
        IndexWriter writer = new IndexWriter(dir, autoCommit, ANALYZER, true);
        for (int iter = 0; iter < 10; iter++)
        {
            writer.SetMergeScheduler(new ConcurrentMergeScheduler());
            writer.SetMaxBufferedDocs(2);
            writer.SetMergeFactor(100);
            int docBase = iter * 201;
            for (int j = 0; j < 201; j++)
            {
                id.SetValue(System.Convert.ToString(docBase + j));
                writer.AddDocument(document);
            }
            // Delete 20 docs from this batch (ids docBase, docBase+5, ... docBase+95).
            for (int k = 0; k < 20; k++)
            {
                writer.DeleteDocuments(new Term("id", System.Convert.ToString(docBase + k * 5)));
            }
            // Force a bunch of merge threads to kick off so we
            // stress out aborting them on close:
            writer.SetMergeFactor(3);
            writer.AddDocument(document);
            writer.Flush();
            writer.Close(false);
            // 201 added + 1 extra - 20 deleted = 182 surviving docs per iteration.
            IndexReader reader = IndexReader.Open(dir);
            Assert.AreEqual((1 + iter) * 182, reader.NumDocs());
            reader.Close();
            // Reopen the same index (create=false) for the next iteration.
            writer = new IndexWriter(dir, autoCommit, ANALYZER, false);
        }
        writer.Close();
    }
    dir.Close();
}
示例2: TestFlushExceptions
/// <summary>
/// Verifies that an IOException induced during flush (via FailOnlyOnFlush)
/// propagates to the caller, and that the writer recovers afterwards:
/// once the failure trigger is cleared, the 200 successfully added
/// documents are all present in the index.
/// </summary>
public virtual void TestFlushExceptions()
{
    MockRAMDirectory directory = new MockRAMDirectory();
    FailOnlyOnFlush failure = new FailOnlyOnFlush();
    directory.FailOn(failure);
    IndexWriter writer = new IndexWriter(directory, true, ANALYZER, true);
    ConcurrentMergeScheduler cms = new ConcurrentMergeScheduler();
    writer.SetMergeScheduler(cms);
    // Small buffer so flushes happen frequently.
    writer.SetMaxBufferedDocs(2);
    Document doc = new Document();
    Field idField = new Field("id", "", Field.Store.YES, Field.Index.NOT_ANALYZED);
    doc.Add(idField);
    for (int i = 0; i < 10; i++)
    {
        for (int j = 0; j < 20; j++)
        {
            idField.SetValue(System.Convert.ToString(i * 20 + j));
            writer.AddDocument(doc);
        }
        // One extra buffered doc guarantees the explicit Flush() below has work to do.
        writer.AddDocument(doc);
        failure.SetDoFail();
        try
        {
            writer.Flush();
            Assert.Fail("failed to hit IOException");
        }
        catch (System.IO.IOException) // variable intentionally omitted (avoids CS0168)
        {
            failure.ClearDoFail();
        }
    }
    writer.Close();
    IndexReader reader = IndexReader.Open(directory);
    // The 10 extra docs were lost to the induced flush failures; 10*20 remain.
    Assert.AreEqual(200, reader.NumDocs());
    reader.Close();
    directory.Close();
}
示例3: Create
private void Create()
{
// NOTE: put seed in here to make failures
// deterministic, but do not commit with a seed (to
// better test):
dir = new MockRAMDirectory();
IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(), IndexWriter.MaxFieldLength.LIMITED);
writer.SetMaxBufferedDocs(17);
Document doc = new Document();
Document doc2 = new Document();
Field id = new Field("id", "", Field.Store.YES, Field.Index.NO);
doc.Add(id);
doc2.Add(id);
Field contents = new Field("contents", "", Field.Store.NO, Field.Index.ANALYZED);
doc.Add(contents);
doc2.Add(contents);
Field byteField = new Field("byte", "", Field.Store.NO, Field.Index.NOT_ANALYZED);
doc.Add(byteField);
doc2.Add(byteField);
Field shortField = new Field("short", "", Field.Store.NO, Field.Index.NOT_ANALYZED);
doc.Add(shortField);
doc2.Add(shortField);
Field intField = new Field("int", "", Field.Store.NO, Field.Index.NOT_ANALYZED);
doc.Add(intField);
doc2.Add(intField);
Field longField = new Field("long", "", Field.Store.NO, Field.Index.NOT_ANALYZED);
doc.Add(longField);
doc2.Add(longField);
Field floatField = new Field("float", "", Field.Store.NO, Field.Index.NOT_ANALYZED);
doc.Add(floatField);
doc2.Add(floatField);
Field doubleField = new Field("double", "", Field.Store.NO, Field.Index.NOT_ANALYZED);
doc.Add(doubleField);
doc2.Add(doubleField);
// we use two diff string fields so our FieldCache usage
// is less suspicious to cache inspection
Field stringField = new Field("string", "", Field.Store.NO, Field.Index.NOT_ANALYZED);
doc.Add(stringField);
Field stringFieldIdx = new Field("stringIdx", "", Field.Store.NO, Field.Index.NOT_ANALYZED);
doc.Add(stringFieldIdx);
// doc2 doesn't have stringField or stringFieldIdx, so we get nulls
for (int i = 0; i < NUM_DOCS; i++)
{
id.SetValue("" + i);
if (i % 1000 == 0)
{
contents.SetValue("a b c z");
}
else if (i % 100 == 0)
{
contents.SetValue("a b c y");
}
else if (i % 10 == 0)
{
contents.SetValue("a b c x");
}
else
{
contents.SetValue("a b c");
}
byteField.SetValue("" + NextInt((sbyte) System.SByte.MinValue, (sbyte) System.SByte.MaxValue));
if (NextInt(10) == 3)
{
shortField.SetValue("" + System.Int16.MinValue);
}
else if (NextInt(10) == 7)
{
shortField.SetValue("" + System.Int16.MaxValue);
}
else
{
shortField.SetValue("" + NextInt(System.Int16.MinValue, System.Int16.MaxValue));
}
if (NextInt(10) == 3)
{
intField.SetValue("" + System.Int32.MinValue);
}
else if (NextInt(10) == 7)
{
intField.SetValue("" + System.Int32.MaxValue);
}
else
{
intField.SetValue("" + this.r.Next());
}
if (NextInt(10) == 3)
//.........这里部分代码省略.........
示例4: TestDeleteMerging
/// <summary>
/// Interleaves adds and deletes across forced degenerate merges and then
/// verifies no deletions were lost while the ConcurrentMergeScheduler
/// merged segments that did and did not carry deletes.
/// </summary>
public virtual void TestDeleteMerging()
{
    RAMDirectory dir = new MockRAMDirectory();
    IndexWriter writer = new IndexWriter(dir, true, ANALYZER, true);
    writer.SetMergeScheduler(new ConcurrentMergeScheduler());
    LogDocMergePolicy mergePolicy = new LogDocMergePolicy(writer);
    writer.SetMergePolicy(mergePolicy);
    // Force degenerate merging so we can get a mix of
    // merging of segments with and without deletes at the
    // start:
    mergePolicy.SetMinMergeDocs(1000);
    Document document = new Document();
    Field id = new Field("id", "", Field.Store.YES, Field.Index.NOT_ANALYZED);
    document.Add(id);
    for (int batch = 0; batch < 10; batch++)
    {
        // Add 100 docs with sequential ids for this batch.
        for (int offset = 0; offset < 100; offset++)
        {
            id.SetValue(System.Convert.ToString(batch * 100 + offset));
            writer.AddDocument(document);
        }
        // Delete every 10th id starting at 'batch' up to the end of this batch.
        for (int delId = batch; delId < 100 * (1 + batch); delId += 10)
        {
            writer.DeleteDocuments(new Term("id", "" + delId));
        }
        writer.Flush();
    }
    writer.Close();
    IndexReader reader = IndexReader.Open(dir);
    // Verify that we did not lose any deletes...
    Assert.AreEqual(450, reader.NumDocs());
    reader.Close();
    dir.Close();
}
示例5: TestDeletesNumDocs
/// <summary>
/// Checks that a near-real-time reader obtained from the writer reflects
/// deletions in NumDocs(): two docs are added, then deleted one at a time,
/// and the count drops 2 -> 1 -> 0 across reopened readers.
/// </summary>
public void TestDeletesNumDocs()
{
    Directory dir = new MockRAMDirectory();
    IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(),
        IndexWriter.MaxFieldLength.LIMITED);
    Document doc = new Document();
    doc.Add(new Field("field", "a b c", Field.Store.NO, Field.Index.ANALYZED));
    Field idField = new Field("id", "", Field.Store.NO, Field.Index.NOT_ANALYZED);
    doc.Add(idField);
    // Add two documents distinguished only by their "id" value.
    idField.SetValue("0");
    writer.AddDocument(doc);
    idField.SetValue("1");
    writer.AddDocument(doc);
    IndexReader reader = writer.GetReader();
    Assert.AreEqual(2, reader.NumDocs());
    reader.Close();
    // Delete them one at a time; each fresh NRT reader must see the drop.
    writer.DeleteDocuments(new Term("id", "0"));
    reader = writer.GetReader();
    Assert.AreEqual(1, reader.NumDocs());
    reader.Close();
    writer.DeleteDocuments(new Term("id", "1"));
    reader = writer.GetReader();
    Assert.AreEqual(0, reader.NumDocs());
    reader.Close();
    writer.Close();
    dir.Close();
}
示例6: Run
// Worker-thread body for an IndexWriter exception stress test: for ~3 seconds
// it repeatedly updates a document while a thread-local failure trigger
// (Enclosing_Instance.doFail) is armed, then confirms the writer still
// accepts updates after each induced failure. Any unexpected exception is
// stored in 'failure' and stops the loop.
override public void Run()
{
    Document doc = new Document();
    // Cover the field-option matrix: stored/unstored, analyzed/not, with/without term vectors.
    doc.Add(new Field("content1", "aaa bbb ccc ddd", Field.Store.YES, Field.Index.ANALYZED));
    doc.Add(new Field("content6", "aaa bbb ccc ddd", Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
    doc.Add(new Field("content2", "aaa bbb ccc ddd", Field.Store.YES, Field.Index.NOT_ANALYZED));
    doc.Add(new Field("content3", "aaa bbb ccc ddd", Field.Store.YES, Field.Index.NO));
    doc.Add(new Field("content4", "aaa bbb ccc ddd", Field.Store.NO, Field.Index.ANALYZED));
    doc.Add(new Field("content5", "aaa bbb ccc ddd", Field.Store.NO, Field.Index.NOT_ANALYZED));
    doc.Add(new Field("content7", "aaa bbb ccc ddd", Field.Store.NO, Field.Index.NOT_ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
    Field idField = new Field("id", "", Field.Store.YES, Field.Index.NOT_ANALYZED);
    doc.Add(idField);
    // Run for roughly 3 seconds of wall-clock time.
    long stopTime = (DateTime.Now.Ticks / TimeSpan.TicksPerMillisecond) + 3000;
    while ((DateTime.Now.Ticks / TimeSpan.TicksPerMillisecond) < stopTime)
    {
        // Arm the failure point for this thread only.
        System.Threading.Thread.SetData(Enclosing_Instance.doFail, this);
        // Reuse one of 50 ids so updates frequently replace existing docs.
        System.String id = "" + r.Next(50);
        idField.SetValue(id);
        Term idTerm = new Term("id", id);
        try
        {
            writer.UpdateDocument(idTerm, doc);
        }
        catch (System.SystemException re)
        {
            // Induced failure path: log (when DEBUG) and verify the index
            // is still consistent after the aborted update.
            if (Lucene.Net.Index.TestIndexWriterExceptions.DEBUG)
            {
                System.Console.Out.WriteLine("EXC: ");
                System.Console.Out.WriteLine(re.StackTrace);
            }
            try
            {
                _TestUtil.CheckIndex(writer.GetDirectory());
            }
            catch (System.IO.IOException ioe)
            {
                System.Console.Out.WriteLine(SupportClass.ThreadClass.Current().Name + ": unexpected exception1");
                System.Console.Out.WriteLine(ioe.StackTrace);
                failure = ioe;
                break;
            }
        }
        catch (System.Exception t)
        {
            // Any exception type other than SystemException is a test failure.
            System.Console.Out.WriteLine(SupportClass.ThreadClass.Current().Name + ": unexpected exception2");
            System.Console.Out.WriteLine(t.StackTrace);
            failure = t;
            break;
        }
        // Disarm the failure point.
        System.Threading.Thread.SetData(Enclosing_Instance.doFail, null);
        // After a possible exception (above) I should be able
        // to add a new document without hitting an
        // exception:
        try
        {
            writer.UpdateDocument(idTerm, doc);
        }
        catch (System.Exception t)
        {
            System.Console.Out.WriteLine(SupportClass.ThreadClass.Current().Name + ": unexpected exception3");
            System.Console.Out.WriteLine(t.StackTrace);
            failure = t;
            break;
        }
    }
}
示例7: TestFlushExceptions
/// <summary>
/// Verifies that an IOException induced during flush (via FailOnlyOnFlush)
/// propagates to the caller and that the writer recovers. Because a
/// background merge can sometimes flush the freshly added doc before the
/// explicit Flush() call, an inner retry loop keeps adding docs (counted in
/// extraCount) until the failure actually fires; the final NumDocs check
/// accounts for those extras.
/// </summary>
public virtual void TestFlushExceptions()
{
    MockRAMDirectory directory = new MockRAMDirectory();
    FailOnlyOnFlush failure = new FailOnlyOnFlush();
    directory.FailOn(failure);
    IndexWriter writer = new IndexWriter(directory, ANALYZER, true, IndexWriter.MaxFieldLength.UNLIMITED);
    ConcurrentMergeScheduler cms = new ConcurrentMergeScheduler();
    writer.SetMergeScheduler(cms);
    // Small buffer so flushes happen frequently.
    writer.SetMaxBufferedDocs(2);
    Document doc = new Document();
    Field idField = new Field("id", "", Field.Store.YES, Field.Index.NOT_ANALYZED);
    doc.Add(idField);
    int extraCount = 0;
    for (int i = 0; i < 10; i++)
    {
        for (int j = 0; j < 20; j++)
        {
            idField.SetValue(System.Convert.ToString(i*20 + j));
            writer.AddDocument(doc);
        }
        while (true)
        {
            // must cycle here because sometimes the merge flushes
            // the doc we just added and so there's nothing to
            // flush, and we don't hit the exception
            writer.AddDocument(doc);
            failure.SetDoFail();
            try
            {
                writer.Flush(true, false, true);
                if (failure.hitExc)
                    Assert.Fail("failed to hit IOException");
                // Flush succeeded without firing the failure: this doc survives.
                extraCount++;
            }
            catch (System.IO.IOException) // variable intentionally omitted (avoids CS0168)
            {
                failure.ClearDoFail();
                break;
            }
        }
    }
    writer.Close();
    IndexReader reader = IndexReader.Open(directory, true);
    Assert.AreEqual(200 + extraCount, reader.NumDocs());
    reader.Close();
    directory.Close();
}
示例8: Run
// Worker-thread body: until 'endTime' (or until another thread records a
// failure), repeatedly adds uniquely-identified documents, commits, and
// reopens the shared reader, asserting each new term is immediately visible
// (DocFreq == 1). On any exception, registers this thread in 'failed' and
// rethrows so the test harness sees the original error.
override public void Run()
{
    try
    {
        Document doc = new Document();
        IndexReader r = IndexReader.Open(dir);
        Field f = new Field("f", "", Field.Store.NO, Field.Index.NOT_ANALYZED);
        doc.Add(f);
        int count = 0;
        while ((DateTime.Now.Ticks / TimeSpan.TicksPerMillisecond) < endTime && failed.Count == 0)
        {
            for (int j = 0; j < 10; j++)
            {
                // Unique value per add: "<threadIndex>_<sequence>".
                String s = finalI + "_" + (count++).ToString();
                f.SetValue(s);
                w.AddDocument(doc);
                w.Commit();
                IndexReader r2 = r.Reopen();
                // Reopen must return a new reader since the index changed.
                Assert.IsTrue(r2 != r);
                r.Close();
                r = r2;
                Assert.AreEqual(1, r.DocFreq(new Term("f", s)), "term=f:" + s);
            }
        }
        r.Close();
    }
    catch (Exception)
    {
        lock (failed)
        {
            failed.Add(this);
        }
        // Use 'throw;' (not 'throw t;') so the original stack trace is
        // preserved when the exception propagates (CA2200).
        throw;
    }
}
示例9: TestNoWaitClose
// Stress test for IndexWriter.Close(false): forces aggressive merging, then
// closes the writer without waiting while a helper thread keeps adding docs,
// verifying the index stays readable. Three passes alternate autoCommit and,
// on the last pass, switch to ConcurrentMergeScheduler.
public virtual void TestNoWaitClose()
{
    RAMDirectory directory = new MockRAMDirectory();
    Document doc = new Document();
    Field idField = new Field("id", "", Field.Store.YES, Field.Index.NOT_ANALYZED);
    doc.Add(idField);
    for (int pass = 0; pass < 3; pass++)
    {
        bool autoCommit = pass % 2 == 0;
        IndexWriter writer = new IndexWriter(directory, autoCommit, new WhitespaceAnalyzer(), true);
        //System.out.println("TEST: pass=" + pass + " ac=" + autoCommit + " cms=" + (pass >= 2));
        for (int iter = 0; iter < 10; iter++)
        {
            //System.out.println("TEST: iter=" + iter);
            // Passes 0-1 use the serial scheduler; pass 2 uses concurrent merges.
            MergeScheduler ms;
            if (pass >= 2)
                ms = new ConcurrentMergeScheduler();
            else
                ms = new SerialMergeScheduler();
            writer.SetMergeScheduler(ms);
            writer.SetMaxBufferedDocs(2);
            writer.SetMergeFactor(100);
            for (int j = 0; j < 199; j++)
            {
                idField.SetValue(System.Convert.ToString(iter * 201 + j));
                writer.AddDocument(doc);
            }
            // Delete 20 ids starting at iter*199, stepping by 5.
            int delID = iter * 199;
            for (int j = 0; j < 20; j++)
            {
                writer.DeleteDocuments(new Term("id", System.Convert.ToString(delID)));
                delID += 5;
            }
            // Force a bunch of merge threads to kick off so we
            // stress out aborting them on close:
            writer.SetMergeFactor(2);
            IndexWriter finalWriter = writer;
            // 'failure' collects exceptions raised inside the helper thread.
            System.Collections.ArrayList failure = new System.Collections.ArrayList();
            SupportClass.ThreadClass t1 = new AnonymousClassThread1(finalWriter, doc, failure, this);
            // NOTE(review): this failure check runs before t1 is started, so it
            // appears to always see an empty list — confirm whether it was meant
            // to come after t1.Join() instead.
            if (failure.Count > 0)
            {
                throw (System.Exception) failure[0];
            }
            t1.Start();
            // Close without waiting for merges while t1 is still adding docs.
            writer.Close(false);
            t1.Join();
            // Make sure reader can read
            IndexReader reader = IndexReader.Open(directory);
            reader.Close();
            // Reopen
            writer = new IndexWriter(directory, autoCommit, new WhitespaceAnalyzer(), false);
        }
        writer.Close();
    }
    directory.Close();
}