本文整理汇总了C#中Lucene.Net.Index.Term.Text方法的典型用法代码示例。如果您正苦于以下问题:C# Term.Text方法的具体用法?C# Term.Text怎么用?C# Term.Text使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类Lucene.Net.Index.Term
的用法示例。
在下文中一共展示了Term.Text方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C#代码示例。
示例1: TestSimpleSkip
// Builds a single-segment index of 5000 identical docs, then verifies that
// SkipTo lands on the expected documents while reading only a bounded number
// of bytes from the freq stream — i.e. the multi-level skip lists are used.
public virtual void TestSimpleSkip()
{
    RAMDirectory dir = new RAMDirectory();
    IndexWriter writer = new IndexWriter(dir, new PayloadAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
    Term term = new Term("test", "a");
    // Every document carries the same single term, so the posting list for
    // "test:a" is 5000 entries long and builds several skip levels.
    for (int i = 0; i < 5000; i++)
    {
        Document d1 = new Document();
        d1.Add(new Field(term.Field(), term.Text(), Field.Store.NO, Field.Index.ANALYZED));
        writer.AddDocument(d1);
    }
    writer.Flush();
    writer.Optimize(); // force a single segment so the SegmentReader cast below holds
    writer.Close();
    IndexReader reader = SegmentReader.GetOnlySegmentReader(dir);
    SegmentTermPositions tp = (SegmentTermPositions) reader.TermPositions();
    // Test hook: wrap the freq stream in a counting stream so CheckSkipTo can
    // presumably bound the bytes read — confirm against CheckSkipTo's impl.
    tp.freqStream_ForNUnit = new CountingStream(this, tp.freqStream_ForNUnit);
    // Run the same sequence twice to verify Seek fully resets the enum state.
    for (int i = 0; i < 2; i++)
    {
        counter = 0;
        tp.Seek(term);
        CheckSkipTo(tp, 14, 185); // no skips
        CheckSkipTo(tp, 17, 190); // one skip on level 0
        CheckSkipTo(tp, 287, 200); // one skip on level 1, two on level 0
        // this test would fail if we had only one skip level,
        // because then more bytes would be read from the freqStream
        CheckSkipTo(tp, 4800, 250); // one skip on level 2
    }
}
示例2: TermCompare
/// <summary>
/// Matches only the exact prefix term: same field and identical text.
/// Any non-matching term ends the enumeration.
/// </summary>
public override bool TermCompare(Term term)
{
    prefix = base.GetPrefixTerm();

    // Reference-compare the field names (the original relies on them being
    // interned — NOTE(review): confirm the interning assumption holds).
    bool sameField = (System.Object)term.Field() == (System.Object)prefix.Field();
    if (sameField && term.Text().Equals(prefix.Text()))
        return true;

    endEnum = true;
    return false;
}
示例3: TermCompare
/// <summary>Equality compare on the term </summary>
public override bool TermCompare(Term term)
{
if (_sField == term.Field())
{
string sSearchText = term.Text();
if (sSearchText.StartsWith(_sPre)) return _regexImpl.Match(sSearchText);
} //eif
_bEndEnum = true;
return false;
}
示例4: RegexTermEnum
/// <summary>
/// Prepares the enumeration: records the field, compiles the regex, derives
/// its constant prefix (empty when none), and positions the underlying term
/// enumeration at that prefix.
/// </summary>
public RegexTermEnum(IndexReader reader, Term term, IRegexCapabilities regexImpl)
{
    _sField = term.Field();
    _regexImpl = regexImpl;
    _regexImpl.Compile(term.Text());

    // Seeking from the literal prefix skips terms that cannot possibly match.
    _sPre = _regexImpl.Prefix() ?? "";
    SetEnum(reader.Terms(new Term(term.Field(), _sPre)));
}
示例5: Count
/// <summary>Counts the live documents in <paramref name="r"/> that contain term <paramref name="t"/>.</summary>
public static int Count(Term t, IndexReader r)
{
    DocsEnum docs = TestUtil.Docs(Random(), r, t.Field(), new BytesRef(t.Text()), MultiFields.GetLiveDocs(r), null, 0);
    if (docs == null)
    {
        // Term not present in the index.
        return 0;
    }

    int hits = 0;
    while (docs.NextDoc() != DocIdSetIterator.NO_MORE_DOCS)
    {
        docs.DocID(); // kept from the original; result intentionally unused
        hits++;
    }
    return hits;
}
示例6: TestLockObtainFailed
/// <summary>
/// Verifies that a reader opened while an IndexWriter still holds the write
/// lock cannot mutate the index: DeleteDocument, SetNorm and UndeleteAll must
/// each throw LockObtainFailedException.
/// </summary>
public virtual void TestLockObtainFailed()
{
    Directory dir = new RAMDirectory();
    Term searchTerm = new Term("content", "aaa");

    // add 11 documents with term : aaa
    IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true);
    for (int docNo = 0; docNo < 11; docNo++)
    {
        AddDoc(writer, searchTerm.Text());
    }

    // Create reader while the writer is still open (and still holds the lock):
    IndexReader reader = IndexReader.Open(dir);

    // Try to make changes — each mutating call must fail.
    try
    {
        reader.DeleteDocument(4);
        Assert.Fail("deleteDocument should have hit LockObtainFailedException");
    }
    catch (LockObtainFailedException)
    {
        // expected
    }

    try
    {
        reader.SetNorm(5, "aaa", 2.0f);
        Assert.Fail("setNorm should have hit LockObtainFailedException");
    }
    catch (LockObtainFailedException)
    {
        // expected
    }

    try
    {
        reader.UndeleteAll();
        Assert.Fail("undeleteAll should have hit LockObtainFailedException");
    }
    catch (LockObtainFailedException)
    {
        // expected
    }

    writer.Close();
    reader.Close();
}
示例7: TestWritingNorms
// Exercises the write-lock lifecycle around IndexReader.SetNorm: the lock is
// held after a norm update and released again by Commit() / Close().
public virtual void TestWritingNorms()
{
    // NOTE(review): Get is called with default "" — if it never returns null,
    // the guard below is ineffective; confirm AppSettings.Get's contract.
    System.String tempDir = SupportClass.AppSettings.Get("tempDir", "");
    if (tempDir == null)
        throw new System.IO.IOException("tempDir undefined, cannot run test");
    System.IO.FileInfo indexDir = new System.IO.FileInfo(tempDir + "\\" + "lucenetestnormwriter");
    Directory dir = FSDirectory.GetDirectory(indexDir);
    IndexWriter writer;
    IndexReader reader;
    Term searchTerm = new Term("content", "aaa");
    // add 1 documents with term : aaa
    writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true);
    AddDoc(writer, searchTerm.Text());
    writer.Close();
    // now open reader & set norm for doc 0
    reader = IndexReader.Open(dir);
    reader.SetNorm(0, "content", (float) 2.0);
    // we should be holding the write lock now:
    Assert.IsTrue(IndexReader.IsLocked(dir), "locked");
    reader.Commit(); // commit releases the write lock
    // we should not be holding the write lock now:
    Assert.IsTrue(!IndexReader.IsLocked(dir), "not locked");
    // open a 2nd reader (must succeed while no lock is held):
    IndexReader reader2 = IndexReader.Open(dir);
    // set norm again for doc 0 — re-acquires the write lock
    reader.SetNorm(0, "content", (float) 3.0);
    Assert.IsTrue(IndexReader.IsLocked(dir), "locked");
    reader.Close();
    // we should not be holding the write lock now:
    Assert.IsTrue(!IndexReader.IsLocked(dir), "not locked");
    reader2.Close();
    dir.Close();
    // Remove the on-disk index created under tempDir.
    RmDir(indexDir);
}
示例8: Set
/// <summary>
/// Points this buffer at <paramref name="term"/>: copies the term's text into
/// the internal char buffer and records its field and the term itself.
/// A null term resets the buffer instead.
/// </summary>
/// <param name="term">Source term, or null to reset.</param>
public void Set(Term term)
{
    if (term == null)
    {
        Reset();
        return;
    }

    // FIX: fetch the text once instead of calling term.Text() three times.
    System.String sourceString = term.Text();
    int sourceEnd = sourceString.Length;

    // copy text into the buffer
    SetTextLength(sourceEnd);
    for (int i = 0; i < sourceEnd; i++)
    {
        text[i] = (char) sourceString[i];
    }

    this.field = term.Field();
    this.term = term;
}
示例9: Set
/// <summary>
/// Copies <paramref name="term"/>'s text and field into this buffer, marks it
/// dirty, and remembers the term; a null term resets the buffer instead.
/// </summary>
public void Set(Term term)
{
    if (term == null)
    {
        Reset();
        return;
    }

    string termText = term.Text();
    int termLen = termText.Length;
    text.setLength(termLen);

    // Bulk-copy the characters into the backing array (equivalent to the
    // original per-character loop).
    termText.CopyTo(0, text.result, 0, termLen);

    dirty = true;
    field = term.Field();
    this.term = term;
}
示例10: AssertTermDocsCount
/// <summary>
/// Asserts that enumerating the live documents for <paramref name="term"/>
/// yields exactly <paramref name="expected"/> hits.
/// </summary>
internal virtual void AssertTermDocsCount(string msg, IndexReader reader, Term term, int expected)
{
    DocsEnum docs = TestUtil.Docs(Random(), reader, term.Field(), new BytesRef(term.Text()), MultiFields.GetLiveDocs(reader), null, 0);

    int actual = 0;
    if (docs != null)
    {
        while (docs.NextDoc() != DocIdSetIterator.NO_MORE_DOCS)
        {
            actual++;
        }
    }

    Assert.AreEqual(expected, actual, msg + ", count mismatch");
}
示例11: TestSimpleSkip
// Modern (4.x-style) variant of the skip-list test: indexes 5000 identical
// docs with the Lucene41 postings format, then checks that advancing the
// positions enum lands on the expected docs within the expected I/O budget.
public virtual void TestSimpleSkip()
{
    // CountingRAMDirectory is a test hook that lets CheckSkipTo observe I/O.
    Directory dir = new CountingRAMDirectory(this, new RAMDirectory());
    IndexWriter writer = new IndexWriter(dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new PayloadAnalyzer()).SetCodec(TestUtil.AlwaysPostingsFormat(new Lucene41PostingsFormat())).SetMergePolicy(NewLogMergePolicy()));
    Term term = new Term("test", "a");
    // One identical term per document -> a 5000-entry posting list with
    // multiple skip levels.
    for (int i = 0; i < 5000; i++)
    {
        Document d1 = new Document();
        d1.Add(NewTextField(term.Field(), term.Text(), Field.Store.NO));
        writer.AddDocument(d1);
    }
    writer.Commit();
    writer.ForceMerge(1); // single segment so GetOnlySegmentReader succeeds
    writer.Dispose();
    AtomicReader reader = GetOnlySegmentReader(DirectoryReader.Open(dir));
    // Run twice to verify a freshly obtained enum behaves identically.
    for (int i = 0; i < 2; i++)
    {
        Counter = 0;
        DocsAndPositionsEnum tp = reader.TermPositionsEnum(term);
        CheckSkipTo(tp, 14, 185); // no skips
        CheckSkipTo(tp, 17, 190); // one skip on level 0
        CheckSkipTo(tp, 287, 200); // one skip on level 1, two on level 0
        // this test would fail if we had only one skip level,
        // because then more bytes would be read from the freqStream
        CheckSkipTo(tp, 4800, 250); // one skip on level 2
    }
}
示例12: contextMenuItemShowAll_Click
/// <summary>
/// Context-menu handler: runs a search for the field:text term shown in the
/// currently selected row of the terms list. Does nothing when no row is selected.
/// </summary>
private void contextMenuItemShowAll_Click(object sender, System.EventArgs e)
{
    // FIX: ListView.SelectedItems is never null, so the original null check
    // could not prevent an out-of-range access on [0] with no selection.
    if (listTerms.SelectedItems == null || listTerms.SelectedItems.Count == 0) return;
    ListViewItem selItem = listTerms.SelectedItems[0];
    if (selItem == null) return;

    // Column 2 holds the field name wrapped in a pair of delimiter characters
    // (strip first and last char); column 3 holds the raw term text.
    string rawField = selItem.SubItems[2].Text.Trim();
    string field = rawField.Substring(1, rawField.Length - 2);
    string text = selItem.SubItems[3].Text;
    if (field == null || text == null)
        return;

    Term t = new Term(field, text);
    _luke.Search(t.Field() + ":" + t.Text());
}
示例13: DeleteReaderWriterConflict
/// <summary>
/// Verifies reader/writer conflict semantics: a reader opened before new
/// documents are written keeps its point-in-time view, its Delete fails once
/// its segment information is stale, and a re-opened reader sees the new data
/// and can delete successfully.
/// </summary>
/// <param name="optimize">When true, the writer optimizes (merges segments) before closing.</param>
private void DeleteReaderWriterConflict(bool optimize)
{
    //Directory dir = new RAMDirectory();
    Directory dir = GetDirectory(true);
    Term searchTerm = new Term("content", "aaa");
    Term searchTerm2 = new Term("content", "bbb");

    // add 100 documents with term : aaa
    IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true);
    for (int i = 0; i < 100; i++)
    {
        AddDoc(writer, searchTerm.Text());
    }
    writer.Close();

    // OPEN READER AT THIS POINT - this should fix the view of the
    // index at the point of having 100 "aaa" documents and 0 "bbb"
    IndexReader reader = IndexReader.Open(dir);
    Assert.AreEqual(100, reader.DocFreq(searchTerm), "first docFreq");
    Assert.AreEqual(0, reader.DocFreq(searchTerm2), "first docFreq");
    AssertTermDocsCount("first reader", reader, searchTerm, 100);
    AssertTermDocsCount("first reader", reader, searchTerm2, 0);

    // add 100 documents with term : bbb
    writer = new IndexWriter(dir, new WhitespaceAnalyzer(), false);
    for (int i = 0; i < 100; i++)
    {
        AddDoc(writer, searchTerm2.Text());
    }

    // REQUEST OPTIMIZATION
    // This causes a new segment to become current for all subsequent
    // searchers. Because of this, deletions made via a previously open
    // reader, which would be applied to that reader's segment, are lost
    // for subsequent searchers/readers
    if (optimize)
        writer.Optimize();
    writer.Close();

    // The reader should not see the new data
    Assert.AreEqual(100, reader.DocFreq(searchTerm), "first docFreq");
    Assert.AreEqual(0, reader.DocFreq(searchTerm2), "first docFreq");
    AssertTermDocsCount("first reader", reader, searchTerm, 100);
    AssertTermDocsCount("first reader", reader, searchTerm2, 0);

    // DELETE DOCUMENTS CONTAINING TERM: aaa
    // NOTE: the reader was created when only "aaa" documents were in
    int deleted = 0;
    try
    {
        deleted = reader.Delete(searchTerm);
        Assert.Fail("Delete allowed on an index reader with stale segment information");
    }
    // FIX: dropped the unused exception variable (compiler warning CS0168).
    catch (System.IO.IOException)
    {
        /* success */
    }

    // Re-open index reader and try again. This time it should see
    // the new data.
    reader.Close();
    reader = IndexReader.Open(dir);
    Assert.AreEqual(100, reader.DocFreq(searchTerm), "first docFreq");
    Assert.AreEqual(100, reader.DocFreq(searchTerm2), "first docFreq");
    AssertTermDocsCount("first reader", reader, searchTerm, 100);
    AssertTermDocsCount("first reader", reader, searchTerm2, 100);

    deleted = reader.Delete(searchTerm);
    Assert.AreEqual(100, deleted, "deleted count");
    // docFreq still reports 100 here: in this API, docFreq counts postings
    // including deleted documents, while the termDocs enumeration skips them.
    Assert.AreEqual(100, reader.DocFreq(searchTerm), "deleted docFreq");
    Assert.AreEqual(100, reader.DocFreq(searchTerm2), "deleted docFreq");
    AssertTermDocsCount("deleted termDocs", reader, searchTerm, 0);
    AssertTermDocsCount("deleted termDocs", reader, searchTerm2, 100);
    reader.Close();

    // CREATE A NEW READER and re-test
    reader = IndexReader.Open(dir);
    Assert.AreEqual(100, reader.DocFreq(searchTerm), "deleted docFreq");
    Assert.AreEqual(100, reader.DocFreq(searchTerm2), "deleted docFreq");
    AssertTermDocsCount("deleted termDocs", reader, searchTerm, 0);
    AssertTermDocsCount("deleted termDocs", reader, searchTerm2, 100);
    reader.Close();
}
示例14: VisitTerm
/// <summary>
/// Filters out terms whose text equals ContentQuery.EmptyText (returns null);
/// every other term is delegated to the base visitor.
/// </summary>
public override Term VisitTerm(Term term)
{
    bool isEmptyMarker = term.Text().Equals(ContentQuery.EmptyText);
    return isEmptyMarker ? null : base.VisitTerm(term);
}
示例15: TestSplitSeq
// Splits `input` sequentially into three directories and verifies that each
// part holds roughly NUM_DOCS/3 documents, that the id ranges are contiguous
// across the parts, and that a deleted document did not survive the split.
public void TestSplitSeq()
{
    MultiPassIndexSplitter splitter = new MultiPassIndexSplitter();
    Directory[] dirs = new Directory[]{
        NewDirectory(),
        NewDirectory(),
        NewDirectory()
    };
    try
    {
        // true = sequential split (contiguous ranges rather than round-robin).
        splitter.Split(TEST_VERSION_CURRENT, input, dirs, true);
        Document doc;
        int start; // running offset: first id expected in the next part
        IndexReader ir;
        using (ir = DirectoryReader.Open(dirs[0]))
        {
            // Each part may be off by at most one doc from an even third.
            assertTrue(ir.NumDocs - NUM_DOCS / 3 <= 1);
            doc = ir.Document(0);
            assertEquals("0", doc.Get("id"));
            start = ir.NumDocs;
        }
        using (ir = DirectoryReader.Open(dirs[1]))
        {
            assertTrue(ir.NumDocs - NUM_DOCS / 3 <= 1);
            doc = ir.Document(0);
            // Part 2 must start exactly where part 1 ended.
            assertEquals(start + "", doc.Get("id"));
            start += ir.NumDocs;
        }
        using (ir = DirectoryReader.Open(dirs[2]))
        {
            assertTrue(ir.NumDocs - NUM_DOCS / 3 <= 1);
            doc = ir.Document(0);
            assertEquals(start + "", doc.Get("id"));
            // make sure the deleted doc is not here
            TermsEnum te = MultiFields.GetTerms(ir, "id").Iterator(null);
            Term t = new Term("id", (NUM_DOCS - 1) + "");
            assertEquals(TermsEnum.SeekStatus.NOT_FOUND, te.SeekCeil(new BytesRef(t.Text())));
            assertNotSame(t.Text(), te.Term().Utf8ToString());
        }
    }
    finally
    {
        // Always release the three temp directories, even on assertion failure.
        foreach (Directory d in dirs)
        {
            d.Dispose();
        }
    }
}