This article collects typical usage examples of the C# method RandomIndexWriter.DeleteDocuments. If you are wondering how to call RandomIndexWriter.DeleteDocuments in C#, or what real-world uses of it look like, the curated examples below should help. You can also explore further usage examples of the class it belongs to, RandomIndexWriter.
The following shows 15 code examples of the RandomIndexWriter.DeleteDocuments method, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C# code examples.
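Before the numbered examples, here is a minimal, self-contained sketch (written for this article, not taken from the examples below) of the two DeleteDocuments overloads they exercise: delete-by-Term and delete-by-Query. It assumes the usual Lucene.NET test-framework helpers (LuceneTestCase members such as NewDirectory, NewStringField, and Random()) that the examples below also rely on.

Directory dir = NewDirectory();
RandomIndexWriter writer = new RandomIndexWriter(Random(), dir);

// Index two trivial documents, each keyed by an "id" field.
Document doc = new Document();
Field idField = NewStringField("id", "", Field.Store.NO);
doc.Add(idField);
idField.StringValue = "1";
writer.AddDocument(doc);
idField.StringValue = "2";
writer.AddDocument(doc);

// Overload 1: delete every document containing the exact term id:1.
writer.DeleteDocuments(new Term("id", "1"));
// Overload 2: delete every document matching an arbitrary query.
writer.DeleteDocuments(new TermQuery(new Term("id", "2")));

writer.Dispose();
dir.Dispose();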
Example 1: Test
public virtual void Test()
{
    Directory dir = NewDirectory();
    IndexWriterConfig conf = NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random()));
    conf.SetCodec(new Lucene46Codec());
    RandomIndexWriter riw = new RandomIndexWriter(Random(), dir, conf);
    Document doc = new Document();
    // these fields should sometimes get term vectors, etc
    Field idField = NewStringField("id", "", Field.Store.NO);
    Field bodyField = NewTextField("body", "", Field.Store.NO);
    Field dvField = new NumericDocValuesField("dv", 5);
    doc.Add(idField);
    doc.Add(bodyField);
    doc.Add(dvField);
    for (int i = 0; i < 100; i++)
    {
        idField.StringValue = Convert.ToString(i);
        bodyField.StringValue = TestUtil.RandomUnicodeString(Random());
        riw.AddDocument(doc);
        if (Random().Next(7) == 0)
        {
            riw.Commit();
        }
        if (Random().Next(20) == 0)
        {
            riw.DeleteDocuments(new Term("id", Convert.ToString(i)));
        }
    }
    riw.Dispose();
    CheckHeaders(dir);
    dir.Dispose();
}
Example 2: CreateRandomIndexes
private void CreateRandomIndexes(int maxSegments)
{
    dir = NewDirectory();
    numDocs = AtLeast(150);
    int numTerms = TestUtil.NextInt(Random(), 1, numDocs / 5);
    ISet<string> randomTerms = new HashSet<string>();
    while (randomTerms.Count < numTerms)
    {
        randomTerms.Add(TestUtil.RandomSimpleString(Random()));
    }
    terms = new List<string>(randomTerms);
    int seed = Random().Next();
    IndexWriterConfig iwc = NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(new Random(seed)));
    iwc.SetMergePolicy(TestSortingMergePolicy.NewSortingMergePolicy(sort));
    iw = new RandomIndexWriter(new Random(seed), dir, iwc);
    for (int i = 0; i < numDocs; ++i)
    {
        Document doc = RandomDocument();
        iw.AddDocument(doc);
        if (i == numDocs / 2 || (i != numDocs - 1 && Random().Next(8) == 0))
        {
            iw.Commit();
        }
        if (Random().Next(15) == 0)
        {
            string term = RandomInts.RandomFrom(Random(), terms);
            iw.DeleteDocuments(new Term("s", term));
        }
    }
    reader = iw.Reader;
}
Example 3: TestSumDocFreq_Mem
public virtual void TestSumDocFreq_Mem()
{
    int numDocs = AtLeast(500);
    Directory dir = NewDirectory();
    RandomIndexWriter writer = new RandomIndexWriter(Random(), dir, Similarity, TimeZone);
    Document doc = new Document();
    Field id = NewStringField("id", "", Field.Store.NO);
    Field field1 = NewTextField("foo", "", Field.Store.NO);
    Field field2 = NewTextField("bar", "", Field.Store.NO);
    doc.Add(id);
    doc.Add(field1);
    doc.Add(field2);
    for (int i = 0; i < numDocs; i++)
    {
        id.StringValue = "" + i;
        char ch1 = (char)TestUtil.NextInt(Random(), 'a', 'z');
        char ch2 = (char)TestUtil.NextInt(Random(), 'a', 'z');
        field1.StringValue = "" + ch1 + " " + ch2;
        ch1 = (char)TestUtil.NextInt(Random(), 'a', 'z');
        ch2 = (char)TestUtil.NextInt(Random(), 'a', 'z');
        field2.StringValue = "" + ch1 + " " + ch2;
        writer.AddDocument(doc);
    }
    IndexReader ir = writer.Reader;
    AssertSumDocFreq(ir);
    ir.Dispose();
    int numDeletions = AtLeast(20);
    for (int i = 0; i < numDeletions; i++)
    {
        writer.DeleteDocuments(new Term("id", "" + Random().Next(numDocs)));
    }
    writer.ForceMerge(1);
    writer.Dispose();
    ir = DirectoryReader.Open(dir);
    AssertSumDocFreq(ir);
    ir.Dispose();
    dir.Dispose();
}
Example 4: TestAdvanceSingleDeletedParentNoChild
public void TestAdvanceSingleDeletedParentNoChild()
{
    Directory dir = NewDirectory();
    RandomIndexWriter w = new RandomIndexWriter(Random(), dir);
    // First doc with one child
    Document parentDoc = new Document();
    parentDoc.Add(NewStringField("parent", "1", Field.Store.NO));
    parentDoc.Add(NewStringField("isparent", "yes", Field.Store.NO));
    Document childDoc = new Document();
    childDoc.Add(NewStringField("child", "1", Field.Store.NO));
    w.AddDocuments(Arrays.AsList(childDoc, parentDoc));
    parentDoc = new Document();
    parentDoc.Add(NewStringField("parent", "2", Field.Store.NO));
    parentDoc.Add(NewStringField("isparent", "yes", Field.Store.NO));
    w.AddDocuments(Arrays.AsList(parentDoc));
    w.DeleteDocuments(new Term("parent", "2"));
    parentDoc = new Document();
    parentDoc.Add(NewStringField("parent", "2", Field.Store.NO));
    parentDoc.Add(NewStringField("isparent", "yes", Field.Store.NO));
    childDoc = new Document();
    childDoc.Add(NewStringField("child", "2", Field.Store.NO));
    w.AddDocuments(Arrays.AsList(childDoc, parentDoc));
    IndexReader r = w.Reader;
    w.Dispose();
    IndexSearcher s = NewSearcher(r);
    // Create a filter that defines "parent" documents in the index - in this case resumes
    Filter parentsFilter = new FixedBitSetCachingWrapperFilter(new QueryWrapperFilter(new TermQuery(new Term("isparent", "yes"))));
    Query parentQuery = new TermQuery(new Term("parent", "2"));
    ToChildBlockJoinQuery parentJoinQuery = new ToChildBlockJoinQuery(parentQuery, parentsFilter, Random().NextBoolean());
    TopDocs topdocs = s.Search(parentJoinQuery, 3);
    assertEquals(1, topdocs.TotalHits);
    r.Dispose();
    dir.Dispose();
}
Example 5: SetUp
public override void SetUp()
{
    base.SetUp();
    Dir = NewDirectory();
    Iw = new RandomIndexWriter(Random(), Dir);
    Document doc = new Document();
    Field idField = new StringField("id", "", Field.Store.NO);
    doc.Add(idField);
    // add 500 docs with id 0..499
    for (int i = 0; i < 500; i++)
    {
        idField.StringValue = Convert.ToString(i);
        Iw.AddDocument(doc);
    }
    // delete 20 of them
    for (int i = 0; i < 20; i++)
    {
        Iw.DeleteDocuments(new Term("id", Convert.ToString(Random().Next(Iw.MaxDoc()))));
    }
    Ir = Iw.Reader;
    @is = NewSearcher(Ir);
}
Example 6: TestEnforceDeletions
public virtual void TestEnforceDeletions()
{
    Directory dir = NewDirectory();
    RandomIndexWriter writer = new RandomIndexWriter(Random(), dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetMergeScheduler(new SerialMergeScheduler()).SetMergePolicy(NewLogMergePolicy(10)));
    // asserts below require no unexpected merges:
    // NOTE: cannot use writer.getReader because RIW (on
    // flipping a coin) may give us a newly opened reader,
    // but we use .reopen on this reader below and expect to
    // (must) get an NRT reader:
    DirectoryReader reader = DirectoryReader.Open(writer.w, true);
    // same reason we don't wrap?
    IndexSearcher searcher = NewSearcher(reader, false);
    // add a doc, refresh the reader, and check that it's there
    Document doc = new Document();
    doc.Add(NewStringField("id", "1", Field.Store.YES));
    writer.AddDocument(doc);
    reader = RefreshReader(reader);
    searcher = NewSearcher(reader, false);
    TopDocs docs = searcher.Search(new MatchAllDocsQuery(), 1);
    Assert.AreEqual(1, docs.TotalHits, "Should find a hit...");
    Filter startFilter = new QueryWrapperFilter(new TermQuery(new Term("id", "1")));
    CachingWrapperFilter filter = new CachingWrapperFilter(startFilter);
    docs = searcher.Search(new MatchAllDocsQuery(), filter, 1);
    Assert.IsTrue(filter.SizeInBytes() > 0);
    Assert.AreEqual(1, docs.TotalHits, "[query + filter] Should find a hit...");
    Query constantScore = new ConstantScoreQuery(filter);
    docs = searcher.Search(constantScore, 1);
    Assert.AreEqual(1, docs.TotalHits, "[just filter] Should find a hit...");
    // make sure we get a cache hit when we reopen reader
    // that had no change to deletions
    // fake delete (deletes nothing):
    writer.DeleteDocuments(new Term("foo", "bar"));
    IndexReader oldReader = reader;
    reader = RefreshReader(reader);
    Assert.IsTrue(reader == oldReader);
    int missCount = filter.MissCount;
    docs = searcher.Search(constantScore, 1);
    Assert.AreEqual(1, docs.TotalHits, "[just filter] Should find a hit...");
    // cache hit:
    Assert.AreEqual(missCount, filter.MissCount);
    // now delete the doc, refresh the reader, and see that it's not there
    writer.DeleteDocuments(new Term("id", "1"));
    // NOTE: important to hold ref here so GC doesn't clear
    // the cache entry! Else the assert below may sometimes
    // fail:
    oldReader = reader;
    reader = RefreshReader(reader);
    searcher = NewSearcher(reader, false);
    missCount = filter.MissCount;
    docs = searcher.Search(new MatchAllDocsQuery(), filter, 1);
    Assert.AreEqual(0, docs.TotalHits, "[query + filter] Should *not* find a hit...");
    // cache hit
    Assert.AreEqual(missCount, filter.MissCount);
    docs = searcher.Search(constantScore, 1);
    Assert.AreEqual(0, docs.TotalHits, "[just filter] Should *not* find a hit...");
    // apply deletes dynamically:
    filter = new CachingWrapperFilter(startFilter);
    writer.AddDocument(doc);
    reader = RefreshReader(reader);
    searcher = NewSearcher(reader, false);
    docs = searcher.Search(new MatchAllDocsQuery(), filter, 1);
    Assert.AreEqual(1, docs.TotalHits, "[query + filter] Should find a hit...");
    missCount = filter.MissCount;
    Assert.IsTrue(missCount > 0);
    constantScore = new ConstantScoreQuery(filter);
    docs = searcher.Search(constantScore, 1);
    Assert.AreEqual(1, docs.TotalHits, "[just filter] Should find a hit...");
    Assert.AreEqual(missCount, filter.MissCount);
    writer.AddDocument(doc);
    // NOTE: important to hold ref here so GC doesn't clear
    // the cache entry! Else the assert below may sometimes
    // fail:
    oldReader = reader;
    reader = RefreshReader(reader);
    searcher = NewSearcher(reader, false);
    docs = searcher.Search(new MatchAllDocsQuery(), filter, 1);
//......... (part of the code omitted here) .........
Example 7: DoTestNumericsVsStoredFields
private void DoTestNumericsVsStoredFields(LongProducer longs)
{
    Directory dir = NewDirectory();
    IndexWriterConfig conf = NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random()));
    RandomIndexWriter writer = new RandomIndexWriter(Random(), dir, conf);
    Document doc = new Document();
    Field idField = new StringField("id", "", Field.Store.NO);
    Field storedField = NewStringField("stored", "", Field.Store.YES);
    Field dvField = new NumericDocValuesField("dv", 0);
    doc.Add(idField);
    doc.Add(storedField);
    doc.Add(dvField);
    // index some docs
    int numDocs = AtLeast(300);
    // numDocs should be always > 256 so that in case of a codec that optimizes
    // for numbers of values <= 256, all storage layouts are tested
    Debug.Assert(numDocs > 256);
    for (int i = 0; i < numDocs; i++)
    {
        idField.StringValue = Convert.ToString(i);
        long value = longs.Next();
        storedField.StringValue = Convert.ToString(value);
        dvField.LongValue = value;
        writer.AddDocument(doc);
        if (Random().Next(31) == 0)
        {
            writer.Commit();
        }
    }
    // delete some docs
    int numDeletions = Random().Next(numDocs / 10);
    for (int i = 0; i < numDeletions; i++)
    {
        int id = Random().Next(numDocs);
        writer.DeleteDocuments(new Term("id", Convert.ToString(id)));
    }
    // merge some segments and ensure that at least one of them has more than
    // 256 values
    writer.ForceMerge(numDocs / 256);
    writer.Dispose();
    // compare
    DirectoryReader ir = DirectoryReader.Open(dir);
    foreach (AtomicReaderContext context in ir.Leaves)
    {
        AtomicReader r = context.AtomicReader;
        NumericDocValues docValues = r.GetNumericDocValues("dv");
        for (int i = 0; i < r.MaxDoc; i++)
        {
            long storedValue = Convert.ToInt64(r.Document(i).Get("stored"));
            Assert.AreEqual(storedValue, docValues.Get(i));
        }
    }
    ir.Dispose();
    dir.Dispose();
}
Example 8: TestZeroTerms
public virtual void TestZeroTerms()
{
    var d = NewDirectory();
    RandomIndexWriter w = new RandomIndexWriter(Random(), d);
    Document doc = new Document();
    doc.Add(NewTextField("field", "one two three", Field.Store.NO));
    // note: the first document (with "field") is never added; only the
    // "field2" doc below is indexed, so the delete removes nothing
    doc = new Document();
    doc.Add(NewTextField("field2", "one two three", Field.Store.NO));
    w.AddDocument(doc);
    w.Commit();
    w.DeleteDocuments(new Term("field", "one"));
    w.ForceMerge(1);
    IndexReader r = w.Reader;
    w.Dispose();
    Assert.AreEqual(1, r.NumDocs);
    Assert.AreEqual(1, r.MaxDoc);
    Terms terms = MultiFields.GetTerms(r, "field");
    if (terms != null)
    {
        Assert.IsNull(terms.Iterator(null).Next());
    }
    r.Dispose();
    d.Dispose();
}
Example 9: DoTestSortedVsFieldCache
private void DoTestSortedVsFieldCache(int minLength, int maxLength)
{
    Directory dir = NewDirectory();
    IndexWriterConfig conf = NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random()));
    RandomIndexWriter writer = new RandomIndexWriter(Random(), dir, conf);
    Document doc = new Document();
    Field idField = new StringField("id", "", Field.Store.NO);
    Field indexedField = new StringField("indexed", "", Field.Store.NO);
    Field dvField = new SortedDocValuesField("dv", new BytesRef());
    doc.Add(idField);
    doc.Add(indexedField);
    doc.Add(dvField);
    // index some docs
    int numDocs = AtLeast(300);
    for (int i = 0; i < numDocs; i++)
    {
        idField.StringValue = Convert.ToString(i);
        int length;
        if (minLength == maxLength)
        {
            length = minLength; // fixed length
        }
        else
        {
            length = TestUtil.NextInt(Random(), minLength, maxLength);
        }
        string value = TestUtil.RandomSimpleString(Random(), length);
        indexedField.StringValue = value;
        dvField.BytesValue = new BytesRef(value);
        writer.AddDocument(doc);
        if (Random().Next(31) == 0)
        {
            writer.Commit();
        }
    }
    // delete some docs
    int numDeletions = Random().Next(numDocs / 10);
    for (int i = 0; i < numDeletions; i++)
    {
        int id = Random().Next(numDocs);
        writer.DeleteDocuments(new Term("id", Convert.ToString(id)));
    }
    writer.Dispose();
    // compare
    DirectoryReader ir = DirectoryReader.Open(dir);
    foreach (AtomicReaderContext context in ir.Leaves)
    {
        AtomicReader r = context.AtomicReader;
        SortedDocValues expected = FieldCache.DEFAULT.GetTermsIndex(r, "indexed");
        SortedDocValues actual = r.GetSortedDocValues("dv");
        AssertEquals(r.MaxDoc, expected, actual);
    }
    ir.Dispose();
    dir.Dispose();
}
Example 10: TestRandom
//......... (part of the code omitted here) .........
                    string child = joinChildDoc.Get("child" + fieldID);
                    if (child != null)
                    {
                        sb.Append(" child" + fieldID + "=" + child);
                    }
                }
                Console.WriteLine(" " + sb);
            }
            if (doDeletes)
            {
                joinChildDoc.Add(NewStringField("blockID", "" + parentDocID, Field.Store.NO));
            }
            w.AddDocument(childDoc);
        }
        // Parent last:
        joinDocs.Add(parentJoinDoc);
        joinW.AddDocuments(joinDocs);
        if (doDeletes && Random().Next(30) == 7)
        {
            toDelete.Add(parentDocID);
        }
    }
    foreach (int deleteID in toDelete)
    {
        if (VERBOSE)
        {
            Console.WriteLine("DELETE parentID=" + deleteID);
        }
        w.DeleteDocuments(new Term("blockID", "" + deleteID));
        joinW.DeleteDocuments(new Term("blockID", "" + deleteID));
    }
    IndexReader r = w.Reader;
    w.Dispose();
    IndexReader joinR = joinW.Reader;
    joinW.Dispose();
    if (VERBOSE)
    {
        Console.WriteLine("TEST: reader=" + r);
        Console.WriteLine("TEST: joinReader=" + joinR);
        for (int docIDX = 0; docIDX < joinR.MaxDoc; docIDX++)
        {
            Console.WriteLine(" docID=" + docIDX + " doc=" + joinR.Document(docIDX));
        }
    }
    IndexSearcher s = NewSearcher(r);
    IndexSearcher joinS = new IndexSearcher(joinR);
    Filter parentsFilter = new FixedBitSetCachingWrapperFilter(new QueryWrapperFilter(new TermQuery(new Term("isParent", "x"))));
    int iters = 200 * RANDOM_MULTIPLIER;
    for (int iter = 0; iter < iters; iter++)
    {
        if (VERBOSE)
        {
            Console.WriteLine("TEST: iter=" + (1 + iter) + " of " + iters);
Example 11: TestForceMergeDeletesMaxSegSize
public virtual void TestForceMergeDeletesMaxSegSize()
{
    Directory dir = NewDirectory();
    IndexWriterConfig conf = NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random()));
    TieredMergePolicy tmp = new TieredMergePolicy();
    tmp.MaxMergedSegmentMB = 0.01;
    tmp.ForceMergeDeletesPctAllowed = 0.0;
    conf.SetMergePolicy(tmp);
    RandomIndexWriter w = new RandomIndexWriter(Random(), dir, conf);
    w.RandomForceMerge = false;
    int numDocs = AtLeast(200);
    for (int i = 0; i < numDocs; i++)
    {
        Document doc = new Document();
        doc.Add(NewStringField("id", "" + i, Field.Store.NO));
        doc.Add(NewTextField("content", "aaa " + i, Field.Store.NO));
        w.AddDocument(doc);
    }
    w.ForceMerge(1);
    IndexReader r = w.Reader;
    Assert.AreEqual(numDocs, r.MaxDoc);
    Assert.AreEqual(numDocs, r.NumDocs);
    r.Dispose();
    if (VERBOSE)
    {
        Console.WriteLine("\nTEST: delete doc");
    }
    w.DeleteDocuments(new Term("id", "" + (42 + 17)));
    r = w.Reader;
    Assert.AreEqual(numDocs, r.MaxDoc);
    Assert.AreEqual(numDocs - 1, r.NumDocs);
    r.Dispose();
    w.ForceMergeDeletes();
    r = w.Reader;
    Assert.AreEqual(numDocs - 1, r.MaxDoc);
    Assert.AreEqual(numDocs - 1, r.NumDocs);
    r.Dispose();
    w.Dispose();
    dir.Dispose();
}
Example 12: TestRandomStoredFields
public virtual void TestRandomStoredFields()
{
    Directory dir = NewDirectory();
    Random rand = Random();
    RandomIndexWriter w = new RandomIndexWriter(rand, dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetMaxBufferedDocs(TestUtil.NextInt(rand, 5, 20)));
    //w.w.setNoCFSRatio(0.0);
    int docCount = AtLeast(200);
    int fieldCount = TestUtil.NextInt(rand, 1, 5);
    IList<int?> fieldIDs = new List<int?>();
    FieldType customType = new FieldType(TextField.TYPE_STORED);
    customType.Tokenized = false;
    Field idField = NewField("id", "", customType);
    for (int i = 0; i < fieldCount; i++)
    {
        fieldIDs.Add(i);
    }
    IDictionary<string, Document> docs = new Dictionary<string, Document>();
    if (VERBOSE)
    {
        Console.WriteLine("TEST: build index docCount=" + docCount);
    }
    FieldType customType2 = new FieldType();
    customType2.Stored = true;
    for (int i = 0; i < docCount; i++)
    {
        Document doc = new Document();
        doc.Add(idField);
        string id = "" + i;
        idField.StringValue = id;
        docs[id] = doc;
        if (VERBOSE)
        {
            Console.WriteLine("TEST: add doc id=" + id);
        }
        foreach (int field in fieldIDs)
        {
            string s;
            if (rand.Next(4) != 3)
            {
                s = TestUtil.RandomUnicodeString(rand, 1000);
                doc.Add(NewField("f" + field, s, customType2));
            }
            else
            {
                s = null;
            }
        }
        w.AddDocument(doc);
        if (rand.Next(50) == 17)
        {
            // mixup binding of field name -> Number every so often
            fieldIDs = CollectionsHelper.Shuffle(fieldIDs);
        }
        if (rand.Next(5) == 3 && i > 0)
        {
            string delID = "" + rand.Next(i);
            if (VERBOSE)
            {
                Console.WriteLine("TEST: delete doc id=" + delID);
            }
            w.DeleteDocuments(new Term("id", delID));
            docs.Remove(delID);
        }
    }
    if (VERBOSE)
    {
        Console.WriteLine("TEST: " + docs.Count + " docs in index; now load fields");
    }
    if (docs.Count > 0)
    {
        string[] idsList = docs.Keys.ToArray(/*new string[docs.Count]*/);
        for (int x = 0; x < 2; x++)
        {
            IndexReader r = w.Reader;
            IndexSearcher s = NewSearcher(r);
            if (VERBOSE)
            {
                Console.WriteLine("TEST: cycle x=" + x + " r=" + r);
            }
            int num = AtLeast(1000);
            for (int iter = 0; iter < num; iter++)
            {
                string testID = idsList[rand.Next(idsList.Length)];
                if (VERBOSE)
                {
                    Console.WriteLine("TEST: test id=" + testID);
                }
                TopDocs hits = s.Search(new TermQuery(new Term("id", testID)), 1);
                Assert.AreEqual(1, hits.TotalHits);
//......... (part of the code omitted here) .........
Example 13: BeforeClass
public void BeforeClass()
{
    Random random = Random();
    Directory = NewDirectory();
    Stopword = "" + RandomChar();
    CharacterRunAutomaton stopset = new CharacterRunAutomaton(BasicAutomata.MakeString(Stopword));
    Analyzer = new MockAnalyzer(random, MockTokenizer.WHITESPACE, false, stopset);
    RandomIndexWriter iw = new RandomIndexWriter(random, Directory, Analyzer, ClassEnvRule.Similarity, ClassEnvRule.TimeZone);
    Document doc = new Document();
    Field id = new StringField("id", "", Field.Store.NO);
    Field field = new TextField("field", "", Field.Store.NO);
    doc.Add(id);
    doc.Add(field);
    // index some docs
    int numDocs = AtLeast(1000);
    for (int i = 0; i < numDocs; i++)
    {
        id.StringValue = Convert.ToString(i);
        field.StringValue = RandomFieldContents();
        iw.AddDocument(doc);
    }
    // delete some docs
    int numDeletes = numDocs / 20;
    for (int i = 0; i < numDeletes; i++)
    {
        Term toDelete = new Term("id", Convert.ToString(random.Next(numDocs)));
        if (random.NextBoolean())
        {
            iw.DeleteDocuments(toDelete);
        }
        else
        {
            iw.DeleteDocuments(new TermQuery(toDelete));
        }
    }
    Reader = iw.Reader;
    S1 = NewSearcher(Reader);
    S2 = NewSearcher(Reader);
    iw.Dispose();
}
Example 14: DoTestMissingVsFieldCache
private void DoTestMissingVsFieldCache(LongProducer longs)
{
    AssumeTrue("Codec does not support GetDocsWithField", DefaultCodecSupportsDocsWithField());
    Directory dir = NewDirectory();
    IndexWriterConfig conf = NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random()));
    RandomIndexWriter writer = new RandomIndexWriter(Random(), dir, conf);
    Field idField = new StringField("id", "", Field.Store.NO);
    Field indexedField = NewStringField("indexed", "", Field.Store.NO);
    Field dvField = new NumericDocValuesField("dv", 0);
    // index some docs
    int numDocs = AtLeast(300);
    // numDocs should be always > 256 so that in case of a codec that optimizes
    // for numbers of values <= 256, all storage layouts are tested
    Debug.Assert(numDocs > 256);
    for (int i = 0; i < numDocs; i++)
    {
        idField.StringValue = Convert.ToString(i);
        long value = longs.Next();
        indexedField.StringValue = Convert.ToString(value);
        dvField.LongValue = value;
        Document doc = new Document();
        doc.Add(idField);
        // 1/4 of the time we neglect to add the fields
        if (Random().Next(4) > 0)
        {
            doc.Add(indexedField);
            doc.Add(dvField);
        }
        writer.AddDocument(doc);
        if (Random().Next(31) == 0)
        {
            writer.Commit();
        }
    }
    // delete some docs
    int numDeletions = Random().Next(numDocs / 10);
    for (int i = 0; i < numDeletions; i++)
    {
        int id = Random().Next(numDocs);
        writer.DeleteDocuments(new Term("id", Convert.ToString(id)));
    }
    // merge some segments and ensure that at least one of them has more than
    // 256 values
    writer.ForceMerge(numDocs / 256);
    writer.Dispose();
    // compare
    DirectoryReader ir = DirectoryReader.Open(dir);
    foreach (var context in ir.Leaves)
    {
        AtomicReader r = context.AtomicReader;
        Bits expected = FieldCache.DEFAULT.GetDocsWithField(r, "indexed");
        Bits actual = FieldCache.DEFAULT.GetDocsWithField(r, "dv");
        AssertEquals(expected, actual);
    }
    ir.Dispose();
    dir.Dispose();
}
Example 15: TestSortedMergeAwayAllValues
// [Test] // LUCENENET NOTE: For now, we are overriding this test in every subclass to pull it into the right context for the subclass
public virtual void TestSortedMergeAwayAllValues()
{
    Directory directory = NewDirectory();
    Analyzer analyzer = new MockAnalyzer(Random());
    IndexWriterConfig iwconfig = NewIndexWriterConfig(TEST_VERSION_CURRENT, analyzer);
    iwconfig.SetMergePolicy(NewLogMergePolicy());
    RandomIndexWriter iwriter = new RandomIndexWriter(Random(), directory, iwconfig);
    Document doc = new Document();
    doc.Add(new StringField("id", "0", Field.Store.NO));
    iwriter.AddDocument(doc);
    doc = new Document();
    doc.Add(new StringField("id", "1", Field.Store.NO));
    doc.Add(new SortedDocValuesField("field", new BytesRef("hello")));
    iwriter.AddDocument(doc);
    iwriter.Commit();
    iwriter.DeleteDocuments(new Term("id", "1"));
    iwriter.ForceMerge(1);
    DirectoryReader ireader = iwriter.Reader;
    iwriter.Dispose();
    SortedDocValues dv = GetOnlySegmentReader(ireader).GetSortedDocValues("field");
    if (DefaultCodecSupportsDocsWithField())
    {
        Assert.AreEqual(-1, dv.GetOrd(0));
        Assert.AreEqual(0, dv.ValueCount);
    }
    else
    {
        Assert.AreEqual(0, dv.GetOrd(0));
        Assert.AreEqual(1, dv.ValueCount);
        BytesRef @ref = new BytesRef();
        dv.LookupOrd(0, @ref);
        Assert.AreEqual(new BytesRef(), @ref);
    }
    ireader.Dispose();
    directory.Dispose();
}