本文整理汇总了C#中Lucene.Net.Util.BytesRef.Utf8ToString方法的典型用法代码示例。如果您正苦于以下问题:C# Lucene.Net.Util.BytesRef.Utf8ToString方法的具体用法?C# Lucene.Net.Util.BytesRef.Utf8ToString怎么用?C# Lucene.Net.Util.BytesRef.Utf8ToString使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类Lucene.Net.Util.BytesRef
的用法示例。
在下文中一共展示了Lucene.Net.Util.BytesRef.Utf8ToString方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C#代码示例。
示例1: Add
/// <summary>
/// Adds a new &lt;&lt;fieldNumber, termBytes&gt;, TermInfo&gt; pair to the set.
/// Term must be lexicographically greater than all previous Terms added.
/// TermInfo pointers must be positive and greater than all previous.
/// </summary>
public void Add(int fieldNumber, BytesRef term, TermInfo ti)
{
// Enforce the ordering contract: (field, term) pairs arrive sorted and the
// freq/prox file pointers never move backwards. An index writer (IsIndex)
// may legitimately start from an empty term.
Debug.Assert(CompareToLastTerm(fieldNumber, term) < 0 || (IsIndex && term.Length == 0 && LastTerm.Length == 0), "Terms are out of order: field=" + FieldName(FieldInfos, fieldNumber) + " (number " + fieldNumber + ")" + " lastField=" + FieldName(FieldInfos, LastFieldNumber) + " (number " + LastFieldNumber + ")" + " text=" + term.Utf8ToString() + " lastText=" + LastTerm.Utf8ToString());
Debug.Assert(ti.FreqPointer >= LastTi.FreqPointer, "freqPointer out of order (" + ti.FreqPointer + " < " + LastTi.FreqPointer + ")");
Debug.Assert(ti.ProxPointer >= LastTi.ProxPointer, "proxPointer out of order (" + ti.ProxPointer + " < " + LastTi.ProxPointer + ")");
// Every IndexInterval-th entry of the main dictionary is mirrored into the
// companion writer ("Other") — presumably the .tii index file; confirm against caller.
if (!IsIndex && Size % IndexInterval == 0)
{
Other.Add(LastFieldNumber, LastTerm, LastTi); // add an index term
}
WriteTerm(fieldNumber, term); // write term
Output.WriteVInt(ti.DocFreq); // write doc freq
// Pointers are delta-encoded against the previous entry to keep VLongs small.
Output.WriteVLong(ti.FreqPointer - LastTi.FreqPointer); // write pointers
Output.WriteVLong(ti.ProxPointer - LastTi.ProxPointer);
if (ti.DocFreq >= SkipInterval)
{
Output.WriteVInt(ti.SkipOffset);
}
if (IsIndex)
{
// Record the delta of the main dictionary's file position for this index entry.
Output.WriteVLong(Other.Output.FilePointer - LastIndexPointer);
LastIndexPointer = Other.Output.FilePointer; // write pointer
}
// Remember this entry so the next Add() can validate ordering and delta-encode.
LastFieldNumber = fieldNumber;
LastTi.Set(ti);
Size++;
}
示例2: TestMultipleDocValuesTypes
public virtual void TestMultipleDocValuesTypes()
{
    // Index four docs carrying every doc-values type, update the numeric field
    // for all of them via UpdateNumericDocValue, then verify that only the
    // numeric field changed while the other doc-values read back unchanged.
    Directory directory = NewDirectory();
    IndexWriterConfig iwc = NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random()));
    iwc.SetMaxBufferedDocs(10); // prevent merges
    IndexWriter writer = new IndexWriter(directory, iwc);
    for (int docNo = 0; docNo < 4; docNo++)
    {
        string asText = Convert.ToString(docNo);
        Document doc = new Document();
        doc.Add(new StringField("dvUpdateKey", "dv", Store.NO));
        doc.Add(new NumericDocValuesField("ndv", docNo));
        doc.Add(new BinaryDocValuesField("bdv", new BytesRef(asText)));
        doc.Add(new SortedDocValuesField("sdv", new BytesRef(asText)));
        doc.Add(new SortedSetDocValuesField("ssdv", new BytesRef(asText)));
        doc.Add(new SortedSetDocValuesField("ssdv", new BytesRef(Convert.ToString(docNo * 2))));
        writer.AddDocument(doc);
    }
    writer.Commit();
    // Overwrite every document's "ndv" value in one doc-values update.
    writer.UpdateNumericDocValue(new Term("dvUpdateKey", "dv"), "ndv", 17L);
    writer.Dispose();
    DirectoryReader reader = DirectoryReader.Open(directory);
    AtomicReader leaf = (AtomicReader)reader.Leaves[0].Reader;
    NumericDocValues numeric = leaf.GetNumericDocValues("ndv");
    BinaryDocValues binary = leaf.GetBinaryDocValues("bdv");
    SortedDocValues sorted = leaf.GetSortedDocValues("sdv");
    SortedSetDocValues sortedSet = leaf.GetSortedSetDocValues("ssdv");
    BytesRef scratch = new BytesRef();
    for (int docNo = 0; docNo < leaf.MaxDoc; docNo++)
    {
        // The update must have replaced the original numeric value everywhere.
        Assert.AreEqual(17, numeric.Get(docNo));
        // Binary and sorted values must be untouched by the numeric update.
        binary.Get(docNo, scratch);
        Assert.AreEqual(new BytesRef(Convert.ToString(docNo)), scratch);
        sorted.Get(docNo, scratch);
        Assert.AreEqual(new BytesRef(Convert.ToString(docNo)), scratch);
        sortedSet.Document = docNo;
        long ord = sortedSet.NextOrd();
        sortedSet.LookupOrd(ord, scratch);
        Assert.AreEqual(docNo, Convert.ToInt32(scratch.Utf8ToString()));
        if (docNo != 0)
        {
            // Doc 0 indexed "0" twice into ssdv, which dedupes to a single ord.
            ord = sortedSet.NextOrd();
            sortedSet.LookupOrd(ord, scratch);
            Assert.AreEqual(docNo * 2, Convert.ToInt32(scratch.Utf8ToString()));
        }
        Assert.AreEqual(SortedSetDocValues.NO_MORE_ORDS, sortedSet.NextOrd());
    }
    reader.Dispose();
    directory.Dispose();
}
示例3: ParseFloat
public float ParseFloat(BytesRef term)
{
    // TODO: parsing straight from the UTF-8 bytes would be far cheaper, but
    // users should really use FloatField, which already decodes from byte[].
    // LUCENENET: parse as double first and then narrow to float so that
    // double.MaxValue.ToString("R") round-trips to Infinity — matching the
    // Java behavior that TestFieldCache.TestInfoStream() depends on.
    string text = term.Utf8ToString();
    double wide = double.Parse(text, NumberStyles.Float, CultureInfo.InvariantCulture);
    return (float)wide;
}
示例4: ParseShort
public short ParseShort(BytesRef term)
{
    // TODO: parsing straight from the UTF-8 bytes would be far cheaper, but
    // users should really use IntField, which already decodes from byte[].
    string text = term.Utf8ToString();
    return short.Parse(text, NumberStyles.Integer, CultureInfo.InvariantCulture);
}
示例5: Verify
// Cross-checks a DocTermOrds (un-inverted "field") against the expected
// per-id ordinal lists: every document's ord stream must enumerate exactly
// the expected terms, in order.
private void Verify(AtomicReader r, int[][] idToOrds, BytesRef[] termsArray, BytesRef prefixRef)
{
// Un-invert "field", restricted to prefixRef, with a random index interval in [2, 10].
DocTermOrds dto = new DocTermOrds(r, r.LiveDocs, "field", prefixRef, int.MaxValue, TestUtil.NextInt(Random(), 2, 10));
// Maps Lucene docID -> the test's stable "id" field value.
FieldCache.Ints docIDToID = FieldCache.DEFAULT.GetInts(r, "id", false);
/*
for(int docID=0;docID<subR.MaxDoc;docID++) {
System.out.println(" docID=" + docID + " id=" + docIDToID[docID]);
}
*/
if (VERBOSE)
{
Console.WriteLine("TEST: verify prefix=" + (prefixRef == null ? "null" : prefixRef.Utf8ToString()));
Console.WriteLine("TEST: all TERMS:");
TermsEnum allTE = MultiFields.GetTerms(r, "field").Iterator(null);
int ord = 0;
while (allTE.Next() != null)
{
Console.WriteLine(" ord=" + (ord++) + " term=" + allTE.Term().Utf8ToString());
}
}
//final TermsEnum te = subR.Fields.Terms("field").iterator();
TermsEnum te = dto.GetOrdTermsEnum(r);
// No un-inverted terms: either the field is absent entirely (null prefix) or
// no term starts with the prefix. Verify the term dictionary agrees, then bail.
if (dto.NumTerms() == 0)
{
if (prefixRef == null)
{
Assert.IsNull(MultiFields.GetTerms(r, "field"));
}
else
{
Terms terms = MultiFields.GetTerms(r, "field");
if (terms != null)
{
TermsEnum termsEnum = terms.Iterator(null);
TermsEnum.SeekStatus result = termsEnum.SeekCeil(prefixRef);
if (result != TermsEnum.SeekStatus.END)
{
// SeekCeil landed on a term: it must NOT share the prefix,
// otherwise DocTermOrds wrongly reported zero terms.
Assert.IsFalse(StringHelper.StartsWith(termsEnum.Term(), prefixRef), "term=" + termsEnum.Term().Utf8ToString() + " matches prefix=" + prefixRef.Utf8ToString());
}
else
{
// ok
}
}
else
{
// ok
}
}
return;
}
if (VERBOSE)
{
Console.WriteLine("TEST: TERMS:");
te.SeekExact(0);
while (true)
{
Console.WriteLine(" ord=" + te.Ord() + " term=" + te.Term().Utf8ToString());
if (te.Next() == null)
{
break;
}
}
}
SortedSetDocValues iter = dto.GetIterator(r);
// For each doc, the ord stream must match idToOrds[id] element-for-element.
for (int docID = 0; docID < r.MaxDoc; docID++)
{
if (VERBOSE)
{
Console.WriteLine("TEST: docID=" + docID + " of " + r.MaxDoc + " (id=" + docIDToID.Get(docID) + ")");
}
iter.Document = docID;
int[] answers = idToOrds[docIDToID.Get(docID)];
int upto = 0;
long ord;
while ((ord = iter.NextOrd()) != SortedSetDocValues.NO_MORE_ORDS)
{
// Resolve the ord back to its term and compare with the expected term.
te.SeekExact(ord);
BytesRef expected = termsArray[answers[upto++]];
if (VERBOSE)
{
Console.WriteLine(" exp=" + expected.Utf8ToString() + " actual=" + te.Term().Utf8ToString());
}
Assert.AreEqual(expected, te.Term(), "expected=" + expected.Utf8ToString() + " actual=" + te.Term().Utf8ToString() + " ord=" + ord);
}
// The ord stream must be exhausted exactly when the expected list is.
Assert.AreEqual(answers.Length, upto);
}
}
示例6: ParseDouble
public double ParseDouble(BytesRef term)
{
    // TODO: parsing straight from the UTF-8 bytes would be far cheaper, but
    // users should really use DoubleField, which already decodes from byte[].
    string text = term.Utf8ToString();
    return double.Parse(text, NumberStyles.Float, CultureInfo.InvariantCulture);
}
示例7: TestRandomSortedBytes
// Indexes random realistic-unicode strings as SortedDocValues (plus docs with
// no value at all), then checks ord ordering, LookupOrd/LookupTerm round-trips,
// and per-document values against a BytesRefHash / id->string map.
public void TestRandomSortedBytes()
{
Directory dir = NewDirectory();
IndexWriterConfig cfg = NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random()));
if (!DefaultCodecSupportsDocsWithField())
{
// if the codec doesnt support missing, we expect missing to be mapped to byte[]
// by the impersonator, but we have to give it a chance to merge them to this
cfg.SetMergePolicy(NewLogMergePolicy());
}
RandomIndexWriter w = new RandomIndexWriter(Random(), dir, cfg);
int numDocs = AtLeast(100);
// hash collects every distinct indexed value; docToString records each doc's value by id.
BytesRefHash hash = new BytesRefHash();
IDictionary<string, string> docToString = new Dictionary<string, string>();
int maxLength = TestUtil.NextInt(Random(), 1, 50);
for (int i = 0; i < numDocs; i++)
{
Document doc = new Document();
doc.Add(NewTextField("id", "" + i, Field.Store.YES));
string @string = TestUtil.RandomRealisticUnicodeString(Random(), 1, maxLength);
BytesRef br = new BytesRef(@string);
doc.Add(new SortedDocValuesField("field", br));
hash.Add(br);
docToString["" + i] = @string;
w.AddDocument(doc);
}
if (Rarely())
{
w.Commit();
}
// Second batch: documents that carry no "field" doc value at all.
int numDocsNoValue = AtLeast(10);
for (int i = 0; i < numDocsNoValue; i++)
{
Document doc = new Document();
doc.Add(NewTextField("id", "noValue", Field.Store.YES));
w.AddDocument(doc);
}
if (!DefaultCodecSupportsDocsWithField())
{
BytesRef bytesRef = new BytesRef();
hash.Add(bytesRef); // add empty value for the gaps
}
if (Rarely())
{
w.Commit();
}
if (!DefaultCodecSupportsDocsWithField())
{
// if the codec doesnt support missing, we expect missing to be mapped to byte[]
// by the impersonator, but we have to give it a chance to merge them to this
w.ForceMerge(1);
}
// Third batch of valued docs; ids are "" + i + numDocs (string concatenation)
// so they cannot collide with the first batch's ids.
for (int i = 0; i < numDocs; i++)
{
Document doc = new Document();
string id = "" + i + numDocs;
doc.Add(NewTextField("id", id, Field.Store.YES));
string @string = TestUtil.RandomRealisticUnicodeString(Random(), 1, maxLength);
BytesRef br = new BytesRef(@string);
hash.Add(br);
docToString[id] = @string;
doc.Add(new SortedDocValuesField("field", br));
w.AddDocument(doc);
}
w.Commit();
IndexReader reader = w.Reader;
SortedDocValues docValues = MultiDocValues.GetSortedValues(reader, "field");
// Ordinals must enumerate the distinct values in UTF8/Unicode sort order,
// and LookupTerm must invert LookupOrd.
int[] sort = hash.Sort(BytesRef.UTF8SortedAsUnicodeComparer);
BytesRef expected = new BytesRef();
BytesRef actual = new BytesRef();
Assert.AreEqual(hash.Size(), docValues.ValueCount);
for (int i = 0; i < hash.Size(); i++)
{
hash.Get(sort[i], expected);
docValues.LookupOrd(i, actual);
Assert.AreEqual(expected.Utf8ToString(), actual.Utf8ToString());
int ord = docValues.LookupTerm(expected);
Assert.AreEqual(i, ord);
}
// Per-document check: find each doc by its "id" term and compare its value.
AtomicReader slowR = SlowCompositeReaderWrapper.Wrap(reader);
ISet<KeyValuePair<string, string>> entrySet = docToString.EntrySet();
foreach (KeyValuePair<string, string> entry in entrySet)
{
// pk lookup
DocsEnum termDocsEnum = slowR.TermDocsEnum(new Term("id", entry.Key));
int docId = termDocsEnum.NextDoc();
expected = new BytesRef(entry.Value);
docValues.Get(docId, actual);
Assert.AreEqual(expected, actual);
}
reader.Dispose();
w.Dispose();
dir.Dispose();
}
示例8: VerifyEnum
// Maximum options (docs/freqs/positions/offsets) to test:
// NOTE(review): this method is truncated in this view (the final comment marks
// omitted code); only the setup and the "no position checks" branch are visible.
private void VerifyEnum(ThreadState threadState, string field, BytesRef term, TermsEnum termsEnum, FieldInfo.IndexOptions maxTestOptions, FieldInfo.IndexOptions maxIndexOptions, ISet<Option> options, bool alwaysTestMax)
{
if (VERBOSE)
{
Console.WriteLine(" verifyEnum: options=" + options + " maxTestOptions=" + maxTestOptions);
}
// Make sure TermsEnum really is positioned on the
// expected term:
Assert.AreEqual(term, termsEnum.Term());
// 50% of the time time pass liveDocs:
bool useLiveDocs = options.Contains(Option.LIVE_DOCS) && Random().NextBoolean();
Bits liveDocs;
if (useLiveDocs)
{
liveDocs = GlobalLiveDocs;
if (VERBOSE)
{
Console.WriteLine(" use liveDocs");
}
}
else
{
liveDocs = null;
if (VERBOSE)
{
Console.WriteLine(" no liveDocs");
}
}
FieldInfo fieldInfo = CurrentFieldInfos.FieldInfo(field);
// NOTE: can be empty list if we are using liveDocs:
SeedPostings expected = GetSeedPostings(term.Utf8ToString(), Fields[field][term], useLiveDocs, maxIndexOptions);
Assert.AreEqual(expected.DocFreq, termsEnum.DocFreq());
// Decide which postings features may be checked: a feature is checkable only
// if both the field's index options and maxTestOptions allow it; each check
// is then performed most of the time (always when alwaysTestMax is set).
bool allowFreqs = fieldInfo.FieldIndexOptions >= FieldInfo.IndexOptions.DOCS_AND_FREQS && maxTestOptions.CompareTo(FieldInfo.IndexOptions.DOCS_AND_FREQS) >= 0;
bool doCheckFreqs = allowFreqs && (alwaysTestMax || Random().Next(3) <= 2);
bool allowPositions = fieldInfo.FieldIndexOptions >= FieldInfo.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS && maxTestOptions.CompareTo(FieldInfo.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS) >= 0;
bool doCheckPositions = allowPositions && (alwaysTestMax || Random().Next(3) <= 2);
bool allowOffsets = fieldInfo.FieldIndexOptions >= FieldInfo.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS && maxTestOptions.CompareTo(FieldInfo.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS) >= 0;
bool doCheckOffsets = allowOffsets && (alwaysTestMax || Random().Next(3) <= 2);
bool doCheckPayloads = options.Contains(Option.PAYLOADS) && allowPositions && fieldInfo.HasPayloads() && (alwaysTestMax || Random().Next(3) <= 2);
DocsEnum prevDocsEnum = null;
DocsEnum docsEnum;
DocsAndPositionsEnum docsAndPositionsEnum;
if (!doCheckPositions)
{
if (allowPositions && Random().Next(10) == 7)
{
// 10% of the time, even though we will not check positions, pull a DocsAndPositions enum
if (options.Contains(Option.REUSE_ENUMS) && Random().Next(10) < 9)
{
prevDocsEnum = threadState.ReuseDocsAndPositionsEnum;
}
int flags = 0;
if (alwaysTestMax || Random().NextBoolean())
{
flags |= DocsAndPositionsEnum.FLAG_OFFSETS;
}
if (alwaysTestMax || Random().NextBoolean())
{
flags |= DocsAndPositionsEnum.FLAG_PAYLOADS;
}
if (VERBOSE)
{
Console.WriteLine(" get DocsAndPositionsEnum (but we won't check positions) flags=" + flags);
}
threadState.ReuseDocsAndPositionsEnum = termsEnum.DocsAndPositions(liveDocs, (DocsAndPositionsEnum)prevDocsEnum, flags);
docsEnum = threadState.ReuseDocsAndPositionsEnum;
docsAndPositionsEnum = threadState.ReuseDocsAndPositionsEnum;
}
else
{
if (VERBOSE)
{
Console.WriteLine(" get DocsEnum");
}
// 90% of the time reuse the previous enum to exercise codec reuse paths.
if (options.Contains(Option.REUSE_ENUMS) && Random().Next(10) < 9)
{
prevDocsEnum = threadState.ReuseDocsEnum;
}
threadState.ReuseDocsEnum = termsEnum.Docs(liveDocs, prevDocsEnum, doCheckFreqs ? DocsEnum.FLAG_FREQS : DocsEnum.FLAG_NONE);
docsEnum = threadState.ReuseDocsEnum;
docsAndPositionsEnum = null;
}
}
else
//......... part of the code is omitted here .........
示例9: TestCodecUsesOwnSortedBytesEachTime
public void TestCodecUsesOwnSortedBytesEachTime()
{
    // Two docs with distinct sorted doc values: reading doc 1 into a second
    // scratch must not invalidate the bytes previously returned for doc 0.
    Analyzer analyzer = new MockAnalyzer(Random());
    Directory directory = NewDirectory();
    IndexWriterConfig conf = NewIndexWriterConfig(TEST_VERSION_CURRENT, analyzer);
    conf.SetMergePolicy(NewLogMergePolicy());
    RandomIndexWriter iwriter = new RandomIndexWriter(Random(), directory, conf);
    Document document = new Document();
    document.Add(new SortedDocValuesField("dv", new BytesRef("foo!")));
    iwriter.AddDocument(document);
    document = new Document();
    document.Add(new SortedDocValuesField("dv", new BytesRef("bar!")));
    iwriter.AddDocument(document);
    iwriter.Dispose();
    // Now search the index (read-only):
    IndexReader ireader = DirectoryReader.Open(directory);
    Debug.Assert(ireader.Leaves.Count == 1);
    BinaryDocValues dv = ((AtomicReader)ireader.Leaves[0].Reader).GetSortedDocValues("dv");
    BytesRef first = new BytesRef();
    dv.Get(0, first);
    Assert.AreEqual("foo!", first.Utf8ToString());
    BytesRef second = new BytesRef();
    dv.Get(1, second);
    Assert.AreEqual("bar!", second.Utf8ToString());
    // The first scratch must still hold its original value.
    Assert.AreEqual("foo!", first.Utf8ToString());
    ireader.Dispose();
    directory.Dispose();
}
示例10: TestEmptySortedBytes
public void TestEmptySortedBytes()
{
    // Two documents whose sorted doc value is the empty byte sequence; after a
    // force-merge both must map to ord 0, which looks up to "".
    Analyzer analyzer = new MockAnalyzer(Random());
    Directory directory = NewDirectory();
    IndexWriterConfig conf = NewIndexWriterConfig(TEST_VERSION_CURRENT, analyzer);
    conf.SetMergePolicy(NewLogMergePolicy());
    RandomIndexWriter iwriter = new RandomIndexWriter(Random(), directory, conf);
    for (int docNo = 0; docNo < 2; docNo++)
    {
        Document doc = new Document();
        doc.Add(new SortedDocValuesField("dv", new BytesRef("")));
        iwriter.AddDocument(doc);
    }
    iwriter.ForceMerge(1);
    iwriter.Dispose();
    // Now read the index back (read-only):
    IndexReader ireader = DirectoryReader.Open(directory);
    Debug.Assert(ireader.Leaves.Count == 1);
    SortedDocValues dv = ((AtomicReader)ireader.Leaves[0].Reader).GetSortedDocValues("dv");
    BytesRef scratch = new BytesRef();
    Assert.AreEqual(0, dv.GetOrd(0));
    Assert.AreEqual(0, dv.GetOrd(1));
    dv.LookupOrd(dv.GetOrd(0), scratch);
    Assert.AreEqual("", scratch.Utf8ToString());
    ireader.Dispose();
    directory.Dispose();
}
示例11: Run
// Thread body: after the starting gun fires, walks every leaf and every doc,
// checking each doc-values field (binary, sorted, numeric, sorted-set) against
// the parallel stored fields, including the docs-with-field bits for gaps.
public override void Run()
{
try
{
StartingGun.Wait();
foreach (AtomicReaderContext context in Ir.Leaves)
{
AtomicReader r = context.AtomicReader;
BinaryDocValues binaries = r.GetBinaryDocValues("dvBin");
Bits binaryBits = r.GetDocsWithField("dvBin");
SortedDocValues sorted = r.GetSortedDocValues("dvSorted");
Bits sortedBits = r.GetDocsWithField("dvSorted");
NumericDocValues numerics = r.GetNumericDocValues("dvNum");
Bits numericBits = r.GetDocsWithField("dvNum");
SortedSetDocValues sortedSet = r.GetSortedSetDocValues("dvSortedSet");
Bits sortedSetBits = r.GetDocsWithField("dvSortedSet");
for (int j = 0; j < r.MaxDoc; j++)
{
// The stored "storedBin" field mirrors both dvBin and dvSorted.
BytesRef binaryValue = r.Document(j).GetBinaryValue("storedBin");
if (binaryValue != null)
{
if (binaries != null)
{
BytesRef scratch = new BytesRef();
binaries.Get(j, scratch);
Assert.AreEqual(binaryValue, scratch);
sorted.Get(j, scratch);
Assert.AreEqual(binaryValue, scratch);
Assert.IsTrue(binaryBits.Get(j));
Assert.IsTrue(sortedBits.Get(j));
}
}
else if (binaries != null)
{
// Doc has no stored value: the dv fields must report it missing too.
Assert.IsFalse(binaryBits.Get(j));
Assert.IsFalse(sortedBits.Get(j));
Assert.AreEqual(-1, sorted.GetOrd(j));
}
string number = r.Document(j).Get("storedNum");
if (number != null)
{
if (numerics != null)
{
Assert.AreEqual(Convert.ToInt64(number), numerics.Get(j));
}
}
else if (numerics != null)
{
// Missing numeric values read back as 0 with the bit cleared.
Assert.IsFalse(numericBits.Get(j));
Assert.AreEqual(0, numerics.Get(j));
}
string[] values = r.Document(j).GetValues("storedSortedSet");
if (values.Length > 0)
{
Assert.IsNotNull(sortedSet);
sortedSet.Document = j;
// Stored values are in sorted order, so they must match the ord stream 1:1.
for (int k = 0; k < values.Length; k++)
{
long ord = sortedSet.NextOrd();
Assert.IsTrue(ord != SortedSetDocValues.NO_MORE_ORDS);
BytesRef value = new BytesRef();
sortedSet.LookupOrd(ord, value);
Assert.AreEqual(values[k], value.Utf8ToString());
}
Assert.AreEqual(SortedSetDocValues.NO_MORE_ORDS, sortedSet.NextOrd());
Assert.IsTrue(sortedSetBits.Get(j));
}
else if (sortedSet != null)
{
sortedSet.Document = j;
Assert.AreEqual(SortedSetDocValues.NO_MORE_ORDS, sortedSet.NextOrd());
Assert.IsFalse(sortedSetBits.Get(j));
}
}
}
TestUtil.CheckReader(Ir);
}
catch (Exception e)
{
// Re-wrap so the failure propagates out of the thread with its cause attached.
throw new Exception(e.Message, e);
}
}
示例12: DoTestSortedSetVsStoredFields
// Indexes random sets of simple strings both as stored fields (in sorted
// order) and as SortedSetDocValues (shuffled), randomly commits and deletes,
// then verifies the dv ord stream reproduces the stored values exactly.
private void DoTestSortedSetVsStoredFields(int minLength, int maxLength, int maxValuesPerDoc)
{
Directory dir = NewDirectory();
IndexWriterConfig conf = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random()));
RandomIndexWriter writer = new RandomIndexWriter(Random(), dir, conf);
// index some docs
int numDocs = AtLeast(300);
for (int i = 0; i < numDocs; i++)
{
Document doc = new Document();
Field idField = new StringField("id", Convert.ToString(i), Field.Store.NO);
doc.Add(idField);
int length;
if (minLength == maxLength)
{
length = minLength; // fixed length
}
else
{
length = TestUtil.NextInt(Random(), minLength, maxLength);
}
int numValues = TestUtil.NextInt(Random(), 0, maxValuesPerDoc);
// create a random set of strings
SortedSet<string> values = new SortedSet<string>();
for (int v = 0; v < numValues; v++)
{
values.Add(TestUtil.RandomSimpleString(Random(), length));
}
// add ordered to the stored field
foreach (string v in values)
{
doc.Add(new StoredField("stored", v));
}
// add in any order to the dv field
IList<string> unordered = new List<string>(values);
unordered = CollectionsHelper.Shuffle(unordered);
foreach (string v in unordered)
{
doc.Add(new SortedSetDocValuesField("dv", new BytesRef(v)));
}
writer.AddDocument(doc);
// Occasionally commit so the index ends up with multiple segments.
if (Random().Next(31) == 0)
{
writer.Commit();
}
}
// delete some docs
int numDeletions = Random().Next(numDocs / 10);
for (int i = 0; i < numDeletions; i++)
{
int id = Random().Next(numDocs);
writer.DeleteDocuments(new Term("id", Convert.ToString(id)));
}
writer.Dispose();
// compare
DirectoryReader ir = DirectoryReader.Open(dir);
foreach (AtomicReaderContext context in ir.Leaves)
{
AtomicReader r = context.AtomicReader;
SortedSetDocValues docValues = r.GetSortedSetDocValues("dv");
BytesRef scratch = new BytesRef();
for (int i = 0; i < r.MaxDoc; i++)
{
string[] stringValues = r.Document(i).GetValues("stored");
if (docValues != null)
{
docValues.Document = i;
}
// Stored values were added in sorted order, so the ord stream (which is
// also sorted) must yield exactly the same sequence, then be exhausted.
for (int j = 0; j < stringValues.Length; j++)
{
Debug.Assert(docValues != null);
long ord = docValues.NextOrd();
Debug.Assert(ord != SortedSetDocValues.NO_MORE_ORDS);
docValues.LookupOrd(ord, scratch);
Assert.AreEqual(stringValues[j], scratch.Utf8ToString());
}
Debug.Assert(docValues == null || docValues.NextOrd() == SortedSetDocValues.NO_MORE_ORDS);
}
}
ir.Dispose();
dir.Dispose();
}
示例13: TestBytesTwoDocumentsMerged
public void TestBytesTwoDocumentsMerged()
{
    // Index two binary-doc-values docs in separate segments, force-merge to
    // one segment, and check each merged doc still carries its own value.
    Analyzer analyzer = new MockAnalyzer(Random());
    Directory directory = NewDirectory();
    IndexWriterConfig conf = NewIndexWriterConfig(TEST_VERSION_CURRENT, analyzer);
    conf.SetMergePolicy(NewLogMergePolicy());
    RandomIndexWriter iwriter = new RandomIndexWriter(Random(), directory, conf);
    Document doc = new Document();
    doc.Add(NewField("id", "0", StringField.TYPE_STORED));
    doc.Add(new BinaryDocValuesField("dv", new BytesRef("hello world 1")));
    iwriter.AddDocument(doc);
    iwriter.Commit(); // first segment boundary
    doc = new Document();
    doc.Add(NewField("id", "1", StringField.TYPE_STORED));
    doc.Add(new BinaryDocValuesField("dv", new BytesRef("hello 2")));
    iwriter.AddDocument(doc);
    iwriter.ForceMerge(1);
    iwriter.Dispose();
    // Now search the merged index (read-only):
    IndexReader ireader = DirectoryReader.Open(directory);
    Debug.Assert(ireader.Leaves.Count == 1);
    BinaryDocValues dv = ((AtomicReader)ireader.Leaves[0].Reader).GetBinaryDocValues("dv");
    BytesRef scratch = new BytesRef();
    for (int docNo = 0; docNo < 2; docNo++)
    {
        Document stored = ((AtomicReader)ireader.Leaves[0].Reader).Document(docNo);
        // Merging may renumber documents, so key the expectation off the stored id.
        string expected = stored.Get("id").Equals("0") ? "hello world 1" : "hello 2";
        dv.Get(docNo, scratch);
        Assert.AreEqual(expected, scratch.Utf8ToString());
    }
    ireader.Dispose();
    directory.Dispose();
}
示例14: Accept
/// <summary>
/// Accepts a term and records its boost, which is encoded as text in the term
/// itself, onto the boost attribute before deferring to the base filter.
/// </summary>
/// <param name="term">Candidate term whose UTF-8 text is a numeric boost value.</param>
/// <returns>The base enumerator's accept status for <paramref name="term"/>.</returns>
protected override AcceptStatus Accept(BytesRef term)
{
    // BUGFIX: Convert.ToSingle(string) parses with the CURRENT culture, so a
    // boost like "1.5" would fail (or misparse) under comma-decimal locales.
    // Index data is locale-independent — parse it invariantly.
    boostAtt.Boost = Convert.ToSingle(term.Utf8ToString(), System.Globalization.CultureInfo.InvariantCulture);
    return base.Accept(term);
}
示例15: ParseShort
/// <summary>
/// Parses the term's UTF-8 text as a 16-bit integer.
/// </summary>
/// <param name="term">Term bytes holding the decimal text of the value.</param>
/// <returns>The parsed <see cref="short"/> value.</returns>
public short ParseShort(BytesRef term)
{
    // TODO: would be far better to directly parse from
    // UTF8 bytes... but really users should use
    // IntField, instead, which already decodes
    // directly from byte[]
    // BUGFIX: parse culture-invariantly so index terms read identically in
    // every locale (matches the other FieldCache parsers in this file).
    return short.Parse(term.Utf8ToString(), System.Globalization.NumberStyles.Integer, System.Globalization.CultureInfo.InvariantCulture);
}