This article collects typical usage examples of the C# class Lucene.Net.Index.SegmentWriteState. If you have been wondering what SegmentWriteState is for, how to use it, or what working code looks like, the hand-picked class examples below may help.
The SegmentWriteState class belongs to the Lucene.Net.Index namespace. Fifteen code examples of the SegmentWriteState class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C# code examples.
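For orientation before the examples: Lucene.NET passes a SegmentWriteState to codec components (postings formats, stored-fields writers, term-vector writers) when a segment is flushed; it bundles the Directory, SegmentInfo, FieldInfos and IO context those writers need to create the segment's files. The minimal sketch below mirrors the pattern of Examples 2 and 15 further down; the class and method names other than the Lucene types are illustrative only, and the namespaces in the using directives assume Lucene.NET 4.8 and may differ in other versions.

using Lucene.Net.Codecs;            // FieldsConsumer, PostingsWriterBase, BlockTreeTermsWriter
using Lucene.Net.Codecs.Lucene41;   // Lucene41PostingsWriter
using Lucene.Net.Index;             // SegmentWriteState
using Lucene.Net.Util;              // IOUtils

// Illustrative helper (not a Lucene API): builds a terms-dictionary writer chain
// for the segment described by `state`, the way a custom PostingsFormat would.
internal static class SegmentWriteStateSketch
{
    public static FieldsConsumer CreateFieldsConsumer(SegmentWriteState state)
    {
        PostingsWriterBase postingsWriter = null;
        bool success = false;
        try
        {
            // The state tells the writers which directory, segment and fields to target.
            postingsWriter = new Lucene41PostingsWriter(state);
            FieldsConsumer ret = new BlockTreeTermsWriter(state, postingsWriter,
                BlockTreeTermsWriter.DEFAULT_MIN_BLOCK_SIZE, BlockTreeTermsWriter.DEFAULT_MAX_BLOCK_SIZE);
            success = true;
            return ret;
        }
        finally
        {
            if (!success)
            {
                // Construction failed part-way: close what was opened without masking the original exception.
                IOUtils.CloseWhileHandlingException(postingsWriter);
            }
        }
    }
}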
Example 1: Flush
public override void Flush(System.Collections.IDictionary threadsAndFields, SegmentWriteState state)
{
    System.Collections.IDictionary oneThreadsAndFields = new System.Collections.Hashtable();
    System.Collections.IDictionary twoThreadsAndFields = new System.Collections.Hashtable();
    System.Collections.IEnumerator it = new System.Collections.Hashtable(threadsAndFields).GetEnumerator();
    while (it.MoveNext())
    {
        System.Collections.DictionaryEntry entry = (System.Collections.DictionaryEntry) it.Current;
        DocFieldConsumersPerThread perThread = (DocFieldConsumersPerThread) entry.Key;
        System.Collections.ICollection fields = (System.Collections.ICollection) entry.Value;
        System.Collections.IEnumerator fieldsIt = fields.GetEnumerator();
        System.Collections.Hashtable oneFields = new System.Collections.Hashtable();
        System.Collections.Hashtable twoFields = new System.Collections.Hashtable();
        while (fieldsIt.MoveNext())
        {
            DocFieldConsumersPerField perField = (DocFieldConsumersPerField) fieldsIt.Current;
            SupportClass.CollectionsHelper.AddIfNotContains(oneFields, perField.one);
            SupportClass.CollectionsHelper.AddIfNotContains(twoFields, perField.two);
        }
        oneThreadsAndFields[perThread.one] = oneFields;
        twoThreadsAndFields[perThread.two] = twoFields;
    }
    one.Flush(oneThreadsAndFields, state);
    two.Flush(twoThreadsAndFields, state);
}
Example 2: FieldsConsumer
public override FieldsConsumer FieldsConsumer(SegmentWriteState state)
{
    PostingsWriterBase docsWriter = null;
    PostingsWriterBase pulsingWriterInner = null;
    PostingsWriterBase pulsingWriter = null;
    // Terms dict
    bool success = false;
    try
    {
        docsWriter = new Lucene41PostingsWriter(state);
        pulsingWriterInner = new PulsingPostingsWriter(state, 2, docsWriter);
        pulsingWriter = new PulsingPostingsWriter(state, 1, pulsingWriterInner);
        FieldsConsumer ret = new BlockTreeTermsWriter(state, pulsingWriter,
            BlockTreeTermsWriter.DEFAULT_MIN_BLOCK_SIZE, BlockTreeTermsWriter.DEFAULT_MAX_BLOCK_SIZE);
        success = true;
        return ret;
    }
    finally
    {
        if (!success)
        {
            IOUtils.CloseWhileHandlingException(docsWriter, pulsingWriterInner, pulsingWriter);
        }
    }
}
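A note on the pattern above (it appears again in Example 15): the success flag plus finally block ensures that writers opened earlier in the chain are disposed if a later constructor throws, so the caller never receives a half-open chain. Below is a self-contained sketch of the same idiom using plain IDisposable; the names here are illustrative and not Lucene APIs.

using System;

internal static class OpenOrDisposeSketch
{
    // Opens an inner resource, then an outer one that wraps it; on failure the inner
    // resource is disposed so nothing leaks, and the original exception still propagates.
    public static IDisposable Open(Func<IDisposable> openInner, Func<IDisposable, IDisposable> openOuter)
    {
        IDisposable inner = null;
        bool success = false;
        try
        {
            inner = openInner();
            IDisposable outer = openOuter(inner); // may throw
            success = true;
            return outer;
        }
        finally
        {
            if (!success && inner != null)
            {
                try { inner.Dispose(); }
                catch { /* swallow secondary errors; the original exception wins */ }
            }
        }
    }
}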
Example 3: Flush
public override void Flush(System.Collections.IDictionary threadsAndFields, SegmentWriteState state)
{
    System.Collections.IDictionary childThreadsAndFields = new System.Collections.Hashtable();
    System.Collections.IDictionary endChildThreadsAndFields = new System.Collections.Hashtable();
    System.Collections.IEnumerator it = new System.Collections.Hashtable(threadsAndFields).GetEnumerator();
    while (it.MoveNext())
    {
        System.Collections.DictionaryEntry entry = (System.Collections.DictionaryEntry) it.Current;
        DocInverterPerThread perThread = (DocInverterPerThread) entry.Key;
        System.Collections.ICollection fields = (System.Collections.ICollection) entry.Value;
        System.Collections.IEnumerator fieldsIt = fields.GetEnumerator();
        System.Collections.Hashtable childFields = new System.Collections.Hashtable();
        System.Collections.Hashtable endChildFields = new System.Collections.Hashtable();
        while (fieldsIt.MoveNext())
        {
            DocInverterPerField perField = (DocInverterPerField) ((System.Collections.DictionaryEntry) fieldsIt.Current).Key;
            childFields[perField.consumer] = perField.consumer;
            endChildFields[perField.endConsumer] = perField.endConsumer;
        }
        childThreadsAndFields[perThread.consumer] = childFields;
        endChildThreadsAndFields[perThread.endConsumer] = endChildFields;
    }
    consumer.Flush(childThreadsAndFields, state);
    endConsumer.Flush(endChildThreadsAndFields, state);
}
Example 4: FormatPostingsTermsWriter
internal FormatPostingsTermsWriter(SegmentWriteState state, FormatPostingsFieldsWriter parent)
    : base()
{
    this.parent = parent;
    termsOut = parent.termsOut;
    docsWriter = new FormatPostingsDocsWriter(state, this);
}
Example 5: CloseDocStore
public void CloseDocStore(SegmentWriteState state)
{
    lock (this)
    {
        int inc = state.numDocsInStore - lastDocID;
        if (inc > 0)
        {
            InitFieldsWriter();
            Fill(state.numDocsInStore - docWriter.GetDocStoreOffset());
        }
        if (fieldsWriter != null)
        {
            fieldsWriter.Close();
            fieldsWriter = null;
            lastDocID = 0;
            System.Diagnostics.Debug.Assert(state.docStoreSegmentName != null);
            SupportClass.CollectionsHelper.AddIfNotContains(state.flushedFiles, state.docStoreSegmentName + "." + IndexFileNames.FIELDS_EXTENSION);
            SupportClass.CollectionsHelper.AddIfNotContains(state.flushedFiles, state.docStoreSegmentName + "." + IndexFileNames.FIELDS_INDEX_EXTENSION);
            state.docWriter.RemoveOpenFile(state.docStoreSegmentName + "." + IndexFileNames.FIELDS_EXTENSION);
            state.docWriter.RemoveOpenFile(state.docStoreSegmentName + "." + IndexFileNames.FIELDS_INDEX_EXTENSION);
            System.String fileName = state.docStoreSegmentName + "." + IndexFileNames.FIELDS_INDEX_EXTENSION;
            // The .fdx index file holds a 4-byte header plus one 8-byte pointer per stored document,
            // so its expected length can be checked directly against numDocsInStore.
            if (4 + ((long) state.numDocsInStore) * 8 != state.directory.FileLength(fileName))
                throw new System.SystemException("after flush: fdx size mismatch: " + state.numDocsInStore + " docs vs " + state.directory.FileLength(fileName) + " length in bytes of " + fileName + " file exists?=" + state.directory.FileExists(fileName));
        }
    }
}
Example 6: Flush
public override void Flush(Support.Dictionary<DocFieldConsumerPerThread, IList<DocFieldConsumerPerField>> threadsAndFields, SegmentWriteState state)
{
    Support.Dictionary<InvertedDocConsumerPerThread, IList<InvertedDocConsumerPerField>> childThreadsAndFields = new Support.Dictionary<InvertedDocConsumerPerThread, IList<InvertedDocConsumerPerField>>();
    Support.Dictionary<InvertedDocEndConsumerPerThread, IList<InvertedDocEndConsumerPerField>> endChildThreadsAndFields = new Support.Dictionary<InvertedDocEndConsumerPerThread, IList<InvertedDocEndConsumerPerField>>();
    foreach (KeyValuePair<DocFieldConsumerPerThread, IList<DocFieldConsumerPerField>> entry in new Support.Dictionary<DocFieldConsumerPerThread, IList<DocFieldConsumerPerField>>(threadsAndFields))
    {
        DocInverterPerThread perThread = (DocInverterPerThread)entry.Key;
        List<InvertedDocConsumerPerField> childFields = new List<InvertedDocConsumerPerField>();
        List<InvertedDocEndConsumerPerField> endChildFields = new List<InvertedDocEndConsumerPerField>();
        foreach (DocFieldConsumerPerField field in entry.Value)
        {
            DocInverterPerField perField = (DocInverterPerField)field;
            childFields.Add(perField.consumer);
            endChildFields.Add(perField.endConsumer);
        }
        childThreadsAndFields[perThread.consumer] = childFields;
        endChildThreadsAndFields[perThread.endConsumer] = endChildFields;
    }
    consumer.Flush(childThreadsAndFields, state);
    endConsumer.Flush(endChildThreadsAndFields, state);
}
Example 7: Flush
public override void Flush(Support.Dictionary<DocFieldConsumerPerThread, IList<DocFieldConsumerPerField>> threadsAndFields, SegmentWriteState state)
{
    Support.Dictionary<DocFieldConsumerPerThread, IList<DocFieldConsumerPerField>> oneThreadsAndFields = new Support.Dictionary<DocFieldConsumerPerThread, IList<DocFieldConsumerPerField>>();
    Support.Dictionary<DocFieldConsumerPerThread, IList<DocFieldConsumerPerField>> twoThreadsAndFields = new Support.Dictionary<DocFieldConsumerPerThread, IList<DocFieldConsumerPerField>>();
    foreach (KeyValuePair<DocFieldConsumerPerThread, IList<DocFieldConsumerPerField>> entry in new Support.Dictionary<DocFieldConsumerPerThread, IList<DocFieldConsumerPerField>>(threadsAndFields))
    {
        DocFieldConsumersPerThread perThread = (DocFieldConsumersPerThread) entry.Key;
        IList<DocFieldConsumerPerField> fields = entry.Value;
        //IEnumerator<DocFieldConsumerPerField> fieldsIt = fields.GetEnumerator();
        IList<DocFieldConsumerPerField> oneFields = new List<DocFieldConsumerPerField>();
        IList<DocFieldConsumerPerField> twoFields = new List<DocFieldConsumerPerField>();
        foreach (DocFieldConsumersPerField perField in fields)
        {
            oneFields.Add(perField.one);
            twoFields.Add(perField.two);
        }
        oneThreadsAndFields[perThread.one] = oneFields;
        twoThreadsAndFields[perThread.two] = twoFields;
    }
    one.Flush(oneThreadsAndFields, state);
    two.Flush(twoThreadsAndFields, state);
}
Example 8: Flush
public override void Flush(SegmentWriteState state)
{
    int numDocs = state.SegmentInfo.DocCount;
    if (numDocs > 0)
    {
        // It's possible that all documents seen in this segment
        // hit non-aborting exceptions, in which case we will
        // not have yet init'd the FieldsWriter:
        InitFieldsWriter(state.Context);
        Fill(numDocs);
    }
    if (FieldsWriter != null)
    {
        bool success = false;
        try
        {
            FieldsWriter.Finish(state.FieldInfos, numDocs);
            success = true;
        }
        finally
        {
            if (success)
            {
                IOUtils.Close(FieldsWriter);
            }
            else
            {
                IOUtils.CloseWhileHandlingException(FieldsWriter);
            }
        }
    }
}
Example 9: Flush
public override void Flush(System.Collections.IDictionary threadsAndFields, SegmentWriteState state)
{
    lock (this)
    {
        if (tvx != null)
        {
            if (state.numDocsInStore > 0)
                // In case there are some final documents that we
                // didn't see (because they hit a non-aborting exception):
                Fill(state.numDocsInStore - docWriter.GetDocStoreOffset());
            tvx.Flush();
            tvd.Flush();
            tvf.Flush();
        }
        System.Collections.IEnumerator it = new System.Collections.Hashtable(threadsAndFields).GetEnumerator();
        while (it.MoveNext())
        {
            System.Collections.DictionaryEntry entry = (System.Collections.DictionaryEntry) it.Current;
            System.Collections.IEnumerator it2 = ((System.Collections.ICollection) entry.Value).GetEnumerator();
            while (it2.MoveNext())
            {
                TermVectorsTermsWriterPerField perField = (TermVectorsTermsWriterPerField) ((System.Collections.DictionaryEntry) it2.Current).Key;
                perField.termsHashPerField.Reset();
                perField.ShrinkHash();
            }
            TermVectorsTermsWriterPerThread perThread = (TermVectorsTermsWriterPerThread) entry.Key;
            perThread.termsHashPerThread.Reset(true);
        }
    }
}
Example 10: Flush
public override void Flush(IDictionary<DocFieldConsumerPerThread, ICollection<DocFieldConsumerPerField>> threadsAndFields, SegmentWriteState state)
{
    var oneThreadsAndFields = new HashMap<DocFieldConsumerPerThread, ICollection<DocFieldConsumerPerField>>();
    var twoThreadsAndFields = new HashMap<DocFieldConsumerPerThread, ICollection<DocFieldConsumerPerField>>();
    foreach (var entry in threadsAndFields)
    {
        DocFieldConsumersPerThread perThread = (DocFieldConsumersPerThread) entry.Key;
        ICollection<DocFieldConsumerPerField> fields = entry.Value;
        IEnumerator<DocFieldConsumerPerField> fieldsIt = fields.GetEnumerator();
        ICollection<DocFieldConsumerPerField> oneFields = new HashSet<DocFieldConsumerPerField>();
        ICollection<DocFieldConsumerPerField> twoFields = new HashSet<DocFieldConsumerPerField>();
        while (fieldsIt.MoveNext())
        {
            DocFieldConsumersPerField perField = (DocFieldConsumersPerField) fieldsIt.Current;
            oneFields.Add(perField.one);
            twoFields.Add(perField.two);
        }
        oneThreadsAndFields[perThread.one] = oneFields;
        twoThreadsAndFields[perThread.two] = twoFields;
    }
    one.Flush(oneThreadsAndFields, state);
    two.Flush(twoThreadsAndFields, state);
}
Example 11: Flush
public override void Flush(IDictionary<DocFieldConsumerPerThread, ICollection<DocFieldConsumerPerField>> threadsAndFields, SegmentWriteState state)
{
    var childThreadsAndFields = new HashMap<InvertedDocConsumerPerThread, ICollection<InvertedDocConsumerPerField>>();
    var endChildThreadsAndFields = new HashMap<InvertedDocEndConsumerPerThread, ICollection<InvertedDocEndConsumerPerField>>();
    foreach (var entry in threadsAndFields)
    {
        var perThread = (DocInverterPerThread) entry.Key;
        ICollection<InvertedDocConsumerPerField> childFields = new HashSet<InvertedDocConsumerPerField>();
        ICollection<InvertedDocEndConsumerPerField> endChildFields = new HashSet<InvertedDocEndConsumerPerField>();
        foreach (DocFieldConsumerPerField field in entry.Value)
        {
            var perField = (DocInverterPerField)field;
            childFields.Add(perField.consumer);
            endChildFields.Add(perField.endConsumer);
        }
        childThreadsAndFields[perThread.consumer] = childFields;
        endChildThreadsAndFields[perThread.endConsumer] = endChildFields;
    }
    consumer.Flush(childThreadsAndFields, state);
    endConsumer.Flush(endChildThreadsAndFields, state);
}
Example 12: CloseDocStore
public override void CloseDocStore(SegmentWriteState state)
{
    try
    {
        one.CloseDocStore(state);
    }
    finally
    {
        two.CloseDocStore(state);
    }
}
Example 13: Flush
public override void Flush(IDictionary<string, DocFieldConsumerPerField> fieldsToFlush, SegmentWriteState state)
{
    IDictionary<string, InvertedDocConsumerPerField> childFieldsToFlush = new Dictionary<string, InvertedDocConsumerPerField>();
    IDictionary<string, InvertedDocEndConsumerPerField> endChildFieldsToFlush = new Dictionary<string, InvertedDocEndConsumerPerField>();
    foreach (KeyValuePair<string, DocFieldConsumerPerField> fieldToFlush in fieldsToFlush)
    {
        DocInverterPerField perField = (DocInverterPerField)fieldToFlush.Value;
        childFieldsToFlush[fieldToFlush.Key] = perField.Consumer;
        endChildFieldsToFlush[fieldToFlush.Key] = perField.EndConsumer;
    }
    Consumer.Flush(childFieldsToFlush, state);
    EndConsumer.Flush(endChildFieldsToFlush, state);
}
Example 14: FormatPostingsDocsWriter
internal FormatPostingsDocsWriter(SegmentWriteState state, FormatPostingsTermsWriter parent)
    : base()
{
    this.parent = parent;
    System.String fileName = IndexFileNames.SegmentFileName(parent.parent.segment, IndexFileNames.FREQ_EXTENSION);
    SupportClass.CollectionsHelper.AddIfNotContains(state.flushedFiles, fileName);
    out_Renamed = parent.parent.dir.CreateOutput(fileName);
    totalNumDocs = parent.parent.totalNumDocs;
    // TODO: abstraction violation
    skipInterval = parent.parent.termsOut.skipInterval;
    skipListWriter = parent.parent.skipListWriter;
    skipListWriter.SetFreqOutput(out_Renamed);
    posWriter = new FormatPostingsPositionsWriter(state, this);
}
Example 15: FieldsConsumer
public override FieldsConsumer FieldsConsumer(SegmentWriteState state)
{
    PostingsWriterBase docs = new Lucene41PostingsWriter(state);
    // TODO: should we make the terms index more easily
    // pluggable? Ie so that this codec would record which
    // index impl was used, and switch on loading?
    // Or... you must make a new Codec for this?
    TermsIndexWriterBase indexWriter;
    bool success = false;
    try
    {
        indexWriter = new FixedGapTermsIndexWriter(state);
        success = true;
    }
    finally
    {
        if (!success)
        {
            docs.Dispose();
        }
    }
    success = false;
    try
    {
        // Must use BlockTermsWriter (not BlockTree) because
        // BlockTree doesn't support ords (yet)...
        FieldsConsumer ret = new BlockTermsWriter(indexWriter, state, docs);
        success = true;
        return ret;
    }
    finally
    {
        if (!success)
        {
            try
            {
                docs.Dispose();
            }
            finally
            {
                indexWriter.Dispose();
            }
        }
    }
}