本文整理汇总了C#中Lucene.Net.Store.IndexOutput.GetFilePointer方法的典型用法代码示例。如果您正苦于以下问题:C# Lucene.Net.Store.IndexOutput.GetFilePointer方法的具体用法?C# Lucene.Net.Store.IndexOutput.GetFilePointer怎么用?C# Lucene.Net.Store.IndexOutput.GetFilePointer使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类Lucene.Net.Store.IndexOutput的用法示例。
在下文中一共展示了Lucene.Net.Store.IndexOutput.GetFilePointer方法的4个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C#代码示例。
示例1: CopyFile
/// <summary>Copy the contents of the file with specified extension into the
/// provided output stream. Use the provided buffer for moving data
/// to reduce memory allocation.
/// </summary>
private void CopyFile(FileEntry source, IndexOutput os, byte[] buffer)
{
IndexInput input = null;
try
{
// Remember where the output started so the copied size can be verified afterwards.
long startPtr = os.GetFilePointer();

input = directory.OpenInput(source.file);
long length = input.Length();
long remainder = length;
int chunk = buffer.Length;

// Move the data one buffer-sized chunk at a time.
while (remainder > 0)
{
int len = remainder < chunk ? (int) remainder : chunk;
input.ReadBytes(buffer, 0, len, false);
os.WriteBytes(buffer, len);
remainder -= len;

// Roughly every 2 MB we will check if
// it's time to abort
if (checkAbort != null)
checkAbort.Work(80);
}

// Verify that remainder is 0
if (remainder != 0)
throw new System.IO.IOException("Non-zero remainder length after copying: " + remainder + " (id: " + source.file + ", length: " + length + ", buffer size: " + chunk + ")");

// Verify that the output length diff is equal to original file
long endPtr = os.GetFilePointer();
long diff = endPtr - startPtr;
if (diff != length)
throw new System.IO.IOException("Difference in the output file offsets " + diff + " does not match the original file length " + length);
}
finally
{
// Always release the input, even when verification throws.
if (input != null)
input.Close();
}
}
示例2: AppendPostings
/* Walk through all unique text tokens (Posting
* instances) found in this field and serialize them
* into a single RAM segment. */
void AppendPostings(DocumentsWriter.FlushState flushState,
FreqProxTermsWriterPerField[] fields,
TermInfosWriter termsOut,
IndexOutput freqOut,
IndexOutput proxOut,
DefaultSkipListWriter skipListWriter)
{
// All entries in fields share the same FieldInfo (asserted in the loop below).
int fieldNumber = fields[0].fieldInfo.number;
int numFields = fields.Length;
// One merge state per input field; each is advanced onto its first term up front.
FreqProxFieldMergeState[] mergeStates = new FreqProxFieldMergeState[numFields];
for (int i = 0; i < numFields; i++)
{
FreqProxFieldMergeState fms = mergeStates[i] = new FreqProxFieldMergeState(fields[i]);
System.Diagnostics.Debug.Assert(fms.field.fieldInfo == fields[0].fieldInfo);
// Should always be true
bool result = fms.nextTerm();
System.Diagnostics.Debug.Assert(result);
}
int skipInterval = termsOut.skipInterval;
bool currentFieldOmitTf = fields[0].fieldInfo.omitTf;
// If current field omits tf then it cannot store
// payloads. We silently drop the payloads in this case:
bool currentFieldStorePayloads = currentFieldOmitTf ? false : fields[0].fieldInfo.storePayloads;
FreqProxFieldMergeState[] termStates = new FreqProxFieldMergeState[numFields];
// Multi-way merge: each outer iteration handles the smallest pending term.
while (numFields > 0)
{
// Get the next term to merge
termStates[0] = mergeStates[0];
int numToMerge = 1;
// Collect every merge state currently positioned on the minimum term.
for (int i = 1; i < numFields; i++)
{
char[] text = mergeStates[i].text;
int textOffset = mergeStates[i].textOffset;
int cmp = compareText(text, textOffset, termStates[0].text, termStates[0].textOffset);
if (cmp < 0)
{
// Strictly smaller term found: restart the candidate list with it.
termStates[0] = mergeStates[i];
numToMerge = 1;
}
else if (cmp == 0)
termStates[numToMerge++] = mergeStates[i];
}
int df = 0;
int lastPayloadLength = -1;
int lastDoc = 0;
char[] text_Renamed = termStates[0].text;
int start = termStates[0].textOffset;
// Record where this term's postings begin in the freq (and optional prox) files.
long freqPointer = freqOut.GetFilePointer();
long proxPointer;
if (proxOut != null)
proxPointer = proxOut.GetFilePointer();
else
proxPointer = 0;
skipListWriter.ResetSkip();
// Now termStates has numToMerge FieldMergeStates
// which all share the same term. Now we must
// interleave the docID streams.
while (numToMerge > 0)
{
// Every skipInterval-th document, buffer a skip entry for this term.
if ((++df % skipInterval) == 0)
{
skipListWriter.SetSkipData(lastDoc, currentFieldStorePayloads, lastPayloadLength);
skipListWriter.BufferSkip(df);
}
// Pick the state with the smallest docID so doc IDs are emitted in ascending order.
FreqProxFieldMergeState minState = termStates[0];
for (int i = 1; i < numToMerge; i++)
if (termStates[i].docID < minState.docID)
minState = termStates[i];
int doc = minState.docID;
int termDocFreq = minState.termFreq;
System.Diagnostics.Debug.Assert(doc < flushState.numDocsInRAM);
System.Diagnostics.Debug.Assert(doc > lastDoc || df == 1);
ByteSliceReader prox = minState.prox;
// Carefully copy over the prox + payload info,
// NOTE(review): the remainder of this method was omitted in this excerpt.
//......... part of the code omitted here .........
示例3: WriteSkip
/// <summary> Writes the buffered skip lists to the given output.
///
/// </summary>
/// <param name="output">the IndexOutput the skip lists shall be written to
/// </param>
/// <returns> the pointer the skip list starts
/// </returns>
internal virtual long WriteSkip(IndexOutput output)
{
long skipPointer = output.GetFilePointer();

// Nothing buffered: just report the current position.
if (skipBuffer == null || skipBuffer.Length == 0)
return skipPointer;

// Write the upper levels first, each non-empty level prefixed with its length.
int level = numberOfSkipLevels;
while (--level > 0)
{
long levelLength = skipBuffer[level].GetFilePointer();
if (levelLength > 0)
{
output.WriteVLong(levelLength);
skipBuffer[level].WriteTo(output);
}
}

// The lowest level is written last, without a length prefix.
skipBuffer[0].WriteTo(output);

return skipPointer;
}
示例4: AppendPostings
/* Walk through all unique text tokens (Posting
* instances) found in this field and serialize them
* into a single RAM segment. */
internal void AppendPostings(ThreadState.FieldData[] fields, TermInfosWriter termsOut, IndexOutput freqOut, IndexOutput proxOut)
{
// All entries in fields share the same FieldInfo (asserted in the loop below).
int fieldNumber = fields[0].fieldInfo.number;
int numFields = fields.Length;
// One merge state per input field; each is advanced onto its first term up front.
FieldMergeState[] mergeStates = new FieldMergeState[numFields];
for (int i = 0; i < numFields; i++)
{
FieldMergeState fms = mergeStates[i] = new FieldMergeState();
fms.field = fields[i];
fms.postings = fms.field.SortPostings();
System.Diagnostics.Debug.Assert(fms.field.fieldInfo == fields [0].fieldInfo);
// Should always be true
bool result = fms.NextTerm();
System.Diagnostics.Debug.Assert(result);
}
int skipInterval = termsOut.skipInterval;
currentFieldStorePayloads = fields[0].fieldInfo.storePayloads;
FieldMergeState[] termStates = new FieldMergeState[numFields];
// Multi-way merge: each outer iteration handles the smallest pending term.
while (numFields > 0)
{
// Get the next term to merge
termStates[0] = mergeStates[0];
int numToMerge = 1;
// Collect every merge state currently positioned on the minimum term.
for (int i = 1; i < numFields; i++)
{
char[] text = mergeStates[i].text;
int textOffset = mergeStates[i].textOffset;
int cmp = CompareText(text, textOffset, termStates[0].text, termStates[0].textOffset);
if (cmp < 0)
{
// Strictly smaller term found: restart the candidate list with it.
termStates[0] = mergeStates[i];
numToMerge = 1;
}
else if (cmp == 0)
termStates[numToMerge++] = mergeStates[i];
}
int df = 0;
int lastPayloadLength = - 1;
int lastDoc = 0;
char[] text2 = termStates[0].text;
int start = termStates[0].textOffset;
// Scan for the 0xffff sentinel that terminates the term text in the char buffer.
int pos = start;
while (text2[pos] != 0xffff)
pos++;
// Record where this term's postings begin in the freq and prox files.
long freqPointer = freqOut.GetFilePointer();
long proxPointer = proxOut.GetFilePointer();
skipListWriter.ResetSkip();
// Now termStates has numToMerge FieldMergeStates
// which all share the same term. Now we must
// interleave the docID streams.
while (numToMerge > 0)
{
// Every skipInterval-th document, buffer a skip entry for this term.
if ((++df % skipInterval) == 0)
{
skipListWriter.SetSkipData(lastDoc, currentFieldStorePayloads, lastPayloadLength);
skipListWriter.BufferSkip(df);
}
// Pick the state with the smallest docID so doc IDs are emitted in ascending order.
FieldMergeState minState = termStates[0];
for (int i = 1; i < numToMerge; i++)
if (termStates[i].docID < minState.docID)
minState = termStates[i];
int doc = minState.docID;
int termDocFreq = minState.termFreq;
System.Diagnostics.Debug.Assert(doc < numDocsInRAM);
System.Diagnostics.Debug.Assert(doc > lastDoc || df == 1);
// Delta-encode the doc ID; the low bit is used elsewhere as a flag (shift by 1).
int newDocCode = (doc - lastDoc) << 1;
lastDoc = doc;
ByteSliceReader prox = minState.prox;
// Carefully copy over the prox + payload info,
// changing the format to match Lucene's segment
// format.
for (int j = 0; j < termDocFreq; j++)
{
// NOTE(review): the remainder of this method was omitted in this excerpt.
//......... part of the code omitted here .........