This article collects typical usage examples of the FileStream.FlushToDisk method in C#. If you have been wondering what FileStream.FlushToDisk does, how to call it, or what it looks like in real code, the curated examples below may help. You can also explore further usage of the class it operates on, System.IO.FileStream.
Five code examples of FileStream.FlushToDisk are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C# examples.
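Note that FlushToDisk is not a built-in member of System.IO.FileStream; in the examples below (which appear to come from the EventStore codebase) it is called as an extension method. A minimal sketch of such an extension, assuming FileStream.Flush(true) provides the durable-flush behavior the callers rely on:

using System.IO;

public static class FileStreamExtensions
{
    // Flush managed buffers, then ask the OS to flush its file buffers
    // to the physical device (fsync-like behavior).
    public static void FlushToDisk(this FileStream fs)
    {
        fs.Flush(flushToDisk: true);
    }
}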
Example 1: Test
public void Test()
{
    // Measures FlushToDisk latency for doubling write sizes.
    // Filename is assumed to be a field of the containing test class.
    var rnd = new Random();
    var sw = Stopwatch.StartNew();   // times a single flush
    var gw = Stopwatch.StartNew();   // times a whole batch of iterations
    using (var fs = new FileStream(Filename, FileMode.OpenOrCreate))
    {
        const int iter = 1000;
        for (int bytes = 100; bytes < 1000000; bytes *= 2)
        {
            var arr = new byte[bytes];
            rnd.NextBytes(arr);
            TimeSpan min = TimeSpan.FromHours(1);
            TimeSpan max = TimeSpan.Zero;
            gw.Restart();
            for (int i = 0; i < iter; ++i)
            {
                fs.Write(arr, 0, arr.Length);
                sw.Restart();
                fs.FlushToDisk();
                var elapsed = sw.Elapsed;
                min = elapsed < min ? elapsed : min;
                max = elapsed > max ? elapsed : max;
            }
            gw.Stop();
            Console.WriteLine("{0} bytes, Min: {1}, Max: {2}, Avg: {3}",
                              bytes,
                              min,
                              max,
                              TimeSpan.FromTicks(gw.Elapsed.Ticks / iter));
        }
    }
}
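For context, here is a self-contained variant of the same measurement, timing a single durable append. The file name is illustrative, and Flush(true) stands in for the FlushToDisk extension:

using System;
using System.Diagnostics;
using System.IO;

class FlushProbe
{
    static void Main()
    {
        var payload = new byte[4096];
        using (var fs = new FileStream("probe.dat", FileMode.OpenOrCreate))
        {
            fs.Seek(0, SeekOrigin.End);
            var sw = Stopwatch.StartNew();
            fs.Write(payload, 0, payload.Length);
            fs.Flush(flushToDisk: true); // stands in for fs.FlushToDisk()
            Console.WriteLine("Durable 4 KiB append took {0}", sw.Elapsed);
        }
    }
}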
Example 2: TruncateChunkAndFillWithZeros
private void TruncateChunkAndFillWithZeros(ChunkHeader chunkHeader, string chunkFilename, long truncateChk)
{
    if (chunkHeader.IsScavenged
        || chunkHeader.ChunkStartNumber != chunkHeader.ChunkEndNumber
        || truncateChk < chunkHeader.ChunkStartPosition
        || truncateChk >= chunkHeader.ChunkEndPosition)
    {
        throw new Exception(
            string.Format("Chunk #{0}-{1} ({2}) is not correct unscavenged chunk. TruncatePosition: {3}, ChunkHeader: {4}.",
                          chunkHeader.ChunkStartNumber, chunkHeader.ChunkEndNumber, chunkFilename, truncateChk, chunkHeader));
    }

    using (var fs = new FileStream(chunkFilename, FileMode.Open, FileAccess.ReadWrite, FileShare.Read))
    {
        fs.SetLength(ChunkHeader.Size + chunkHeader.ChunkSize + ChunkFooter.Size);
        fs.Position = ChunkHeader.Size + chunkHeader.GetLocalLogPosition(truncateChk);

        var zeros = new byte[65536];
        var leftToWrite = fs.Length - fs.Position;
        while (leftToWrite > 0)
        {
            var toWrite = (int)Math.Min(leftToWrite, zeros.Length);
            fs.Write(zeros, 0, toWrite);
            leftToWrite -= toWrite;
        }
        fs.FlushToDisk();
    }
}
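The core of this example is the zero-fill loop: everything past the truncation point is overwritten in fixed-size blocks and then forced to disk. A minimal standalone sketch of that pattern, with illustrative names and Flush(true) standing in for the FlushToDisk extension:

using System;
using System.IO;

static class ZeroFill
{
    // Overwrite everything from `fromPosition` to the end of the file
    // with zeros in 64 KiB blocks, then force the result to disk.
    public static void ZeroFillToEnd(FileStream fs, long fromPosition)
    {
        fs.Position = fromPosition;
        var zeros = new byte[65536];
        var leftToWrite = fs.Length - fs.Position;
        while (leftToWrite > 0)
        {
            var toWrite = (int)Math.Min(leftToWrite, zeros.Length);
            fs.Write(zeros, 0, toWrite);
            leftToWrite -= toWrite;
        }
        fs.Flush(flushToDisk: true);
    }
}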
Example 3: MergeTo
public static PTable MergeTo(IList<PTable> tables, string outputFile, Func<IndexEntry, bool> recordExistsAt, int cacheDepth = 16)
{
    Ensure.NotNull(tables, "tables");
    Ensure.NotNullOrEmpty(outputFile, "outputFile");
    Ensure.Nonnegative(cacheDepth, "cacheDepth");

    var fileSize = GetFileSize(tables); // approximate file size
    if (tables.Count == 2)
        return MergeTo2(tables, fileSize, outputFile, recordExistsAt, cacheDepth); // special case

    Log.Trace("PTables merge started.");
    var watch = Stopwatch.StartNew();

    var enumerators = tables.Select(table => table.IterateAllInOrder().GetEnumerator()).ToList();
    for (int i = 0; i < enumerators.Count; i++)
    {
        if (!enumerators[i].MoveNext())
        {
            enumerators[i].Dispose();
            enumerators.RemoveAt(i);
            i--;
        }
    }

    long dumpedEntryCount = 0;
    using (var f = new FileStream(outputFile, FileMode.CreateNew, FileAccess.ReadWrite, FileShare.None,
                                  DefaultSequentialBufferSize, FileOptions.SequentialScan))
    {
        f.SetLength(fileSize);
        f.Seek(0, SeekOrigin.Begin);

        using (var md5 = MD5.Create())
        using (var cs = new CryptoStream(f, md5, CryptoStreamMode.Write))
        using (var bs = new BufferedStream(cs, DefaultSequentialBufferSize))
        {
            // WRITE HEADER
            var headerBytes = new PTableHeader(Version).AsByteArray();
            cs.Write(headerBytes, 0, headerBytes.Length);

            var buffer = new byte[IndexEntrySize];
            // WRITE INDEX ENTRIES
            while (enumerators.Count > 0)
            {
                var idx = GetMaxOf(enumerators);
                var current = enumerators[idx].Current;
                if (recordExistsAt(current))
                {
                    AppendRecordTo(bs, current.Bytes, buffer);
                    dumpedEntryCount += 1;
                }
                if (!enumerators[idx].MoveNext())
                {
                    enumerators[idx].Dispose();
                    enumerators.RemoveAt(idx);
                }
            }
            bs.Flush();
            cs.FlushFinalBlock();

            f.FlushToDisk();
            f.SetLength(f.Position + MD5Size);

            // WRITE MD5
            var hash = md5.Hash;
            f.Write(hash, 0, hash.Length);
            f.FlushToDisk();
        }
    }
    Log.Trace("PTables merge finished in {0} ([{1}] entries merged into {2}).",
              watch.Elapsed, string.Join(", ", tables.Select(x => x.Count)), dumpedEntryCount);
    return new PTable(outputFile, Guid.NewGuid(), depth: cacheDepth);
}
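The notable trick here is hashing while writing: the payload goes through a CryptoStream wrapping an MD5 instance (HashAlgorithm implements ICryptoTransform), so the checksum is computed incrementally as bytes are written, and the 16-byte hash is then appended at the end of the file. A minimal sketch of that pattern, with illustrative names and Flush(true) standing in for FlushToDisk:

using System.IO;
using System.Security.Cryptography;

static class HashedWriter
{
    // Stream the payload through a CryptoStream so MD5 is computed as
    // the data is written, then append the 16-byte hash at the end.
    public static void WriteWithTrailingMd5(string path, byte[] payload)
    {
        using (var f = new FileStream(path, FileMode.CreateNew, FileAccess.ReadWrite, FileShare.None))
        using (var md5 = MD5.Create())
        using (var cs = new CryptoStream(f, md5, CryptoStreamMode.Write))
        {
            cs.Write(payload, 0, payload.Length);
            cs.FlushFinalBlock();                 // finalizes the MD5 computation
            f.Write(md5.Hash, 0, md5.Hash.Length);
            f.Flush(flushToDisk: true);
        }
    }
}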
Example 4: CreateWriterWorkItemForNewChunk
private void CreateWriterWorkItemForNewChunk(ChunkHeader chunkHeader, int fileSize)
{
    var md5 = MD5.Create();

    // Create a temp file first and set the desired length. If there is not
    // enough disk space, or something else prevents the file from being
    // resized as desired, we'll end up with an empty temp file, which won't
    // trigger a false error on the next DB verification.
    var tempFilename = string.Format("{0}.{1}.tmp", _filename, Guid.NewGuid());
    var tempFile = new FileStream(tempFilename, FileMode.CreateNew, FileAccess.ReadWrite, FileShare.Read,
                                  WriteBufferSize, FileOptions.SequentialScan);
    tempFile.SetLength(fileSize);

    // We need to write the header into the temp file before moving it into
    // its correct chunk place, so in case of a crash we don't end up with a
    // seemingly valid chunk file with no header at all...
    WriteHeader(md5, tempFile, chunkHeader);

    tempFile.FlushToDisk();
    tempFile.Close();
    File.Move(tempFilename, _filename);

    var stream = new FileStream(_filename, FileMode.Open, FileAccess.ReadWrite, FileShare.Read,
                                WriteBufferSize, FileOptions.SequentialScan);
    stream.Position = ChunkHeader.Size;
    _writerWorkItem = new WriterWorkItem(stream, null, md5);
    Flush(); // persist file move result
}
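The durability pattern in this example generalizes: write and durably flush a temp file, then rename it into place, so a crash can never leave a half-written file at the final path. A minimal sketch, with illustrative names and Flush(true) standing in for FlushToDisk:

using System.IO;

static class DurableCreate
{
    // Prepare the file fully at a temp path, flush it to disk, then move
    // it to its final name only once the contents are safely persisted.
    public static void CreateDurably(string finalPath, byte[] header, long fileSize)
    {
        var tempPath = finalPath + ".tmp";
        using (var tmp = new FileStream(tempPath, FileMode.CreateNew, FileAccess.ReadWrite, FileShare.Read))
        {
            tmp.SetLength(fileSize);               // reserve space up front
            tmp.Write(header, 0, header.Length);
            tmp.Flush(flushToDisk: true);
        }
        File.Move(tempPath, finalPath);            // typically atomic on the same volume
    }
}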
Example 5: MergeTo2
private static PTable MergeTo2(IList<PTable> tables, long fileSize, string outputFile,
                               Func<IndexEntry, bool> recordExistsAt, int cacheDepth)
{
    Log.Trace("PTables merge started (specialized for <= 2 tables).");
    var watch = Stopwatch.StartNew();

    var enumerators = tables.Select(table => table.IterateAllInOrder().GetEnumerator()).ToList();
    long dumpedEntryCount = 0;
    using (var f = new FileStream(outputFile, FileMode.CreateNew, FileAccess.ReadWrite, FileShare.None,
                                  DefaultSequentialBufferSize, FileOptions.SequentialScan))
    {
        f.SetLength(fileSize);
        f.Seek(0, SeekOrigin.Begin);

        using (var md5 = MD5.Create())
        using (var cs = new CryptoStream(f, md5, CryptoStreamMode.Write))
        using (var bs = new BufferedStream(cs, DefaultSequentialBufferSize))
        {
            // WRITE HEADER
            var headerBytes = new PTableHeader(Version).AsByteArray();
            cs.Write(headerBytes, 0, headerBytes.Length);

            // WRITE INDEX ENTRIES
            var buffer = new byte[IndexEntrySize];
            var enum1 = enumerators[0];
            var enum2 = enumerators[1];
            bool available1 = enum1.MoveNext();
            bool available2 = enum2.MoveNext();
            IndexEntry current;
            while (available1 || available2)
            {
                if (available1 && (!available2 || enum1.Current.CompareTo(enum2.Current) > 0))
                {
                    current = enum1.Current;
                    available1 = enum1.MoveNext();
                }
                else
                {
                    current = enum2.Current;
                    available2 = enum2.MoveNext();
                }

                if (recordExistsAt(current))
                {
                    AppendRecordTo(bs, current.Bytes, buffer);
                    dumpedEntryCount += 1;
                }
            }
            bs.Flush();
            cs.FlushFinalBlock();

            f.SetLength(f.Position + MD5Size);

            // WRITE MD5
            var hash = md5.Hash;
            f.Write(hash, 0, hash.Length);
            f.FlushToDisk();
        }
    }
    Log.Trace("PTables merge finished in {0} ([{1}] entries merged into {2}).",
              watch.Elapsed, string.Join(", ", tables.Select(x => x.Count)), dumpedEntryCount);
    return new PTable(outputFile, Guid.NewGuid(), depth: cacheDepth);
}
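Note that the two-way merge always takes the larger of the two current entries first (CompareTo(...) > 0), which is consistent with IterateAllInOrder yielding entries in descending order, matching the GetMaxOf selection in Example 3. A hypothetical call site for the non-specialized MergeTo above (the table instances and output path are illustrative, not from the original):

// Hypothetical usage; how the PTable instances were built is omitted.
var merged = PTable.MergeTo(
    new[] { table1, table2, table3 },
    "index.merged.ptable",
    recordExistsAt: entry => true, // keep every entry
    cacheDepth: 16);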