本文整理汇总了C#中EndianBinaryReader.ReadInt64方法的典型用法代码示例。如果您正苦于以下问题:C# EndianBinaryReader.ReadInt64方法的具体用法?C# EndianBinaryReader.ReadInt64怎么用?C# EndianBinaryReader.ReadInt64使用的例子?那么恭喜您,这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类EndianBinaryReader的用法示例。
在下文中一共展示了EndianBinaryReader.ReadInt64方法的9个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C#代码示例。
示例1: FromByteArray
/// <summary>
/// Deserializes this instance's fields from a big-endian datagram:
/// a 32-bit action, a 32-bit transaction id, then a 64-bit connection id.
/// </summary>
/// <param name="datagram">Raw datagram bytes to parse.</param>
public void FromByteArray(ref byte[] datagram)
{
    using (MemoryStream buffer = new MemoryStream(datagram))
    using (EndianBinaryReader reader = new EndianBinaryReader(new BigEndianBitConverter(), buffer))
    {
        // Read order must match the wire layout exactly.
        action = reader.ReadInt32();
        transaction_id = reader.ReadInt32();
        connection_id = reader.ReadInt64();
    }
}
示例2: Parse
/// <summary>
/// Parses the message payload: a single Int64 stored into <see cref="Time"/>.
/// </summary>
/// <param name="r">Reader positioned at the start of this message's payload.</param>
protected override void Parse(EndianBinaryReader r)
{
    this.Time = r.ReadInt64();
}
示例3: Parse
/// <summary>
/// Parses the message payload: an Int64 stored into Time, followed by the
/// debug completeness hook.
/// </summary>
/// <param name="r">Reader positioned at the start of this message's payload.</param>
protected override void Parse(EndianBinaryReader r)
{
    Time = r.ReadInt64();
    // NOTE(review): DebugGotAll presumably verifies the whole payload was
    // consumed — confirm against its definition.
    DebugGotAll(r);
}
示例4: Load
/// <summary>
/// Loads a ZIF file: validates the 8-byte magic signature, then walks the
/// little-endian directory of zoom levels. Each directory entry is a 64-bit
/// offset (zero terminates the directory); at that offset sit a tag count,
/// the zoom level data, and the tag records. Levels are prepended to
/// _zoomLevels, so they end up in reverse of file order.
/// </summary>
/// <param name="inputStream">Seekable stream containing the ZIF data.</param>
public void Load(Stream inputStream)
{
    this.SetInternalStream(inputStream);
    DisposeLevels();
    _zoomLevels.Clear();
    _reader = new EndianBinaryReader(EndianBitConverter.Little, inputStream);

    // Check magic bytes before trusting anything else in the file.
    if (_reader.ReadInt64() != 0x08002b4949)
        throw new Exception("Invalid ZIF file");

    // Scan directory entries until a zero offset or the byte limit is hit.
    while (_reader.BaseStream.Position < MAX_ZIF_BYTES)
    {
        ulong levelOffset = _reader.ReadUInt64();
        if (levelOffset == 0)
            break;
        _reader.Seek(levelOffset, SeekOrigin.Begin);

        var tagCount = _reader.ReadUInt64();
        var zoomLevel = new ZoomLevel(_reader);
        for (ulong tagIndex = 0; tagIndex < tagCount; ++tagIndex)
        {
            var tagKey = _reader.ReadUInt16();
            var reserved = _reader.ReadUInt16();    // field present in the file but unused
            var firstValue = _reader.ReadUInt64();
            var secondValue = _reader.ReadUInt64();
            zoomLevel.AddTag(tagKey, firstValue, secondValue);
        }
        // Prepend so the list order is the reverse of on-disk order.
        _zoomLevels.Insert(0, zoomLevel);
    }
}
示例5: Propagate
/// <summary>
/// Propagates an update from a higher-precision archive into the lower-precision
/// archive: aggregates the higher archive's points that fall inside the lower
/// interval containing <paramref name="timestamp"/> and, when the fraction of
/// known points reaches the header's xFilesFactor, writes the aggregate into
/// the lower archive.
/// </summary>
/// <param name="fh">Open whisper file; <paramref name="reader"/>/<paramref name="writer"/> wrap this same stream.</param>
/// <param name="timestamp">Timestamp (Unix seconds, presumably — confirm with callers) being propagated.</param>
/// <returns>false when too few known points exist to aggregate; true otherwise.</returns>
private static bool Propagate(FileStream fh, EndianBinaryReader reader, EndianBinaryWriter writer, Header header, long timestamp, ArchiveInfo higher, ArchiveInfo lower)
{
    var aggregationType = header.AggregationType;
    var xff = header.xFilesFactor;
    // Align timestamp down to the lower archive's interval boundary.
    var lowerIntervalStart = timestamp - timestamp.Mod(lower.SecondsPerPoint);
    var lowerIntervalEnd = lowerIntervalStart + lower.SecondsPerPoint;
    // The first point of an archive is its "base"; its timestamp anchors all
    // offset arithmetic below.
    fh.Seek(higher.Offset, SeekOrigin.Begin);
    var higherBaseInterval = reader.ReadInt64(); // timestamp
    var higherBaseValue = reader.ReadDouble(); // value
    long higherFirstOffset;
    if (higherBaseInterval == 0)
    {
        // Base timestamp 0 = archive never written; start at its beginning.
        higherFirstOffset = higher.Offset;
    }
    else
    {
        // Ring-buffer addressing: distance in points from the base, wrapped
        // into the archive via Mod, converted to a byte offset.
        var timeDistance = lowerIntervalStart - higherBaseInterval;
        var pointDistance = timeDistance / higher.SecondsPerPoint;
        var byteDistance = pointDistance * PointSize;
        higherFirstOffset = higher.Offset + byteDistance.Mod(higher.Size);
    }
    // Number of higher-precision points covered by one lower-precision point.
    var higherPoints = lower.SecondsPerPoint / higher.SecondsPerPoint;
    var higherSize = higherPoints * PointSize;
    var relativeFirstOffset = higherFirstOffset - higher.Offset;
    var relativeLastOffset = (relativeFirstOffset + higherSize).Mod(higher.Size);
    var higherLastOffset = relativeLastOffset + higher.Offset;
    fh.Seek(higherFirstOffset, SeekOrigin.Begin);
    byte[] seriesBuffer;
    int bytesRead = 0;
    if (higherFirstOffset < higherLastOffset)
    {
        seriesBuffer = new byte[(int)(higherLastOffset - higherFirstOffset)];
        // we don't wrap the archive
        bytesRead = fh.Read(seriesBuffer, 0, seriesBuffer.Length);
    }
    else
    {
        // The span wraps past the archive end: read tail then head.
        var higherEnd = higher.Offset + higher.Size;
        var firstPart = (int)(higherEnd - higherFirstOffset);
        var secondPart = (int)(higherLastOffset - higher.Offset);
        seriesBuffer = new byte[firstPart + secondPart];
        bytesRead += fh.Read(seriesBuffer, 0, firstPart);
        fh.Seek(higher.Offset, SeekOrigin.Begin);
        bytesRead += fh.Read(seriesBuffer, firstPart, secondPart);
        //var archiveEnd = higher.Offset + higher.Size;
        //seriesBuffer = new byte[(int)(archiveEnd - higherFirstOffset) + (int)(higherLastOffset - higher.Offset)];
        //// We do wrap around the archive, so we need two reads
        //bytesRead += fh.Read(seriesBuffer, 0, (int)(archiveEnd - higherFirstOffset));
        //if (higherLastOffset < higherFirstOffset)
        //{
        //    fh.Seek(higher.Offset, SeekOrigin.Begin);
        //    bytesRead += fh.Read(seriesBuffer, 0, (int)(higherLastOffset - higher.Offset));
        //}
    }
    var neighborValues = UnpackSeriesBuffer(seriesBuffer, bytesRead);
    // Propagate aggregateValue to propagate from neighborValues if we have enough known points
    // (a point with a default/zero timestamp counts as unknown).
    var knownValues = neighborValues.Where(x => !x.Equals(default(PointPair)) && x.Timestamp != default(long)).Select(x => x.value);
    if (knownValues.Count() == 0)
    {
        return false;
    }
    var knownPercent = (double)knownValues.Count() / (double)neighborValues.Length;
    Debug.WriteLine(string.Format("Calculate Aggregate xff = {0} for {1} points", knownPercent, knownValues.Count()));
    if (knownPercent >= xff)
    {
        // we have enough data to propagate a value
        var aggregateValue = Aggregate(aggregationType, knownValues);
        fh.Seek(lower.Offset, SeekOrigin.Begin);
        var lowerBaseInterval = reader.ReadInt64(); // timestamp
        var lowerBaseValue = reader.ReadDouble(); // value
        if (lowerBaseInterval == 0)
        {
            // First propagated update to this lower archive
            fh.Seek(lower.Offset, SeekOrigin.Begin);
            writer.Write(lowerIntervalStart);
            writer.Write(aggregateValue);
            Debug.WriteLine(string.Format("writing aggregate point ({0},{1}) to position {2} - first update", lowerIntervalStart, aggregateValue, lower.Offset));
        }
        else
        {
            // Not our first propagated update to this lower archive
            var timeDistance = lowerIntervalStart - lowerBaseInterval;
            var pointDistance = timeDistance / lower.SecondsPerPoint;
            var byteDistance = pointDistance * PointSize;
            var lowerOffset = lower.Offset + (byteDistance.Mod(lower.Size));
            Debug.WriteLine(string.Format("calculating aggregate offset int = {0} base = {1} td = {2} pd = {3} bd = {4} offset = {5}", lowerIntervalStart, lowerBaseInterval, timeDistance, pointDistance, byteDistance, lowerOffset));
            fh.Seek(lowerOffset, SeekOrigin.Begin);
            writer.Write(lowerIntervalStart);
//......... the remainder of this method is omitted in the sample .........
示例6: FileUpdate
/// <summary>
/// Writes a single (timestamp, value) point into the highest-precision archive
/// that covers the timestamp, then propagates the update to each successive
/// lower-precision archive until a propagation finds nothing to write.
/// </summary>
/// <param name="fh">Open whisper database file.</param>
/// <param name="value">Value to record.</param>
/// <param name="timestamp">Point timestamp; defaults to <paramref name="now"/> when null.</param>
/// <param name="now">Current time (Unix seconds); defaults to UtcNow when null.</param>
/// <exception cref="TimestampNotCoveredException">
/// Timestamp is in the future or older than the database's maximum retention.
/// </exception>
private static void FileUpdate(FileStream fh, double value, long? timestamp, long? now)
{
    var header = ReadHeader(fh);
    now = now ?? DateTime.UtcNow.ToUnixTime();
    if (!timestamp.HasValue)
    {
        timestamp = now.Value;
    }
    var diff = now - timestamp;
    // Reject future timestamps (diff < 0) and ones beyond max retention.
    if (!(diff < header.MaxRetention && diff >= 0))
    {
        throw new TimestampNotCoveredException("Timestamp not covered by any archives in this database.");
    }
    List<ArchiveInfo> lowerArchives = null;
    ArchiveInfo archive = new ArchiveInfo();
    // Archives are assumed ordered highest-precision first — TODO confirm
    // against the file format / CreateValidation.
    for (int i = 0; i < header.ArchiveList.Count; i++)
    {
        archive = header.ArchiveList[i];
        // Find the highest-precision archive that covers timestamp
        if (archive.Retention < diff)
        {
            continue;
        }
        // We'll pass on the update to these lower precision archives later
        lowerArchives = header.ArchiveList.Skip(i + 1).ToList();
        break;
    }
    // Big-endian reader/writer over non-closing wrappers so disposing them
    // leaves the caller's FileStream open.
    using (var reader = new EndianBinaryReader(EndianBitConverter.Big, new NonClosingStreamWrapper(fh)))
    using (var writer = new EndianBinaryWriter(EndianBitConverter.Big, new NonClosingStreamWrapper(fh)))
    {
        // First we update the highest-precision archive
        var myInterval = timestamp.Value - (timestamp.Mod(archive.SecondsPerPoint));
        fh.Seek(archive.Offset, SeekOrigin.Begin);
        var baseInterval = reader.ReadInt64(); // timestamp
        var baseValue = reader.ReadDouble(); // value
        if (baseInterval == 0)
        {
            // this file's first update
            fh.Seek(archive.Offset, SeekOrigin.Begin);
            writer.Write(myInterval);
            writer.Write(value);
            baseInterval = myInterval;
            baseValue = value;
            Debug.WriteLine(string.Format("writing point ({0},{1}) to position {2} - first update", myInterval, value, archive.Offset));
        }
        else
        {
            // not our first update: ring-buffer addressing relative to the
            // archive's base point, wrapped with Mod.
            var timeDistance = myInterval - baseInterval;
            var pointDistance = timeDistance / archive.SecondsPerPoint;
            var byteDistance = pointDistance * PointSize;
            var myOffset = archive.Offset + (byteDistance.Mod(archive.Size));
            Debug.WriteLine(string.Format("calculating offset int = {0} base = {1} td = {2} pd = {3} bd = {4} offset = {5}", myInterval, baseInterval, timeDistance, pointDistance, byteDistance, myOffset));
            fh.Seek(myOffset, SeekOrigin.Begin);
            writer.Write(myInterval);
            writer.Write(value);
            Debug.WriteLine(string.Format("writing point ({0},{1}) to position {2}", myInterval, value, myOffset));
        }
        // Now we propagate the update to lower-precision archives, stopping
        // at the first archive with too few known points.
        var higher = archive;
        foreach (var lower in lowerArchives)
        {
            if (!Propagate(fh, reader, writer, header, myInterval, higher, lower))
            {
                break;
            }
            higher = lower;
        }
    }
    fh.Flush(AutoFlush);
}
示例7: UnpackSeriesBuffer
/// <summary>
/// Decodes a raw archive buffer into points: each record is a big-endian
/// Int64 timestamp followed by a big-endian double value.
/// (Assumes PointSize == sizeof(long) + sizeof(double) — TODO confirm.)
/// </summary>
/// <param name="seriesBuffer">Raw bytes read from an archive.</param>
/// <param name="bytesRead">Count of valid bytes in <paramref name="seriesBuffer"/>.</param>
/// <returns>One PointPair per complete record in the buffer.</returns>
private static PointPair[] UnpackSeriesBuffer(byte[] seriesBuffer, int bytesRead)
{
    var points = new PointPair[bytesRead / PointSize];
    using (var memory = new MemoryStream(seriesBuffer))
    using (var reader = new EndianBinaryReader(EndianBitConverter.Big, memory))
    {
        int index = 0;
        while (index < points.Length)
        {
            long timestamp = reader.ReadInt64();
            double value = reader.ReadDouble();
            points[index] = new PointPair(timestamp, value);
            //Debug.WriteLine(string.Format("Reading Point ({0},{1}) from i = {2}", timestamp, value, index));
            index++;
        }
    }
    return points;
}
示例8: ArchiveFetch
/// <summary>
/// Fetch data from a single archive. Note that checks for validity of the time
/// period requested happen above this level so it's possible to wrap around the
/// archive on a read and request data older than the archive's retention.
/// Intervals are aligned up to the next point boundary; when the archive has
/// never been written (base timestamp 0) a list of zero points is returned.
/// </summary>
/// <param name="fh">Open whisper database file.</param>
/// <param name="archive">Archive to read from.</param>
/// <param name="fromTime">Inclusive start of the requested period.</param>
/// <param name="untilTime">End of the requested period.</param>
/// <exception cref="CorruptWhisperFileException">A contiguous read returned fewer bytes than expected.</exception>
private static ArchiveFetch ArchiveFetch(FileStream fh, ArchiveInfo archive, long fromTime, long untilTime)
{
    Debug.WriteLine(string.Format("ArchiveFetch from {0} to {1} in archive [{2},{3}]", fromTime, untilTime, archive.SecondsPerPoint, archive.Points));
    // Round both ends down to a point boundary, then step one point forward.
    var fromInterval = (fromTime - (fromTime.Mod(archive.SecondsPerPoint))) + (int)archive.SecondsPerPoint;
    var untilInterval = (untilTime - (untilTime.Mod(archive.SecondsPerPoint))) + (int)archive.SecondsPerPoint;
    fh.Seek(archive.Offset, SeekOrigin.Begin);
    using (var reader = new EndianBinaryReader(EndianBitConverter.Big, new NonClosingStreamWrapper(fh)))
    {
        var baseInterval = reader.ReadInt64(); // timestamp
        var baseValue = reader.ReadDouble(); // value
        if (baseInterval == 0)
        {
            // Empty archive: synthesize the requested range filled with (0,0).
            var step = archive.SecondsPerPoint;
            var points = (int)((untilInterval - fromInterval) / step);
            var _timeInfo = new TimeInfo(fromInterval, untilInterval, archive.SecondsPerPoint);
            var _valueList = Enumerable.Repeat(new PointPair(0, 0), points).ToList();
            return new ArchiveFetch(_timeInfo, _valueList);
        }
        // Determine fromOffset (ring-buffer position relative to the base point)
        var timeDistance = fromInterval - baseInterval;
        var pointDistance = timeDistance / archive.SecondsPerPoint;
        var byteDistance = pointDistance * PointSize;
        var fromOffset = (int)(archive.Offset + (byteDistance.Mod(archive.Size)));
        // Determine untilOffset
        timeDistance = untilInterval - baseInterval;
        pointDistance = timeDistance / archive.SecondsPerPoint;
        byteDistance = pointDistance * PointSize;
        var untilOffset = (int)(archive.Offset + (byteDistance.Mod(archive.Size)));
        // read all the points in the interval
        fh.Seek(fromOffset, SeekOrigin.Begin);
        byte[] seriesBuffer;
        int bytesRead = 0;
        if (fromOffset < untilOffset)
        {
            // If we don't wrap around the archive
            seriesBuffer = new byte[(int)(untilOffset - fromOffset)];
            bytesRead += fh.Read(seriesBuffer, 0, seriesBuffer.Length);
            if (bytesRead != seriesBuffer.Length)
            {
                throw new CorruptWhisperFileException(string.Format("read: {0} != {1}", bytesRead, seriesBuffer.Length));
            }
            Debug.WriteLine(string.Format("read {0} points starting at offset {1}", (bytesRead / PointSize), fromOffset));
        }
        else
        {
            // We do wrap around the archive, so we need two reads
            var archiveEnd = archive.Offset + archive.Size;
            var firstPart = (int)(archiveEnd - fromOffset);
            var secondPart = (int)(untilOffset - archive.Offset);
            seriesBuffer = new byte[firstPart + secondPart];
            bytesRead += fh.Read(seriesBuffer, 0, firstPart);
            Debug.WriteLine(string.Format("read {0} points starting at offset {1}", (firstPart / PointSize), fromOffset));
            fh.Seek(archive.Offset, SeekOrigin.Begin);
            bytesRead += fh.Read(seriesBuffer, firstPart, secondPart);
            Debug.WriteLine(string.Format("read {0} points starting at offset {1}", (secondPart / PointSize), archive.Offset));
        }
        // Filter out never-written slots (default points / zero timestamps).
        var valueList = UnpackSeriesBuffer(seriesBuffer, bytesRead);
        var timeInfo = new TimeInfo(fromInterval, untilInterval, archive.SecondsPerPoint);
        return new ArchiveFetch(timeInfo, valueList.Where(x => !x.Equals(default(PointPair)) && x.Timestamp != default(long)).ToList());
    }
}
示例9: ReadHeader
/// <summary>
/// Reads (and optionally caches) the whisper file header: aggregation type,
/// max retention, xFilesFactor and archive count, followed by one
/// (offset, secondsPerPoint, points) triple per archive — all big-endian.
/// On a cache miss the stream is left positioned just past the header.
/// </summary>
/// <param name="fh">Open whisper database file.</param>
/// <returns>The parsed header.</returns>
/// <exception cref="CorruptWhisperFileException">
/// The header or an archive's metadata could not be read.
/// </exception>
private static Header ReadHeader(FileStream fh)
{
    // Single lookup instead of ContainsKey + indexer: avoids the double
    // lookup and the check-then-read race on the shared cache.
    Header cached;
    if (cachedHeaders.TryGetValue(fh.Name, out cached))
    {
        return cached;
    }
    // NOTE(review): the original captured fh.Position here but never restored
    // it (dead local, removed); callers seek explicitly after this call.
    fh.Seek(0, SeekOrigin.Begin);
    Header header;
    using (var reader = new EndianBinaryReader(EndianBitConverter.Big, new NonClosingStreamWrapper(fh)))
    {
        long aggregationType;
        long maxRetention;
        double xff;
        long archiveCount;
        try
        {
            aggregationType = reader.ReadInt64();
            maxRetention = reader.ReadInt64();
            xff = reader.ReadDouble();
            archiveCount = reader.ReadInt64();
        }
        catch (Exception e)
        {
            throw new CorruptWhisperFileException("Unable to read header", fh.Name, e);
        }
        var archives = new List<ArchiveInfo>();
        for (int i = 0; i < archiveCount; i++)
        {
            try
            {
                var offset = reader.ReadInt64();
                var secondsPerPoint = reader.ReadInt64();
                var points = reader.ReadInt64();
                archives.Add(new ArchiveInfo(secondsPerPoint, points, offset));
            }
            catch (Exception e)
            {
                throw new CorruptWhisperFileException(string.Format("Unable to read archive{0} metadata", i), fh.Name, e);
            }
        }
        header = new Header((AggregationType)aggregationType, maxRetention, xff, archives);
    }
    if (CacheHeaders)
    {
        // TryAdd: losing a race to another thread is harmless — both parsed
        // the same file.
        cachedHeaders.TryAdd(fh.Name, header);
    }
    return header;
}