本文整理汇总了C#中PrepareLogRecord.GetSizeWithLengthPrefix方法的典型用法代码示例。如果您正苦于以下问题:C# PrepareLogRecord.GetSizeWithLengthPrefix方法的具体用法?C# PrepareLogRecord.GetSizeWithLengthPrefix怎么用?C# PrepareLogRecord.GetSizeWithLengthPrefix使用的例子?那么,这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类PrepareLogRecord的用法示例。
在下文中一共展示了PrepareLogRecord.GetSizeWithLengthPrefix方法的9个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C#代码示例。
示例1: a_record_can_be_written
/// <summary>
/// Writes a prepare record whose 8000-byte payload exceeds the writer's internal
/// buffer into a pre-existing chunked transaction file, then verifies the
/// checkpoint advanced by exactly the prefixed record size and the record can be
/// read back from disk. (Removed leftover Console.WriteLine debug output that
/// added noise to the test run.)
/// </summary>
public void a_record_can_be_written()
{
    // Pre-create the chunk file with a valid header so the DB opens over existing data.
    var filename = Path.Combine(PathName, "prefix.tf0");
    var chunkHeader = new ChunkHeader(1, 10000, 0, 0, 0);
    var chunkBytes = chunkHeader.AsByteArray();
    var buf = new byte[ChunkHeader.Size + ChunkFooter.Size + chunkHeader.ChunkSize];
    Buffer.BlockCopy(chunkBytes, 0, buf, 0, chunkBytes.Length);
    File.WriteAllBytes(filename, buf);

    // 137 is an arbitrary starting offset ("fluff") for the writer checkpoint.
    _checkpoint = new InMemoryCheckpoint(137);
    var db = new TFChunkDb(new TFChunkDbConfig(PathName,
                                               new PrefixFileNamingStrategy(PathName, "prefix.tf"),
                                               chunkHeader.ChunkSize,
                                               0,
                                               _checkpoint,
                                               new ICheckpoint[0]));
    db.OpenVerifyAndClean();
    var writer = new TFChunkWriter(db);

    // Data payload (8000 bytes) is deliberately bigger than the writer's buffer.
    var record = new PrepareLogRecord(logPosition: 0,
                                      correlationId: _correlationId,
                                      eventId: _eventId,
                                      transactionPosition: 0,
                                      eventStreamId: "WorldEnding",
                                      expectedVersion: 1234,
                                      timeStamp: new DateTime(2012, 12, 21),
                                      flags: PrepareFlags.None,
                                      eventType: "type",
                                      data: new byte[8000],
                                      metadata: new byte[] { 7, 17 });
    long pos;
    Assert.IsTrue(writer.Write(record, out pos));
    writer.Close();
    db.Dispose();

    // Checkpoint must have advanced from 137 by the record's length-prefixed size.
    Assert.AreEqual(record.GetSizeWithLengthPrefix() + 137, _checkpoint.Read());

    // Read the record back: skip the chunk header, the 137-byte offset, and the 4-byte length prefix.
    using (var filestream = File.Open(filename, FileMode.Open, FileAccess.Read))
    {
        filestream.Seek(ChunkHeader.Size + 137 + sizeof(int), SeekOrigin.Begin);
        var reader = new BinaryReader(filestream);
        var read = LogRecord.ReadFrom(reader);
        Assert.AreEqual(record, read);
    }
}
开发者ID:vishal-h,项目名称:EventStore-1,代码行数:51,代码来源:when_writing_an_existing_chunked_transaction_file_with_checksum_and_data_bigger_than_buffer.cs
示例2: a_record_can_be_written
/// <summary>
/// Writes a prepare record with an 8000-byte payload (bigger than the writer's
/// buffer) into an existing multifile transaction file and verifies both the
/// checkpoint position and that the record round-trips from disk.
/// </summary>
public void a_record_can_be_written()
{
    var fileName = Path.Combine(PathName, "prefix.tf0");
    File.WriteAllBytes(fileName, new byte[10000]);

    // Checkpoint starts at an arbitrary 137-byte offset into the file.
    _checkpoint = new InMemoryCheckpoint(137);
    var config = new TransactionFileDatabaseConfig(PathName, "prefix.tf", 100000, _checkpoint, new List<ICheckpoint>());
    var writer = new MultifileTransactionFileWriter(config);
    writer.Open();

    var prepare = new PrepareLogRecord(logPosition: 0,
                                       correlationId: _correlationId,
                                       eventId: _eventId,
                                       transactionPosition: 0,
                                       eventStreamId: "WorldEnding",
                                       expectedVersion: 1234,
                                       timeStamp: new DateTime(2012, 12, 21),
                                       flags: PrepareFlags.None,
                                       eventType: "type",
                                       data: new byte[8000],
                                       metadata: new byte[] { 7, 17 });
    long newPosition;
    writer.Write(prepare, out newPosition);
    writer.Close();

    // Checkpoint should end at starting offset + full length-prefixed record size.
    Assert.AreEqual(prepare.GetSizeWithLengthPrefix() + 137, _checkpoint.Read());

    using (var stream = File.Open(fileName, FileMode.Open, FileAccess.Read))
    {
        // Skip the 137-byte offset plus the 4-byte length prefix, then read back.
        stream.Seek(137 + sizeof(int), SeekOrigin.Begin);
        var read = LogRecord.ReadFrom(new BinaryReader(stream));
        Assert.AreEqual(prepare, read);
    }
}
开发者ID:jpierson,项目名称:EventStore,代码行数:31,代码来源:when_writing_an_existing_multifile_transaction_file_with_checksum_and_data_bigger_than_buffer.cs
示例3: a_record_can_be_written
/// <summary>
/// Starts the checkpoint 10 bytes before the end of a 10000-byte file so the
/// written record straddles two physical files, then reassembles the two halves
/// and verifies the record round-trips.
/// </summary>
public void a_record_can_be_written()
{
    var firstFile = Path.Combine(PathName, "prefix.tf0");
    var secondFile = Path.Combine(PathName, "prefix.tf1");
    File.WriteAllBytes(firstFile, new byte[10000]);

    // 9990 leaves only 10 bytes in the first file, forcing a straddling write.
    _checkpoint = new InMemoryCheckpoint(9990);
    var config = new TransactionFileDatabaseConfig(PathName, "prefix.tf", 10000, _checkpoint, new List<ICheckpoint>());
    var writer = new MultifileTransactionFileWriter(config);
    writer.Open();

    var prepare = new PrepareLogRecord(logPosition: 0,
                                       correlationId: _correlationId,
                                       eventId: _eventId,
                                       expectedVersion: 1234,
                                       transactionPosition: 0,
                                       eventStreamId: "WorldEnding",
                                       timeStamp: new DateTime(2012, 12, 21),
                                       flags: PrepareFlags.None,
                                       eventType: "type",
                                       data: new byte[] { 1, 2, 3, 4, 5 },
                                       metadata: new byte[] { 7, 17 });
    long newPosition;
    writer.Write(prepare, out newPosition);
    writer.Close();

    // 9990 is fluff assigned to the beginning of the checkpoint.
    Assert.AreEqual(prepare.GetSizeWithLengthPrefix() + 9990, _checkpoint.Read());
    Assert.IsTrue(File.Exists(secondFile));

    // Stitch the two on-disk halves back together into one in-memory stream:
    // the last 10 bytes of file 0 followed by the remainder from file 1.
    var assembled = new MemoryStream();
    var scratch = new byte[256];
    using (var stream = File.Open(firstFile, FileMode.Open, FileAccess.Read))
    {
        stream.Seek(9990, SeekOrigin.Begin);
        stream.Read(scratch, 0, 10);
        assembled.Write(scratch, 0, 10);
    }
    using (var stream = File.Open(secondFile, FileMode.Open, FileAccess.Read))
    {
        stream.Seek(0, SeekOrigin.Begin);
        var remaining = prepare.GetSizeWithLengthPrefix() - 10;
        stream.Read(scratch, 0, remaining);
        assembled.Write(scratch, 0, remaining);
    }

    // Skip the 4-byte length prefix and read the record from the stitched stream.
    assembled.Seek(0 + sizeof(int), SeekOrigin.Begin);
    var read = LogRecord.ReadFrom(new BinaryReader(assembled));
    Assert.AreEqual(prepare, read);
}
开发者ID:jpierson,项目名称:EventStore,代码行数:44,代码来源:when_writing_an_existing_multifile_transaction_file_with_checksum_that_overlaps.cs
示例4: a_record_can_be_written
/// <summary>
/// Writes a small prepare record into an existing chunked transaction file with
/// a valid chunk header and verifies the checkpoint position and the on-disk
/// round-trip of the record. (Removed commented-out code and a stale TODO —
/// the event IS read back below.)
/// </summary>
public void a_record_can_be_written()
{
    // Pre-create the chunk file with a valid header so the DB opens over existing data.
    var filename = Path.Combine(PathName, "prefix.tf0");
    var chunkHeader = new ChunkHeader(1, 10000, 0, 0, 0);
    var chunkBytes = chunkHeader.AsByteArray();
    var bytes = new byte[ChunkHeader.Size + 10000 + ChunkFooter.Size];
    Buffer.BlockCopy(chunkBytes, 0, bytes, 0, chunkBytes.Length);
    File.WriteAllBytes(filename, bytes);

    // 137 is fluff assigned to the beginning of the checkpoint.
    _checkpoint = new InMemoryCheckpoint(137);
    var db = new TFChunkDb(new TFChunkDbConfig(PathName,
                                               new PrefixFileNamingStrategy(PathName, "prefix.tf"),
                                               10000,
                                               0,
                                               _checkpoint,
                                               new ICheckpoint[0]));
    db.OpenVerifyAndClean();
    var tf = new TFChunkWriter(db);

    var record = new PrepareLogRecord(logPosition: 0,
                                      correlationId: _correlationId,
                                      eventId: _eventId,
                                      expectedVersion: 1234,
                                      transactionPosition: 0,
                                      eventStreamId: "WorldEnding",
                                      timeStamp: new DateTime(2012, 12, 21),
                                      flags: PrepareFlags.None,
                                      eventType: "type",
                                      data: new byte[] { 1, 2, 3, 4, 5 },
                                      metadata: new byte[] { 7, 17 });
    long tmp;
    tf.Write(record, out tmp);
    tf.Close();
    db.Dispose();

    // Checkpoint must have advanced from 137 by the record's length-prefixed size.
    Assert.AreEqual(record.GetSizeWithLengthPrefix() + 137, _checkpoint.Read());

    // Read the record back: skip chunk header, the 137-byte offset and the 4-byte length prefix.
    using (var filestream = File.Open(filename, FileMode.Open, FileAccess.Read))
    {
        filestream.Seek(ChunkHeader.Size + 137 + sizeof(int), SeekOrigin.Begin);
        var reader = new BinaryReader(filestream);
        var read = LogRecord.ReadFrom(reader);
        Assert.AreEqual(record, read);
    }
}
开发者ID:jpierson,项目名称:EventStore,代码行数:45,代码来源:when_writing_an_existing_chunked_transaction_file_with_checksum.cs
示例5: a_record_can_be_written
/// <summary>
/// Writes a small prepare record into a fresh chunked transaction file starting
/// from a zero checkpoint and verifies the checkpoint equals the record's
/// length-prefixed size and that the record round-trips from disk.
/// </summary>
public void a_record_can_be_written()
{
    _checkpoint = new InMemoryCheckpoint(0);
    var db = new TFChunkDb(new TFChunkDbConfig(PathName,
                                               new PrefixFileNamingStrategy(PathName, "prefix.tf"),
                                               1000,
                                               0,
                                               _checkpoint,
                                               new ICheckpoint[0]));
    db.OpenVerifyAndClean();
    var writer = new TFChunkWriter(db);
    writer.Open();

    var prepare = new PrepareLogRecord(logPosition: 0,
                                       correlationId: _correlationId,
                                       eventId: _eventId,
                                       transactionPosition: 0,
                                       eventStreamId: "WorldEnding",
                                       expectedVersion: 1234,
                                       timeStamp: new DateTime(2012, 12, 21),
                                       flags: PrepareFlags.None,
                                       eventType: "type",
                                       data: new byte[] { 1, 2, 3, 4, 5 },
                                       metadata: new byte[] { 7, 17 });
    long newPosition;
    writer.Write(prepare, out newPosition);
    writer.Close();
    db.Dispose();

    // With a zero starting checkpoint, the final value is exactly the prefixed size.
    Assert.AreEqual(prepare.GetSizeWithLengthPrefix(), _checkpoint.Read());

    using (var stream = File.Open(Path.Combine(PathName, "prefix.tf0"), FileMode.Open, FileAccess.Read))
    {
        stream.Position = ChunkHeader.Size;
        var reader = new BinaryReader(stream);
        reader.ReadInt32(); // consume the 4-byte length prefix
        var read = LogRecord.ReadFrom(reader);
        Assert.AreEqual(prepare, read);
    }
}
示例6: can_read_a_record_with_length_straddling_multiple_files
/// <summary>
/// Splits a serialized prepare record across two physical files (2 bytes at the
/// end of file 0, the rest at the start of file 1) and verifies the chaser can
/// read it back as one record and advances the reader checkpoint correctly.
/// </summary>
public void can_read_a_record_with_length_straddling_multiple_files()
{
    var writerCheckpoint = new InMemoryCheckpoint(20020);
    var readerCheckpoint = new InMemoryCheckpoint("reader", 9998);
    var config = new TransactionFileDatabaseConfig(PathName, "prefix.tf", 10000, writerCheckpoint, new[] { readerCheckpoint });

    var prepare = new PrepareLogRecord(logPosition: 0,
                                       correlationId: _correlationId,
                                       eventId: _eventId,
                                       transactionPosition: 0,
                                       eventStreamId: "WorldEnding",
                                       expectedVersion: 1234,
                                       timeStamp: new DateTime(2012, 12, 21),
                                       flags: PrepareFlags.None,
                                       eventType: "type",
                                       data: new byte[] { 1, 2, 3, 4, 5 },
                                       metadata: new byte[] { 7, 17 });

    // Serialize the record with its length prefix into a byte buffer.
    var memStream = new MemoryStream();
    prepare.WriteWithLengthPrefixTo(new BinaryWriter(memStream));
    var serialized = memStream.GetBuffer();

    // First 2 bytes go at offset 9998 of file 0...
    using (var fs = new FileStream(config.FileNamingStrategy.GetFilenameFor(0), FileMode.CreateNew, FileAccess.Write))
    {
        fs.Seek(9998, SeekOrigin.Begin);
        fs.Write(serialized, 0, 2);
        fs.Close();
    }
    // ...and the remainder at the start of file 1.
    using (var fs = new FileStream(config.FileNamingStrategy.GetFilenameFor(1), FileMode.CreateNew, FileAccess.Write))
    {
        fs.Seek(0, SeekOrigin.Begin);
        fs.Write(serialized, 2, prepare.GetSizeWithLengthPrefix() - 2);
        fs.Close();
    }

    var chaser = new MultifileTransactionFileChaser(config, "reader");
    chaser.Open();
    LogRecord record = null;
    var success = chaser.TryReadNext(out record);
    chaser.Close();

    Assert.IsTrue(success);
    Assert.AreEqual(prepare, record);
    // Reader checkpoint advances from 9998 by the full length-prefixed record size.
    Assert.AreEqual(9998 + prepare.GetSizeWithLengthPrefix(), readerCheckpoint.Read());
}
示例7: try_read_returns_record_when_writerchecksum_equal
/// <summary>
/// Writes a record directly to disk, sets the writer checkpoint to exactly the
/// end of that record, and verifies the chaser reads it back and leaves the
/// reader checkpoint equal to the writer checkpoint.
/// </summary>
public void try_read_returns_record_when_writerchecksum_equal()
{
    var writerCheckpoint = new InMemoryCheckpoint(0);
    var readerCheckpoint = new InMemoryCheckpoint("reader", 0);
    var config = new TransactionFileDatabaseConfig(PathName, "prefix.tf", 10000, writerCheckpoint,
                                                   new List<ICheckpoint> { readerCheckpoint });

    var prepare = new PrepareLogRecord(logPosition: 0,
                                       correlationId: _correlationId,
                                       eventId: _eventId,
                                       transactionPosition: 0,
                                       eventStreamId: "WorldEnding",
                                       expectedVersion: 1234,
                                       timeStamp: new DateTime(2012, 12, 21),
                                       flags: PrepareFlags.None,
                                       eventType: "type",
                                       data: new byte[] { 1, 2, 3, 4, 5 },
                                       metadata: new byte[] { 7, 17 });

    // Serialize the record straight to disk, bypassing the transaction file writer.
    using (var fs = new FileStream(Path.Combine(PathName, "prefix.tf0"), FileMode.CreateNew, FileAccess.Write))
    {
        prepare.WriteWithLengthPrefixTo(new BinaryWriter(fs));
        fs.Close();
    }

    // Position the writer checkpoint exactly at the end of the record.
    writerCheckpoint.Write(prepare.GetSizeWithLengthPrefix());

    var chaser = new MultifileTransactionFileChaser(config, "reader");
    chaser.Open();
    LogRecord record = null;
    var success = chaser.TryReadNext(out record);
    chaser.Close();

    Assert.IsTrue(success);
    Assert.AreEqual(record.GetSizeWithLengthPrefix(), readerCheckpoint.Read());
    Assert.AreEqual(prepare, record);
}
示例8: a_record_is_not_written_at_first_but_written_on_second_try
/// <summary>
/// Nearly fills the first chunk with one large record, confirms a second large
/// record is rejected for lack of space, then confirms a smaller third record is
/// accepted (landing in the second chunk) and can be read back from it.
/// </summary>
public void a_record_is_not_written_at_first_but_written_on_second_try()
{
    var firstChunk = Path.Combine(PathName, "prefix.tf0");
    var secondChunk = Path.Combine(PathName, "prefix.tf1");
    var header = new ChunkHeader(1, 10000, 0, 0, 0);
    var headerBytes = header.AsByteArray();
    var fileBytes = new byte[ChunkHeader.Size + 10000 + ChunkFooter.Size];
    Buffer.BlockCopy(headerBytes, 0, fileBytes, 0, headerBytes.Length);
    File.WriteAllBytes(firstChunk, fileBytes);

    _checkpoint = new InMemoryCheckpoint(0);
    var db = new TFChunkDb(new TFChunkDbConfig(PathName,
                                               new PrefixFileNamingStrategy(PathName, "prefix.tf"),
                                               10000,
                                               0,
                                               _checkpoint,
                                               new ICheckpoint[0]));
    db.OpenVerifyAndClean();
    var writer = new TFChunkWriter(db);

    // Factory: all three records are identical except position and metadata size.
    Func<long, byte[], PrepareLogRecord> createPrepare = (position, metadata) =>
        new PrepareLogRecord(logPosition: position,
                             correlationId: _correlationId,
                             eventId: _eventId,
                             expectedVersion: 1234,
                             transactionPosition: position,
                             eventStreamId: "WorldEnding",
                             timeStamp: new DateTime(2012, 12, 21),
                             flags: PrepareFlags.None,
                             eventType: "type",
                             data: new byte[] { 1, 2, 3, 4, 5 },
                             metadata: metadata);

    long pos;
    var bigRecord1 = createPrepare(0, new byte[8000]);
    Assert.IsTrue(writer.Write(bigRecord1, out pos));   // almost fills up the first chunk

    var bigRecord2 = createPrepare(pos, new byte[8000]);
    Assert.IsFalse(writer.Write(bigRecord2, out pos));  // chunk has too little space left

    var smallRecord = createPrepare(pos, new byte[2000]);
    Assert.IsTrue(writer.Write(smallRecord, out pos));  // fits into the second chunk

    writer.Close();
    db.Dispose();

    // Second chunk begins at logical position 10000, so checkpoint = 10000 + record size.
    Assert.AreEqual(smallRecord.GetSizeWithLengthPrefix() + 10000, _checkpoint.Read());

    using (var stream = File.Open(secondChunk, FileMode.Open, FileAccess.Read))
    {
        // Skip the chunk header plus the 4-byte length prefix, then read back.
        stream.Seek(ChunkHeader.Size + sizeof(int), SeekOrigin.Begin);
        var read = LogRecord.ReadFrom(new BinaryReader(stream));
        Assert.AreEqual(smallRecord, read);
    }
}
开发者ID:vishal-h,项目名称:EventStore-1,代码行数:71,代码来源:when_writing_an_existing_chunked_transaction_file_with_not_enough_space_in_chunk.cs
示例9: try_read_returns_record_when_writerchecksum_equal
/// <summary>
/// Writes a record through the chunk writer, pins the writer checkpoint to
/// exactly the end of that record, and verifies the chaser reads it back and
/// leaves the chaser checkpoint equal to the writer checkpoint.
/// </summary>
public void try_read_returns_record_when_writerchecksum_equal()
{
    var writerCheckpoint = new InMemoryCheckpoint(0);
    var chaserCheckpoint = new InMemoryCheckpoint(Checkpoint.Chaser, 0);
    var db = new TFChunkDb(new TFChunkDbConfig(PathName,
                                               new PrefixFileNamingStrategy(PathName, "prefix.tf"),
                                               10000,
                                               0,
                                               writerCheckpoint,
                                               new[] { chaserCheckpoint }));
    db.OpenVerifyAndClean();

    var prepare = new PrepareLogRecord(logPosition: 0,
                                       correlationId: _correlationId,
                                       eventId: _eventId,
                                       transactionPosition: 0,
                                       eventStreamId: "WorldEnding",
                                       expectedVersion: 1234,
                                       timeStamp: new DateTime(2012, 12, 21),
                                       flags: PrepareFlags.None,
                                       eventType: "type",
                                       data: new byte[] { 1, 2, 3, 4, 5 },
                                       metadata: new byte[] { 7, 17 });

    var chunkWriter = new TFChunkWriter(db);
    chunkWriter.Open();
    long newPosition;
    Assert.IsTrue(chunkWriter.Write(prepare, out newPosition));
    chunkWriter.Close();

    // Set the writer checkpoint to exactly the end of the written record.
    writerCheckpoint.Write(prepare.GetSizeWithLengthPrefix());

    var chaser = new TFChunkChaser(db, writerCheckpoint, chaserCheckpoint);
    chaser.Open();
    LogRecord record;
    var success = chaser.TryReadNext(out record);
    chaser.Close();

    Assert.IsTrue(success);
    Assert.AreEqual(record.GetSizeWithLengthPrefix(), chaserCheckpoint.Read());
    Assert.AreEqual(prepare, record);
    db.Close();
}