本文整理汇总了C#中KafkaNet.Producer.GetTopicOffsetAsync方法的典型用法代码示例。如果您正苦于以下问题:C# Producer.GetTopicOffsetAsync方法的具体用法?C# Producer.GetTopicOffsetAsync怎么用?C# Producer.GetTopicOffsetAsync使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类KafkaNet.Producer
的用法示例。
在下文中一共展示了Producer.GetTopicOffsetAsync方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C#代码示例。
示例1: EnsureGzipCanDecompressMessageFromKafka
public void EnsureGzipCanDecompressMessageFromKafka()
{
    // Round-trips gzip-compressed messages: produce 3 messages with CodecGzip,
    // then consume from the pre-produce head offset and verify the payloads
    // decompressed back to "0", "1", "2".
    // FIX: router was never disposed, and the send tasks were fire-and-forget
    // (failures surfaced only as a consume timeout, exceptions unobserved).
    using (var router = new BrokerRouter(_options))
    using (var producer = new Producer(router))
    {
        // Snapshot the head of each partition so only the messages produced
        // below are read back.
        var offsets = producer.GetTopicOffsetAsync(IntegrationConfig.IntegrationCompressionTopic).Result;
        using (var consumer = new Consumer(
            new ConsumerOptions(IntegrationConfig.IntegrationCompressionTopic, router) { PartitionWhitelist = new List<int>() { 0 } },
            offsets.Select(x => new OffsetPosition(x.PartitionId, x.Offsets.Max())).ToArray()))
        {
            const int numberOfMessages = 3;
            var sendTasks = new List<Task>(numberOfMessages);
            for (int i = 0; i < numberOfMessages; i++)
            {
                // Partition 0 matches the consumer's whitelist above.
                sendTasks.Add(producer.SendMessageAsync(IntegrationConfig.IntegrationCompressionTopic,
                    new[] { new Message(i.ToString()) }, codec: MessageCodec.CodecGzip, partition: 0));
            }
            // Wait for every broker acknowledgement so produce errors fail here.
            Task.WaitAll(sendTasks.ToArray());

            // Bounded by a 1-minute token so a broken consumer cannot hang the test.
            var results = consumer.Consume(new CancellationTokenSource(TimeSpan.FromMinutes(1)).Token)
                .Take(numberOfMessages).ToList();
            for (int i = 0; i < numberOfMessages; i++)
            {
                Assert.That(results[i].Value.ToUtf8String(), Is.EqualTo(i.ToString()));
            }
        }
    }
}
示例2: ConsumerShouldBeAbleToSeekBackToEarlierOffset
public void ConsumerShouldBeAbleToSeekBackToEarlierOffset()
{
    // Produces 20 messages, consumes them, seeks back to the starting offsets,
    // and verifies the same 20 messages are delivered a second time.
    var producer = new Producer(_router);

    // Capture the head offset of every partition before producing.
    var offsets = producer.GetTopicOffsetAsync("LoadTest").Result
        .Select(x => new OffsetPosition(x.PartitionId, x.Offsets.Max())).ToArray();
    var consumer = new Consumer(new ConsumerOptions("LoadTest", _router), offsets);

    var tasks = new List<Task<List<ProduceResponse>>>();
    for (int i = 0; i < 20; i++)
    {
        tasks.Add(producer.SendMessageAsync("LoadTest", new[] { new Message { Value = i.ToString(), Key = "1" } }));
    }
    Task.WaitAll(tasks.ToArray());

    var results = consumer.Consume().Take(20).ToList();
    //ensure the produced messages arrived
    for (int i = 0; i < 20; i++)
    {
        Assert.That(results[i].Value == i.ToString());
    }

    //seek back to initial offset
    consumer.SetOffsetPosition(offsets);

    // FIX: the original re-asserted over the first `results` list, which can
    // never fail once the first loop passed — the seek was never verified.
    // Consume a fresh batch after seeking and assert on that instead.
    var replayedResults = consumer.Consume().Take(20).ToList();
    for (int i = 0; i < 20; i++)
    {
        Assert.That(replayedResults[i].Value == i.ToString());
    }
}
示例3: ConsumerShouldBeAbleToGetCurrentOffsetInformation
public void ConsumerShouldBeAbleToGetCurrentOffsetInformation()
{
    // After consuming 20 freshly produced messages the consumer's tracked
    // offsets must sit exactly 20 positions past where they started.
    var producer = new Producer(_router);
    var startOffsets = producer.GetTopicOffsetAsync("LoadTest").Result
        .Select(x => new OffsetPosition(x.PartitionId, x.Offsets.Max())).ToArray();
    var consumer = new Consumer(new ConsumerOptions("LoadTest", _router), startOffsets);

    // Produce 20 keyed messages concurrently and wait for every acknowledgement.
    var sendTasks = Enumerable.Range(0, 20)
        .Select(n => producer.SendMessageAsync("LoadTest", new[] { new Message { Value = n.ToString(), Key = "1" } }))
        .ToArray();
    Task.WaitAll(sendTasks);

    //ensure the produced messages arrived
    var consumed = consumer.Consume().Take(20).ToList();
    for (int n = 0; n < 20; n++)
    {
        Assert.That(consumed[n].Value == n.ToString());
    }

    //the current offsets should be 20 positions higher than start
    var currentOffsets = consumer.GetOffsetPosition();
    Assert.That(currentOffsets.Sum(x => x.Offset) - startOffsets.Sum(x => x.Offset), Is.EqualTo(20));
}
示例4: ConsumerShouldConsumeInSameOrderAsProduced
public void ConsumerShouldConsumeInSameOrderAsProduced()
{
    // Messages with sequential payloads "0".."19" must be consumed in exactly
    // the order they were produced, all tagged with this run's unique key.
    var expected = Enumerable.Range(0, 20).Select(n => n.ToString()).ToList();
    var testId = Guid.NewGuid().ToString();

    using (var router = new BrokerRouter(new KafkaOptions(IntegrationConfig.IntegrationUri)))
    using (var producer = new Producer(router))
    {
        // Start the consumer at the current head so only our messages are read.
        var offsets = producer.GetTopicOffsetAsync(IntegrationConfig.IntegrationTopic).Result;
        var startPositions = offsets.Select(x => new OffsetPosition(x.PartitionId, x.Offsets.Max())).ToArray();

        using (var consumer = new Consumer(new ConsumerOptions(IntegrationConfig.IntegrationTopic, router), startPositions))
        {
            // Send one message at a time, waiting on each, to guarantee ordering.
            for (int n = 0; n < 20; n++)
            {
                producer.SendMessageAsync(IntegrationConfig.IntegrationTopic, new[] { new Message(n.ToString(), testId) }).Wait();
            }

            var received = consumer.Consume().Take(20).ToList();
            //ensure the produced messages arrived
            Console.WriteLine("Message order: {0}", string.Join(", ", received.Select(x => x.Value.ToUtf8String()).ToList()));

            Assert.That(received.Count, Is.EqualTo(20));
            Assert.That(received.Select(x => x.Value.ToUtf8String()).ToList(), Is.EqualTo(expected), "Expected the message list in the correct order.");
            Assert.That(received.Any(x => x.Key.ToUtf8String() != testId), Is.False);
        }
    }
}
示例5: ProducerAckLevel1ResponseOffsetShouldBeEqualToLastOffset
public async Task ProducerAckLevel1ResponseOffsetShouldBeEqualToLastOffset()
{
    // With acks=1 the broker replies with the offset assigned to the message,
    // which must be one less than the partition's next (max) offset afterwards.
    using (var router = new BrokerRouter(new KafkaOptions(IntegrationConfig.IntegrationUri) { Log = IntegrationConfig.NoDebugLog }))
    using (var producer = new Producer(router))
    {
        var responseAckLevel1 = await producer.SendMessageAsync(new Message("Ack Level 1"), IntegrationConfig.IntegrationTopic, acks: 1, partition: 0);
        var offsetResponse = await producer.GetTopicOffsetAsync(IntegrationConfig.IntegrationTopic);

        var maxOffset = offsetResponse.Find(x => x.PartitionId == 0);
        // FIX: Find returns null when partition 0 is absent; fail with a clear
        // assertion message instead of a NullReferenceException below.
        Assert.That(maxOffset, Is.Not.Null, "Expected an offset response for partition 0.");
        Assert.AreEqual(responseAckLevel1.Offset, maxOffset.Offsets.Max() - 1);
    }
}
示例6: ConsumerFailure
public void ConsumerFailure()
{
    // Exercises produce/consume loops against a topic configured with 3
    // replicas and 1 partition (regression scenario for issue 13).
    string topic = "TestTopicIssue13-2-3R-1P";
    using (var router = new BrokerRouter(_options))
    using (var producer = new Producer(router)) // FIX: producer was never disposed
    {
        // Start reading from the current head of each partition.
        var offsets = producer.GetTopicOffsetAsync(topic).Result;
        var maxOffsets = offsets.Select(x => new OffsetPosition(x.PartitionId, x.Offsets.Max())).ToArray();
        // Zero min-bytes wait so the consumer polls without broker-side blocking.
        var consumerOptions = new ConsumerOptions(topic, router) { PartitionWhitelist = new List<int>() { 0 }, MaxWaitTimeForMinimumBytes = TimeSpan.Zero };
        SandMessageForever(producer, topic);
        ReadMessageForever(consumerOptions, maxOffsets);
    }
}
示例7: EnsureGzipCanDecompressMessageFromKafka
// Reads three pre-existing gzip-compressed messages from offset 0 of each
// partition and verifies the payloads are "0", "1", "2".
public void EnsureGzipCanDecompressMessageFromKafka()
{
var producer = new Producer(_router);
// Offsets are fetched only for their partition ids; every position is reset to 0 below.
var offsets = producer.GetTopicOffsetAsync(CompressTopic).Result;
// NOTE(review): the consumer subscribes to topic "Empty" while the partition
// list comes from CompressTopic — confirm this mismatch is intentional.
var consumer = new Consumer(new ConsumerOptions("Empty", _router),
offsets.Select(x => new OffsetPosition(x.PartitionId, 0)).ToArray());
var results = consumer.Consume().Take(3).ToList();
for (int i = 0; i < 3; i++)
{
// NOTE(review): compares raw Value to a string — sibling tests call
// ToUtf8String() first; verify this assertion passes as written.
Assert.That(results[i].Value, Is.EqualTo(i.ToString()));
}
}
示例8: EnsureGzipCanDecompressMessageFromKafka
public void EnsureGzipCanDecompressMessageFromKafka()
{
    // Re-reads the compression topic from the beginning and checks the first
    // three gzip-compressed messages carry payloads "0", "1", "2".
    // FIX: router was never disposed, and the trailing `using (x) { }` pattern
    // did not dispose anything when an assertion threw mid-method.
    using (var router = new BrokerRouter(_options))
    using (var producer = new Producer(router))
    {
        var offsets = producer.GetTopicOffsetAsync(IntegrationConfig.IntegrationCompressionTopic).Result;
        // Position every partition at offset 0 to read the topic from the start.
        using (var consumer = new Consumer(new ConsumerOptions(IntegrationConfig.IntegrationCompressionTopic, router),
            offsets.Select(x => new OffsetPosition(x.PartitionId, 0)).ToArray()))
        {
            var results = consumer.Consume().Take(3).ToList();
            for (int i = 0; i < 3; i++)
            {
                // NOTE(review): compares raw Value to a string — sibling tests
                // call ToUtf8String() first; verify this passes as written.
                Assert.That(results[i].Value, Is.EqualTo(i.ToString()));
            }
        }
    }
}
示例9: ConsumerShouldNotLoseMessageWhenBlocked
public void ConsumerShouldNotLoseMessageWhenBlocked()
{
    // With a consumer buffer of a single message the upstream fetch must block
    // rather than drop: all 20 produced messages should still arrive in order.
    var testId = Guid.NewGuid().ToString();

    using (var router = new BrokerRouter(new KafkaOptions(IntegrationConfig.IntegrationUri)))
    using (var producer = new Producer(router))
    {
        var offsets = producer.GetTopicOffsetAsync(IntegrationConfig.IntegrationTopic).Result;
        var startPositions = offsets.Select(x => new OffsetPosition(x.PartitionId, x.Offsets.Max())).ToArray();

        //create consumer with buffer size of 1 (should block upstream)
        var consumerOptions = new ConsumerOptions(IntegrationConfig.IntegrationTopic, router) { ConsumerBufferSize = 1 };
        using (var consumer = new Consumer(consumerOptions, startPositions))
        {
            for (int n = 0; n < 20; n++)
            {
                producer.SendMessageAsync(IntegrationConfig.IntegrationTopic, new[] { new Message(n.ToString(), testId) }).Wait();
            }

            for (int n = 0; n < 20; n++)
            {
                // Pull exactly one message at a time so the 1-slot buffer is exercised.
                var message = consumer.Consume().Take(1).First();
                Assert.That(message.Key.ToUtf8String(), Is.EqualTo(testId));
                Assert.That(message.Value.ToUtf8String(), Is.EqualTo(n.ToString()));
            }
        }
    }
}
示例10: ConsumerShouldBeAbleToGetCurrentOffsetInformation
public void ConsumerShouldBeAbleToGetCurrentOffsetInformation()
{
    // Consuming 20 newly produced messages must advance the consumer's
    // aggregate offset position by exactly 20 over the starting snapshot.
    using (var router = new BrokerRouter(new KafkaOptions(IntegrationConfig.IntegrationUri)))
    using (var producer = new Producer(router))
    {
        var startOffsets = producer.GetTopicOffsetAsync(IntegrationConfig.IntegrationTopic).Result
            .Select(x => new OffsetPosition(x.PartitionId, x.Offsets.Max())).ToArray();

        using (var consumer = new Consumer(new ConsumerOptions(IntegrationConfig.IntegrationTopic, router), startOffsets))
        {
            // Send sequentially, waiting on each, so arrival order is deterministic.
            for (int n = 0; n < 20; n++)
            {
                producer.SendMessageAsync(IntegrationConfig.IntegrationTopic, new[] { new Message(n.ToString(), "1") }).Wait();
            }

            //ensure the produced messages arrived
            var received = consumer.Consume().Take(20).ToList();
            for (int n = 0; n < 20; n++)
            {
                Assert.That(received[n].Value.ToUtf8String(), Is.EqualTo(n.ToString()));
            }

            //the current offsets should be 20 positions higher than start
            var currentOffsets = consumer.GetOffsetPosition();
            Assert.That(currentOffsets.Sum(x => x.Offset) - startOffsets.Sum(x => x.Offset), Is.EqualTo(20));
        }
    }
}
示例11: ProducerShouldUsePartitionIdInsteadOfMessageKeyToChoosePartition
public async Task ProducerShouldUsePartitionIdInsteadOfMessageKeyToChoosePartition()
{
    // The mocked selector always routes by key to partition 1; an explicit
    // partition id passed to SendMessageAsync must override it, so every
    // message should land on partition 0.
    Mock<IPartitionSelector> partitionSelector = new Mock<IPartitionSelector>();
    partitionSelector.Setup(x => x.Select(It.IsAny<Topic>(), It.IsAny<byte[]>())).Returns((Topic y, byte[] y1) => { return y.Partitions.Find(p => p.PartitionId == 1); });

    // FIX: router was never disposed, and producer/consumer leaked whenever an
    // assertion threw; using blocks make disposal exception-safe.
    using (var router = new BrokerRouter(new KafkaOptions(IntegrationConfig.IntegrationUri) { PartitionSelector = partitionSelector.Object }))
    using (var producer = new Producer(router))
    {
        var offsets = await producer.GetTopicOffsetAsync(IntegrationConfig.IntegrationTopic);
        int partitionId = 0;

        //messages should go to the explicit partition id, not the key-selected broker route
        for (int i = 0; i < 20; i++)
        {
            await producer.SendMessageAsync(IntegrationConfig.IntegrationTopic, new[] { new Message(i.ToString(), "key") }, 1, null, MessageCodec.CodecNone, partitionId);
        }

        //consume from partitionId to verify the data was sent to the correct partition
        using (var consumer = new Consumer(new ConsumerOptions(IntegrationConfig.IntegrationTopic, router) { PartitionWhitelist = { partitionId } }, offsets.Select(x => new OffsetPosition(x.PartitionId, x.Offsets.Max())).ToArray()))
        {
            for (int i = 0; i < 20; i++)
            {
                // Consume() blocks, so enumerate on the thread pool to keep this awaitable.
                Message result = null;
                await Task.Run(() => result = consumer.Consume().Take(1).First());
                Assert.That(result.Value.ToUtf8String(), Is.EqualTo(i.ToString()));
            }
        }
    }
}
示例12: ConsumerShouldBeAbleToSeekBackToEarlierOffset
// Produces 20 uniquely-keyed messages, consumes them, seeks the consumer back
// to the starting offsets, and verifies the identical batch is delivered again
// in the same order. (Unlike the sibling test above, this version correctly
// re-consumes after the seek.)
public void ConsumerShouldBeAbleToSeekBackToEarlierOffset()
{
var expected = new List<string> { "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12", "13", "14", "15", "16", "17", "18", "19" };
var testId = Guid.NewGuid().ToString();
using (var router = new BrokerRouter(new KafkaOptions(IntegrationConfig.IntegrationUri)))
using (var producer = new Producer(router))
{
// Snapshot head offsets so the consumer starts just before our messages.
var offsets = producer.GetTopicOffsetAsync(IntegrationConfig.IntegrationTopic).Result
.Select(x => new OffsetPosition(x.PartitionId, x.Offsets.Max())).ToArray();
// Zero min-bytes wait keeps consume polling responsive for this short test.
using (var consumer = new Consumer(new ConsumerOptions(IntegrationConfig.IntegrationTopic, router) { MaxWaitTimeForMinimumBytes = TimeSpan.Zero }, offsets))
{
// Send sequentially, waiting on each, so arrival order is deterministic.
for (int i = 0; i < 20; i++)
{
producer.SendMessageAsync(IntegrationConfig.IntegrationTopic, new[] { new Message(i.ToString(), testId) }).Wait();
}
var sentMessages = consumer.Consume().Take(20).ToList();
//ensure the produced messages arrived
IntegrationConfig.NoDebugLog.InfoFormat(IntegrationConfig.Highlight("Message order: {0}", string.Join(", ", sentMessages.Select(x => x.Value.ToUtf8String()).ToList())));
Assert.That(sentMessages.Count, Is.EqualTo(20));
Assert.That(sentMessages.Select(x => x.Value.ToUtf8String()).ToList(), Is.EqualTo(expected));
Assert.That(sentMessages.Any(x => x.Key.ToUtf8String() != testId), Is.False);
//seek back to initial offset
consumer.SetOffsetPosition(offsets);
// Consume a second batch after the seek; it must match the first exactly.
var resetPositionMessages = consumer.Consume().Take(20).ToList();
//ensure all produced messages arrive again
IntegrationConfig.NoDebugLog.InfoFormat(IntegrationConfig.Highlight("Message order: {0}", string.Join(", ", resetPositionMessages.Select(x => x.Value).ToList())));
Assert.That(resetPositionMessages.Count, Is.EqualTo(20));
Assert.That(resetPositionMessages.Select(x => x.Value.ToUtf8String()).ToList(), Is.EqualTo(expected));
Assert.That(resetPositionMessages.Any(x => x.Key.ToUtf8String() != testId), Is.False);
}
}
}
示例13: ConsumerShouldConsumeInSameOrderAsAsyncProduced
/// <summary>
/// Messages produced asynchronously with the same ack level to the same
/// partition must be consumed in the order they were sent.
/// </summary>
/// <returns>A task that completes when the ordering verification finishes.</returns>
public async Task ConsumerShouldConsumeInSameOrderAsAsyncProduced()
{
    int partition = 0;
    int numberOfMessage = 200;
    IntegrationConfig.NoDebugLog.InfoFormat(IntegrationConfig.Highlight("create BrokerRouter"));

    // FIX: the original only disposed router/producer/consumer on the success
    // path; using blocks make disposal exception-safe when an assertion throws.
    using (var router = new BrokerRouter(new KafkaOptions(IntegrationConfig.IntegrationUri)))
    {
        // A low async limit with no batch delay deliberately slows the producer
        // to reproduce a race condition present in older library versions.
        int causesRaceConditionOldVersion = 2;
        using (var producer = new Producer(router, causesRaceConditionOldVersion) { BatchDelayTime = TimeSpan.Zero })
        {
            IntegrationConfig.NoDebugLog.InfoFormat(IntegrationConfig.Highlight("create producer"));
            List<OffsetResponse> offsets = await producer.GetTopicOffsetAsync(IntegrationConfig.IntegrationTopic);
            IntegrationConfig.NoDebugLog.InfoFormat(IntegrationConfig.Highlight("request Offset"));

            // Fire all sends concurrently, then wait for every acknowledgement.
            List<Task> sendList = new List<Task>(numberOfMessage);
            for (int i = 0; i < numberOfMessage; i++)
            {
                sendList.Add(producer.SendMessageAsync(IntegrationConfig.IntegrationTopic, new[] { new Message(i.ToString()) }, 1, null, MessageCodec.CodecNone, partition));
            }
            await Task.WhenAll(sendList.ToArray());
            IntegrationConfig.NoDebugLog.InfoFormat(IntegrationConfig.Highlight("done send"));

            IntegrationConfig.NoDebugLog.InfoFormat(IntegrationConfig.Highlight("create Consumer"));
            ConsumerOptions consumerOptions = new ConsumerOptions(IntegrationConfig.IntegrationTopic, router);
            consumerOptions.PartitionWhitelist = new List<int> { partition };
            using (Consumer consumer = new Consumer(consumerOptions, offsets.Select(x => new OffsetPosition(x.PartitionId, x.Offsets.Max())).ToArray()))
            {
                int expected = 0;
                IntegrationConfig.NoDebugLog.InfoFormat(IntegrationConfig.Highlight("start Consume"));
                // Consume() blocks, so run the enumeration on the thread pool.
                await Task.Run((() =>
                {
                    var results = consumer.Consume().Take(numberOfMessage).ToList();
                    // FIX: cleaned up the garbled failure message ("not Consume all ,messages").
                    Assert.IsTrue(results.Count() == numberOfMessage, "did not consume all messages");
                    IntegrationConfig.NoDebugLog.InfoFormat(IntegrationConfig.Highlight("done Consume"));
                    foreach (Message message in results)
                    {
                        Assert.That(message.Value.ToUtf8String(), Is.EqualTo(expected.ToString()),
                            "Expected the message list in the correct order.");
                        expected++;
                    }
                }));
            }
        }
    }
}
示例14: ConsumerShouldConsumeInSameOrderAsProduced
public void ConsumerShouldConsumeInSameOrderAsProduced()
{
    // Snapshot the head of every partition so only the 20 messages produced
    // below are read back, then verify they arrive with sequential payloads.
    var producer = new Producer(_router);
    var offsets = producer.GetTopicOffsetAsync("LoadTest").Result;
    var startPositions = offsets.Select(x => new OffsetPosition(x.PartitionId, x.Offsets.Max())).ToArray();
    var consumer = new Consumer(new ConsumerOptions("LoadTest", _router), startPositions);

    // Produce the messages concurrently and wait for every acknowledgement.
    var sendTasks = Enumerable.Range(0, 20)
        .Select(n => producer.SendMessageAsync("LoadTest", new[] { new Message { Value = n.ToString(), Key = "1" } }))
        .ToArray();
    Task.WaitAll(sendTasks);

    // Each consumed payload must match its production index.
    var consumed = consumer.Consume().Take(20).ToList();
    for (int n = 0; n < 20; n++)
    {
        Assert.That(consumed[n].Value == n.ToString());
    }
}
示例15: ConsumerShouldMoveToNextAvailableOffsetWhenQueryingForNextMessage
// FIX: changed the signature from `async void` to `async Task` — an async void
// test cannot be awaited by the runner, so exceptions and completion were
// unobservable. `async Task` is backward-compatible for test frameworks.
public async Task ConsumerShouldMoveToNextAvailableOffsetWhenQueryingForNextMessage()
{
    // Sends a large batch, drains it, and verifies the consumer's tracked
    // offset positions end up equal to the broker's reported head offsets.
    const int expectedCount = 1000;
    var options = new KafkaOptions(IntegrationConfig.IntegrationUri) { Log = new ConsoleLog() };

    using (var producerRouter = new BrokerRouter(options))
    using (var producer = new Producer(producerRouter))
    {
        //get current offset and reset consumer to top of log
        var offsets = await producer.GetTopicOffsetAsync(IntegrationConfig.IntegrationTopic).ConfigureAwait(false);

        using (var consumerRouter = new BrokerRouter(options))
        using (var consumer = new Consumer(new ConsumerOptions(IntegrationConfig.IntegrationTopic, consumerRouter),
            offsets.Select(x => new OffsetPosition(x.PartitionId, x.Offsets.Max())).ToArray()))
        {
            Console.WriteLine("Sending {0} test messages", expectedCount);
            var response = await producer.SendMessageAsync(IntegrationConfig.IntegrationTopic,
                Enumerable.Range(0, expectedCount).Select(x => new Message(x.ToString())));
            Assert.That(response.Any(x => x.Error != (int)ErrorResponseCode.NoError), Is.False, "Error occured sending test messages to server.");

            var stream = consumer.Consume();
            Console.WriteLine("Reading message back out from consumer.");
            var data = stream.Take(expectedCount).ToList();

            // After draining, the consumer's positions must match the broker's
            // head offsets partition-for-partition.
            var consumerOffset = consumer.GetOffsetPosition().OrderBy(x => x.Offset).ToList();
            var serverOffset = await producer.GetTopicOffsetAsync(IntegrationConfig.IntegrationTopic).ConfigureAwait(false);
            var positionOffset = serverOffset.Select(x => new OffsetPosition(x.PartitionId, x.Offsets.Max()))
                .OrderBy(x => x.Offset)
                .ToList();

            Assert.That(consumerOffset, Is.EqualTo(positionOffset), "The consumerOffset position should match the server offset position.");
            // FIX: the failure message claimed "2000 messages" while expectedCount
            // is 1000; derive the message from the constant instead.
            Assert.That(data.Count, Is.EqualTo(expectedCount), string.Format("We should have received {0} messages from the server.", expectedCount));
        }
    }
}