

C# Producer.GetTopicOffsetAsync Method Code Examples

This article collects typical usage examples of the C# method KafkaNet.Producer.GetTopicOffsetAsync. If you are wondering how to call Producer.GetTopicOffsetAsync, what its concrete usage looks like, or simply want working examples of it, the curated code samples below should help. You can also explore further usage examples of the containing class, KafkaNet.Producer.


The following shows 15 code examples of the Producer.GetTopicOffsetAsync method, ordered by popularity.
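Before the individual examples, here is a minimal, self-contained sketch of the pattern most of them share: query the latest offset of each partition of a topic with GetTopicOffsetAsync, then seed a Consumer at those positions so it only reads newly produced messages. The broker URI, topic name, and namespace imports are assumptions for illustration and are not taken from any specific example below.

using System;
using System.Linq;
using KafkaNet;
using KafkaNet.Model;
using KafkaNet.Protocol;

public static class TopicOffsetSketch
{
    public static void Run()
    {
        // Hypothetical broker address; replace with your own Kafka endpoint.
        var options = new KafkaOptions(new Uri("http://localhost:9092"));

        using (var router = new BrokerRouter(options))
        using (var producer = new Producer(router))
        {
            // GetTopicOffsetAsync returns one OffsetResponse per partition;
            // Offsets.Max() is the head (next write position) of that partition's log.
            var offsets = producer.GetTopicOffsetAsync("ExampleTopic").Result;

            var startPositions = offsets
                .Select(x => new OffsetPosition(x.PartitionId, x.Offsets.Max()))
                .ToArray();

            // Start the consumer at the head so it only yields messages produced after this point.
            using (var consumer = new Consumer(new ConsumerOptions("ExampleTopic", router), startPositions))
            {
                // consumer.Consume(...) would stream newly produced messages from here on.
            }
        }
    }
}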

Example 1: EnsureGzipCanDecompressMessageFromKafka

        public void EnsureGzipCanDecompressMessageFromKafka()
        {
            var router = new BrokerRouter(_options);
            var producer = new Producer(router);

            var offsets = producer.GetTopicOffsetAsync(IntegrationConfig.IntegrationCompressionTopic).Result;

            var consumer = new Consumer(new ConsumerOptions(IntegrationConfig.IntegrationCompressionTopic, router) { PartitionWhitelist = new List<int>() { 0 } },
                offsets.Select(x => new OffsetPosition(x.PartitionId, x.Offsets.Max())).ToArray());
            int numberOfmessage = 3;
            for (int i = 0; i < numberOfmessage; i++)
            {
                producer.SendMessageAsync(IntegrationConfig.IntegrationCompressionTopic, new[] { new Message(i.ToString()) }, codec: MessageCodec.CodecGzip,
              partition: 0);
            }

            var results = consumer.Consume(new CancellationTokenSource(TimeSpan.FromMinutes(1)).Token).Take(numberOfmessage).ToList();

            for (int i = 0; i < numberOfmessage; i++)
            {
                Assert.That(results[i].Value.ToUtf8String(), Is.EqualTo(i.ToString()));
            }

            using (producer)
            using (consumer) { }
        }
Developer: gigya, Project: KafkaNetClient, Lines of code: 26, Source: GzipProducerConsumerTests.cs

Example 2: ConsumerShouldBeAbleToSeekBackToEarlierOffset

        public void ConsumerShouldBeAbleToSeekBackToEarlierOffset()
        {
            var producer = new Producer(_router);

            var offsets = producer.GetTopicOffsetAsync("LoadTest").Result
                .Select(x => new OffsetPosition(x.PartitionId, x.Offsets.Max())).ToArray();

            var consumer = new Consumer(new ConsumerOptions("LoadTest", _router), offsets);

            var tasks = new List<Task<List<ProduceResponse>>>();
            for (int i = 0; i < 20; i++)
            {
                tasks.Add(producer.SendMessageAsync("LoadTest", new[] { new Message { Value = i.ToString(), Key = "1" } }));
            }
            Task.WaitAll(tasks.ToArray());

            var results = consumer.Consume().Take(20).ToList();

            //ensure the produced messages arrived
            for (int i = 0; i < 20; i++)
            {
                Assert.That(results[i].Value == i.ToString());
            }

            //seek back to initial offset
            consumer.SetOffsetPosition(offsets);

            //ensure all produced messages arrive again by consuming from the reset position
            var replayedResults = consumer.Consume().Take(20).ToList();
            for (int i = 0; i < 20; i++)
            {
                Assert.That(replayedResults[i].Value == i.ToString());
            }
        }
Developer: jmucza, Project: kafka-net, Lines of code: 33, Source: ProducerConsumerTests.cs

Example 3: ConsumerShouldBeAbleToGetCurrentOffsetInformation

        public void ConsumerShouldBeAbleToGetCurrentOffsetInformation()
        {
            var producer = new Producer(_router);

            var startOffsets = producer.GetTopicOffsetAsync("LoadTest").Result
                .Select(x => new OffsetPosition(x.PartitionId, x.Offsets.Max())).ToArray();

            var consumer = new Consumer(new ConsumerOptions("LoadTest", _router), startOffsets);

            var tasks = new List<Task<List<ProduceResponse>>>();
            for (int i = 0; i < 20; i++)
            {
                tasks.Add(producer.SendMessageAsync("LoadTest", new[] { new Message { Value = i.ToString(), Key = "1" } }));
            }
            Task.WaitAll(tasks.ToArray());

            var results = consumer.Consume().Take(20).ToList();

            //ensure the produced messages arrived
            for (int i = 0; i < 20; i++)
            {
                Assert.That(results[i].Value == i.ToString());
            }

            //the current offsets should be 20 positions higher than start
            var currentOffsets = consumer.GetOffsetPosition();
            Assert.That(currentOffsets.Sum(x => x.Offset) - startOffsets.Sum(x => x.Offset), Is.EqualTo(20));
        }
Developer: jmucza, Project: kafka-net, Lines of code: 28, Source: ProducerConsumerTests.cs

Example 4: ConsumerShouldConsumeInSameOrderAsProduced

        public void ConsumerShouldConsumeInSameOrderAsProduced()
        {
            var expected = new List<string> { "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12", "13", "14", "15", "16", "17", "18", "19" };
            var testId = Guid.NewGuid().ToString();

            using (var router = new BrokerRouter(new KafkaOptions(IntegrationConfig.IntegrationUri)))
            using (var producer = new Producer(router))
            {

                var offsets = producer.GetTopicOffsetAsync(IntegrationConfig.IntegrationTopic).Result;

                using (var consumer = new Consumer(new ConsumerOptions(IntegrationConfig.IntegrationTopic, router),
                    offsets.Select(x => new OffsetPosition(x.PartitionId, x.Offsets.Max())).ToArray()))
                {

                    for (int i = 0; i < 20; i++)
                    {
                        producer.SendMessageAsync(IntegrationConfig.IntegrationTopic, new[] { new Message(i.ToString(), testId) }).Wait();
                    }

                    var results = consumer.Consume().Take(20).ToList();

                    //ensure the produced messages arrived
                    Console.WriteLine("Message order:  {0}", string.Join(", ", results.Select(x => x.Value.ToUtf8String()).ToList()));

                    Assert.That(results.Count, Is.EqualTo(20));
                    Assert.That(results.Select(x => x.Value.ToUtf8String()).ToList(), Is.EqualTo(expected), "Expected the message list in the correct order.");
                    Assert.That(results.Any(x => x.Key.ToUtf8String() != testId), Is.False);
                }
            }
        }
Developer: jsifantu, Project: kafka-net, Lines of code: 31, Source: ProducerConsumerIntegrationTests.cs

Example 5: ProducerAckLevel1ResponseOffsetShouldBeEqualToLastOffset

 public async Task ProducerAckLevel1ResponseOffsetShouldBeEqualToLastOffset()
 {
     using (var router = new BrokerRouter(new KafkaOptions(IntegrationConfig.IntegrationUri) { Log = IntegrationConfig.NoDebugLog }))
     using (var producer = new Producer(router))
     {
         var responseAckLevel1 = await producer.SendMessageAsync(new Message("Ack Level 1"), IntegrationConfig.IntegrationTopic, acks: 1, partition: 0);
         var offsetResponse = await producer.GetTopicOffsetAsync(IntegrationConfig.IntegrationTopic);
         var maxOffset = offsetResponse.Find(x => x.PartitionId == 0);
         Assert.AreEqual(responseAckLevel1.Offset, maxOffset.Offsets.Max() - 1);
     }
 }
Developer: pooyasoleimany, Project: KafkaNetClient, Lines of code: 11, Source: ProducerConsumerIntegrationTests.cs

Example 6: ConsumerFailure

        public void ConsumerFailure()
        {
            string topic = "TestTopicIssue13-2-3R-1P";
            using (var router = new BrokerRouter(_options))
            {
                var producer = new Producer(router);
                var offsets = producer.GetTopicOffsetAsync(topic).Result;
                var maxOffsets = offsets.Select(x => new OffsetPosition(x.PartitionId, x.Offsets.Max())).ToArray();
                var consumerOptions = new ConsumerOptions(topic, router) { PartitionWhitelist = new List<int>() { 0 }, MaxWaitTimeForMinimumBytes = TimeSpan.Zero };

                SandMessageForever(producer, topic);
                ReadMessageForever(consumerOptions, maxOffsets);
            }
        }
Developer: gigya, Project: KafkaNetClient, Lines of code: 14, Source: ManualTesting.cs

Example 7: EnsureGzipCanDecompressMessageFromKafka

        public void EnsureGzipCanDecompressMessageFromKafka()
        {
            var producer = new Producer(_router);

            var offsets = producer.GetTopicOffsetAsync(CompressTopic).Result;

            var consumer = new Consumer(new ConsumerOptions("Empty", _router),
                offsets.Select(x => new OffsetPosition(x.PartitionId, 0)).ToArray());

            var results = consumer.Consume().Take(3).ToList();

            for (int i = 0; i < 3; i++)
            {
                Assert.That(results[i].Value, Is.EqualTo(i.ToString()));
            }
        }
Developer: ntent-ad, Project: kafka-net, Lines of code: 16, Source: GzipProducerConsumerTests.cs

Example 8: EnsureGzipCanDecompressMessageFromKafka

        public void EnsureGzipCanDecompressMessageFromKafka()
        {
            var router = new BrokerRouter(_options);
            var producer = new Producer(router);

            var offsets = producer.GetTopicOffsetAsync(IntegrationConfig.IntegrationCompressionTopic).Result;

            var consumer = new Consumer(new ConsumerOptions(IntegrationConfig.IntegrationCompressionTopic, router),
                offsets.Select(x => new OffsetPosition(x.PartitionId, 0)).ToArray());

            var results = consumer.Consume().Take(3).ToList();

            for (int i = 0; i < 3; i++)
            {
                Assert.That(results[i].Value, Is.EqualTo(i.ToString()));
            }

            using (producer)
            using (consumer) { }
        }
Developer: jsifantu, Project: kafka-net, Lines of code: 20, Source: GzipProducerConsumerTests.cs

Example 9: ConsumerShouldNotLoseMessageWhenBlocked

        public void ConsumerShouldNotLoseMessageWhenBlocked()
        {
            var testId = Guid.NewGuid().ToString();

            using (var router = new BrokerRouter(new KafkaOptions(IntegrationConfig.IntegrationUri)))
            using (var producer = new Producer(router))
            {
                var offsets = producer.GetTopicOffsetAsync(IntegrationConfig.IntegrationTopic).Result;

                //create consumer with buffer size of 1 (should block upstream)
                using (var consumer = new Consumer(new ConsumerOptions(IntegrationConfig.IntegrationTopic, router) { ConsumerBufferSize = 1 },
                    offsets.Select(x => new OffsetPosition(x.PartitionId, x.Offsets.Max())).ToArray()))
                {

                    for (int i = 0; i < 20; i++)
                    {
                        producer.SendMessageAsync(IntegrationConfig.IntegrationTopic, new[] { new Message(i.ToString(), testId) }).Wait();
                    }

                    for (int i = 0; i < 20; i++)
                    {
                        var result = consumer.Consume().Take(1).First();
                        Assert.That(result.Key.ToUtf8String(), Is.EqualTo(testId));
                        Assert.That(result.Value.ToUtf8String(), Is.EqualTo(i.ToString()));
                    }
                }
            }
        }
Developer: jsifantu, Project: kafka-net, Lines of code: 28, Source: ProducerConsumerIntegrationTests.cs

Example 10: ConsumerShouldBeAbleToGetCurrentOffsetInformation

        public void ConsumerShouldBeAbleToGetCurrentOffsetInformation()
        {
            using (var router = new BrokerRouter(new KafkaOptions(IntegrationConfig.IntegrationUri)))
            using (var producer = new Producer(router))
            {

                var startOffsets = producer.GetTopicOffsetAsync(IntegrationConfig.IntegrationTopic).Result
                    .Select(x => new OffsetPosition(x.PartitionId, x.Offsets.Max())).ToArray();

                using (var consumer = new Consumer(new ConsumerOptions(IntegrationConfig.IntegrationTopic, router), startOffsets))
                {

                    for (int i = 0; i < 20; i++)
                    {
                        producer.SendMessageAsync(IntegrationConfig.IntegrationTopic, new[] { new Message(i.ToString(), "1") }).Wait();
                    }

                    var results = consumer.Consume().Take(20).ToList();

                    //ensure the produced messages arrived
                    for (int i = 0; i < 20; i++)
                    {
                        Assert.That(results[i].Value.ToUtf8String(), Is.EqualTo(i.ToString()));
                    }

                    //the current offsets should be 20 positions higher than start
                    var currentOffsets = consumer.GetOffsetPosition();
                    Assert.That(currentOffsets.Sum(x => x.Offset) - startOffsets.Sum(x => x.Offset), Is.EqualTo(20));
                }
            }
        }
Developer: jsifantu, Project: kafka-net, Lines of code: 31, Source: ProducerConsumerIntegrationTests.cs

Example 11: ProducerShouldUsePartitionIdInsteadOfMessageKeyToChoosePartition

        public async Task ProducerShouldUsePartitionIdInsteadOfMessageKeyToChoosePartition()
        {
            Mock<IPartitionSelector> partitionSelector = new Mock<IPartitionSelector>();
            partitionSelector.Setup(x => x.Select(It.IsAny<Topic>(), It.IsAny<byte[]>())).Returns((Topic y, byte[] y1) => { return y.Partitions.Find(p => p.PartitionId == 1); });

            var router = new BrokerRouter(new KafkaOptions(IntegrationConfig.IntegrationUri) { PartitionSelector = partitionSelector.Object });
            var producer = new Producer(router);

            var offsets = await producer.GetTopicOffsetAsync(IntegrationConfig.IntegrationTopic);
            int partitionId = 0;

            //messages should be sent to the specified partitionId rather than using the key to select the broker route
            for (int i = 0; i < 20; i++)
            {
                await producer.SendMessageAsync(IntegrationConfig.IntegrationTopic, new[] { new Message(i.ToString(), "key") }, 1, null, MessageCodec.CodecNone, partitionId);
            }

            //consume from partitionId to verify that the data was sent to the correct partition
            var consumer = new Consumer(new ConsumerOptions(IntegrationConfig.IntegrationTopic, router) { PartitionWhitelist = { partitionId } }, offsets.Select(x => new OffsetPosition(x.PartitionId, x.Offsets.Max())).ToArray());

            for (int i = 0; i < 20; i++)
            {
                Message result = null;// = consumer.Consume().Take(1).First();
                await Task.Run(() => result = consumer.Consume().Take(1).First());
                Assert.That(result.Value.ToUtf8String(), Is.EqualTo(i.ToString()));
            }

            consumer.Dispose();
            producer.Dispose();
        }
Developer: pooyasoleimany, Project: KafkaNetClient, Lines of code: 30, Source: ProducerConsumerIntegrationTests.cs

Example 12: ConsumerShouldBeAbleToSeekBackToEarlierOffset

        public void ConsumerShouldBeAbleToSeekBackToEarlierOffset()
        {
            var expected = new List<string> { "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12", "13", "14", "15", "16", "17", "18", "19" };
            var testId = Guid.NewGuid().ToString();

            using (var router = new BrokerRouter(new KafkaOptions(IntegrationConfig.IntegrationUri)))
            using (var producer = new Producer(router))
            {
                var offsets = producer.GetTopicOffsetAsync(IntegrationConfig.IntegrationTopic).Result
                    .Select(x => new OffsetPosition(x.PartitionId, x.Offsets.Max())).ToArray();

                using (var consumer = new Consumer(new ConsumerOptions(IntegrationConfig.IntegrationTopic, router) { MaxWaitTimeForMinimumBytes = TimeSpan.Zero }, offsets))
                {
                    for (int i = 0; i < 20; i++)
                    {
                        producer.SendMessageAsync(IntegrationConfig.IntegrationTopic, new[] { new Message(i.ToString(), testId) }).Wait();
                    }

                    var sentMessages = consumer.Consume().Take(20).ToList();

                    //ensure the produced messages arrived
                    IntegrationConfig.NoDebugLog.InfoFormat(IntegrationConfig.Highlight("Message order:  {0}", string.Join(", ", sentMessages.Select(x => x.Value.ToUtf8String()).ToList())));

                    Assert.That(sentMessages.Count, Is.EqualTo(20));
                    Assert.That(sentMessages.Select(x => x.Value.ToUtf8String()).ToList(), Is.EqualTo(expected));
                    Assert.That(sentMessages.Any(x => x.Key.ToUtf8String() != testId), Is.False);

                    //seek back to initial offset
                    consumer.SetOffsetPosition(offsets);

                    var resetPositionMessages = consumer.Consume().Take(20).ToList();

                    //ensure all produced messages arrive again
                    IntegrationConfig.NoDebugLog.InfoFormat(IntegrationConfig.Highlight("Message order:  {0}", string.Join(", ", resetPositionMessages.Select(x => x.Value).ToList())));

                    Assert.That(resetPositionMessages.Count, Is.EqualTo(20));
                    Assert.That(resetPositionMessages.Select(x => x.Value.ToUtf8String()).ToList(), Is.EqualTo(expected));
                    Assert.That(resetPositionMessages.Any(x => x.Key.ToUtf8String() != testId), Is.False);
                }
            }
        }
Developer: pooyasoleimany, Project: KafkaNetClient, Lines of code: 41, Source: ProducerConsumerIntegrationTests.cs

Example 13: ConsumerShouldConsumeInSameOrderAsAsyncProduced

        /// <summary>
        /// Order should remain the same within the same ack level and partition.
        /// </summary>
        /// <returns></returns>
        public async Task ConsumerShouldConsumeInSameOrderAsAsyncProduced()
        {
            int partition = 0;
            int numberOfMessage = 200;
            IntegrationConfig.NoDebugLog.InfoFormat(IntegrationConfig.Highlight("create BrokerRouter"));
            var router = new BrokerRouter(new KafkaOptions(IntegrationConfig.IntegrationUri));
            int causesRaceConditionOldVersion = 2;
            var producer = new Producer(router, causesRaceConditionOldVersion) { BatchDelayTime = TimeSpan.Zero };//this is slow on purpose
            //this is not slow  var producer = new Producer(router);
            IntegrationConfig.NoDebugLog.InfoFormat(IntegrationConfig.Highlight("create producer"));
            List<OffsetResponse> offsets = await producer.GetTopicOffsetAsync(IntegrationConfig.IntegrationTopic);
            IntegrationConfig.NoDebugLog.InfoFormat(IntegrationConfig.Highlight("request Offset"));
            List<Task> sendList = new List<Task>(numberOfMessage);
            for (int i = 0; i < numberOfMessage; i++)
            {
                var sendTask = producer.SendMessageAsync(IntegrationConfig.IntegrationTopic, new[] { new Message(i.ToString()) }, 1, null, MessageCodec.CodecNone, partition);
                sendList.Add(sendTask);
            }

            await Task.WhenAll(sendList.ToArray());
            IntegrationConfig.NoDebugLog.InfoFormat(IntegrationConfig.Highlight("done send"));

            IntegrationConfig.NoDebugLog.InfoFormat(IntegrationConfig.Highlight("create Consumer"));
            ConsumerOptions consumerOptions = new ConsumerOptions(IntegrationConfig.IntegrationTopic, router);
            consumerOptions.PartitionWhitelist = new List<int> { partition };

            Consumer consumer = new Consumer(consumerOptions, offsets.Select(x => new OffsetPosition(x.PartitionId, x.Offsets.Max())).ToArray());

            int expected = 0;
            IntegrationConfig.NoDebugLog.InfoFormat(IntegrationConfig.Highlight("start Consume"));
            await Task.Run((() =>
            {
                var results = consumer.Consume().Take(numberOfMessage).ToList();
                Assert.IsTrue(results.Count() == numberOfMessage, "did not consume all messages");
                IntegrationConfig.NoDebugLog.InfoFormat(IntegrationConfig.Highlight("done Consume"));

                foreach (Message message in results)
                {
                    Assert.That(message.Value.ToUtf8String(), Is.EqualTo(expected.ToString()),
                        "Expected the message list in the correct order.");
                    expected++;
                }
            }));
            IntegrationConfig.NoDebugLog.InfoFormat(IntegrationConfig.Highlight("start producer Dispose"));
            producer.Dispose();
            IntegrationConfig.NoDebugLog.InfoFormat(IntegrationConfig.Highlight("start consumer Dispose"));
            consumer.Dispose();
            IntegrationConfig.NoDebugLog.InfoFormat(IntegrationConfig.Highlight("start router Dispose"));
            router.Dispose();
        }
Developer: pooyasoleimany, Project: KafkaNetClient, Lines of code: 54, Source: ProducerConsumerIntegrationTests.cs

Example 14: ConsumerShouldConsumeInSameOrderAsProduced

        public void ConsumerShouldConsumeInSameOrderAsProduced()
        {
            var producer = new Producer(_router);

            var offsets = producer.GetTopicOffsetAsync("LoadTest").Result;

            var consumer = new Consumer(new ConsumerOptions("LoadTest", _router),
                offsets.Select(x => new OffsetPosition(x.PartitionId, x.Offsets.Max())).ToArray());

            var tasks = new List<Task<List<ProduceResponse>>>();
            for (int i = 0; i < 20; i++)
            {
                tasks.Add(producer.SendMessageAsync("LoadTest", new[] { new Message { Value = i.ToString(), Key = "1" } }));
            }
            Task.WaitAll(tasks.ToArray());

            var results = consumer.Consume().Take(20).ToList();

            for (int i = 0; i < 20; i++)
            {
                Assert.That(results[i].Value == i.ToString());
            }
        }
Developer: jmucza, Project: kafka-net, Lines of code: 23, Source: ProducerConsumerTests.cs

Example 15: ConsumerShouldMoveToNextAvailableOffsetWhenQueryingForNextMessage

        public async void ConsumerShouldMoveToNextAvailableOffsetWhenQueryingForNextMessage()
        {
            const int expectedCount = 1000;
            var options = new KafkaOptions(IntegrationConfig.IntegrationUri) { Log = new ConsoleLog() };

            using (var producerRouter = new BrokerRouter(options))
            using (var producer = new Producer(producerRouter))
            {
                //get current offset and reset consumer to top of log
                var offsets = await producer.GetTopicOffsetAsync(IntegrationConfig.IntegrationTopic).ConfigureAwait(false);

                using (var consumerRouter = new BrokerRouter(options))
                using (var consumer = new Consumer(new ConsumerOptions(IntegrationConfig.IntegrationTopic, consumerRouter),
                     offsets.Select(x => new OffsetPosition(x.PartitionId, x.Offsets.Max())).ToArray()))
                {
                    Console.WriteLine("Sending {0} test messages", expectedCount);
                    var response = await producer.SendMessageAsync(IntegrationConfig.IntegrationTopic, 
                        Enumerable.Range(0, expectedCount).Select(x => new Message(x.ToString())));

                    Assert.That(response.Any(x => x.Error != (int)ErrorResponseCode.NoError), Is.False, "Error occurred sending test messages to server.");

                    var stream = consumer.Consume();

                    Console.WriteLine("Reading message back out from consumer.");
                    var data = stream.Take(expectedCount).ToList();

                    var consumerOffset = consumer.GetOffsetPosition().OrderBy(x => x.Offset).ToList();
                    var serverOffset = await producer.GetTopicOffsetAsync(IntegrationConfig.IntegrationTopic).ConfigureAwait(false);
                    var positionOffset = serverOffset.Select(x => new OffsetPosition(x.PartitionId, x.Offsets.Max()))
                        .OrderBy(x => x.Offset)
                        .ToList();

                    Assert.That(consumerOffset, Is.EqualTo(positionOffset), "The consumerOffset position should match the server offset position.");
                    Assert.That(data.Count, Is.EqualTo(expectedCount), "We should have received 1000 messages from the server.");

                }
            }
        }
Developer: jsifantu, Project: kafka-net, Lines of code: 38, Source: ProducerConsumerIntegrationTests.cs


Note: The KafkaNet.Producer.GetTopicOffsetAsync examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets come from open-source projects contributed by their respective authors, and copyright remains with the original authors; consult the corresponding project's license before distributing or reusing the code. Do not reproduce without permission.