本文整理汇总了C#中KafkaNet.Producer类的典型用法代码示例。如果您正苦于以下问题:C# Producer类的具体用法?C# Producer怎么用?C# Producer使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
Producer类属于KafkaNet命名空间,在下文中一共展示了Producer类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C#代码示例。
示例1: Main
// Producer load harness: pushes a timestamped message to the "TestHarness"
// topic roughly ten times per second while a background task prints the
// observed per-second send rate.
public void Main(string[] args)
{
    var options = GetOptions(args);
    if (options == null) return;

    var sentTotal = 0;
    var previousTotal = 0;

    // Reports how many messages were sent during the last second.
    // Note: started below, never awaited — it lives for the process lifetime.
    var rateReporter = new Task(() =>
    {
        while (true)
        {
            var snapshot = sentTotal;
            Console.WriteLine("{0} messages in last second.", snapshot - previousTotal);
            previousTotal = snapshot;
            Thread.Sleep(1000);
        }
    });

    var kafkaOptions = new KafkaOptions(options.KafkaNodeUri);// { Log = new ConsoleLog() };
    using (var router = new BrokerRouter(kafkaOptions))
    using (var client = new KafkaNet.Producer(router))
    {
        rateReporter.Start();
        while (true)
        {
            Thread.Sleep(100);
            // Fire-and-forget send; the payload is the current tick count.
            client.SendMessageAsync("TestHarness", new[] { new Message() { Value = BitConverter.GetBytes(DateTime.Now.Ticks) } });
            sentTotal++;
        }
    }
}
示例2: Main
// Interactive console harness: a background consumer echoes everything that
// arrives on "TestHarness" while the foreground loop produces each line the
// user types, until the user types "quit".
static void Main(string[] args)
{
    var options = new KafkaOptions(new Uri("http://CSDKAFKA01:9092"), new Uri("http://CSDKAFKA02:9092"))
    {
        Log = new ConsoleLog()
    };
    var router = new BrokerRouter(options);
    var client = new Producer(router);

    // Background echo consumer; runs until the process exits.
    Task.Factory.StartNew(() =>
    {
        var consumer = new Consumer(new ConsumerOptions("TestHarness", router));
        foreach (var data in consumer.Consume())
        {
            Console.WriteLine("Response: P{0},O{1} : {2}", data.Meta.PartitionId, data.Meta.Offset, data.Value);
        }
    });

    Console.WriteLine("Type a message and press enter...");
    for (var line = Console.ReadLine(); line != "quit"; line = Console.ReadLine())
    {
        // Fire-and-forget publish of the typed line.
        client.SendMessageAsync("TestHarness", new[] { new Message { Value = line } });
    }

    // Dispose producer before router, mirroring construction order in reverse.
    using (client)
    using (router)
    {
    }
}
示例3: EnsureProducerDisposesRouter
// Disposing the producer must cascade the dispose to the router it owns.
public void EnsureProducerDisposesRouter()
{
    var mockRouter = _kernel.GetMock<IBrokerRouter>();

    using (new Producer(mockRouter.Object))
    {
    }

    mockRouter.Verify(x => x.Dispose(), Times.Once());
}
示例4: EnsureGzipCanDecompressMessageFromKafka
// Round-trip test: messages produced with gzip compression must be read back
// and decompressed correctly by the consumer.
//
// Fix: the original only disposed the producer and consumer (and never the
// router) via an empty using block AFTER the assertions, so any assertion
// failure leaked all three resources. Everything now lives inside proper
// using statements so cleanup is guaranteed.
public void EnsureGzipCanDecompressMessageFromKafka()
{
    using (var router = new BrokerRouter(_options))
    using (var producer = new Producer(router))
    {
        // Start consuming from the current head of partition 0 so only the
        // messages produced below are observed.
        var offsets = producer.GetTopicOffsetAsync(IntegrationConfig.IntegrationCompressionTopic).Result;

        using (var consumer = new Consumer(
            new ConsumerOptions(IntegrationConfig.IntegrationCompressionTopic, router) { PartitionWhitelist = new List<int>() { 0 } },
            offsets.Select(x => new OffsetPosition(x.PartitionId, x.Offsets.Max())).ToArray()))
        {
            const int numberOfMessages = 3;

            // Produce numbered messages, gzip-compressed, all to partition 0.
            for (int i = 0; i < numberOfMessages; i++)
            {
                producer.SendMessageAsync(IntegrationConfig.IntegrationCompressionTopic, new[] { new Message(i.ToString()) }, codec: MessageCodec.CodecGzip,
                    partition: 0);
            }

            // Allow up to one minute for the messages to arrive.
            var results = consumer.Consume(new CancellationTokenSource(TimeSpan.FromMinutes(1)).Token).Take(numberOfMessages).ToList();

            // Each decompressed payload must match what was produced, in order.
            for (int i = 0; i < numberOfMessages; i++)
            {
                Assert.That(results[i].Value.ToUtf8String(), Is.EqualTo(i.ToString()));
            }
        }
    }
}
示例5: Run
// Wires up a producer against a local broker on the default Kafka port,
// then hands control to Process().
public void Run()
{
    var kafkaOptions = new KafkaOptions(new Uri("http://localhost:9092"));
    var brokerRouter = new BrokerRouter(kafkaOptions);
    _producer = new KafkaNet.Producer(brokerRouter);
    Process();
}
示例6: ProducerShouldReportCorrectAmountOfAsyncRequests
// Verifies Producer.AsyncCount tracks in-flight produce requests: it should
// rise to 1 while a send is blocked on the fake broker and return to 0 once
// the response is released and the send task completes.
public void ProducerShouldReportCorrectAmountOfAsyncRequests()
{
// Semaphore starts at 0 so the fake broker's response handler blocks
// until we explicitly Release() below.
var semaphore = new SemaphoreSlim(0);
var routerProxy = new FakeBrokerRouter();
//block the second call returning from send message async
routerProxy.BrokerConn0.ProduceResponseFunction = () => { semaphore.Wait(); return new ProduceResponse(); };
var router = routerProxy.Create();
// maximumAsyncRequests: 1 and BatchSize 1 force each message into its own
// immediately-dispatched request, so AsyncCount moves in steps of one.
using (var producer = new Producer(router, maximumAsyncRequests: 1) { BatchSize = 1 })
{
var messages = new[] { new Message("1") };
// Nothing in flight before the first send.
Assert.That(producer.AsyncCount, Is.EqualTo(0));
var sendTask = producer.SendMessageAsync(BrokerRouterProxy.TestTopic, messages);
// Spin-wait until the producer registers the request as in flight.
TaskTest.WaitFor(() => producer.AsyncCount > 0);
Assert.That(producer.AsyncCount, Is.EqualTo(1), "One async operation should be sending.");
// Unblock the fake broker so the pending request can complete.
semaphore.Release();
sendTask.Wait(TimeSpan.FromMilliseconds(500));
Assert.That(sendTask.IsCompleted, Is.True, "Send task should be marked as completed.");
Assert.That(producer.AsyncCount, Is.EqualTo(0), "Async should now show zero count.");
}
}
示例7: ProtocolGateway
// Produces a single message, then fetches it back through the low-level
// ProtocolGateway and confirms the payload survives the round trip.
public async Task ProtocolGateway()
{
    const int partitionId = 0;
    var router = new BrokerRouter(Options);
    var producer = new Producer(router);

    // Produce one message with a unique payload so this run is identifiable.
    string expectedPayload = Guid.NewGuid().ToString();
    var produceResult = await producer.SendMessageAsync(IntegrationConfig.IntegrationTopic, new[] { new Message(expectedPayload) }, 1, null, MessageCodec.CodecNone, partitionId);
    var startOffset = produceResult.FirstOrDefault().Offset;

    // Fetch directly from the offset the produce call reported.
    var gateway = new ProtocolGateway(IntegrationConfig.IntegrationUri);
    var fetchRequest = new FetchRequest
    {
        MaxWaitTime = 1000,
        MinBytes = 10,
        Fetches = new List<Fetch>
        {
            new Fetch
            {
                Topic = IntegrationConfig.IntegrationTopic,
                PartitionId = partitionId,
                Offset = startOffset,
                MaxBytes = 32000,
            }
        }
    };

    var fetchResponse = await gateway.SendProtocolRequest(fetchRequest, IntegrationConfig.IntegrationTopic, partitionId);
    Assert.IsTrue(fetchResponse.Messages.FirstOrDefault().Value.ToUtf8String() == expectedPayload);
}
示例8: FetchMessagesCacheContainsAllRequestTest
// Produces a batch of 10 messages and verifies ManualConsumer.FetchMessages
// returns the correct 5-message windows when asked for the first half and
// then the second half of the batch.
public async Task FetchMessagesCacheContainsAllRequestTest()
{
// Creating a broker router and a protocol gateway for the producer and consumer
var brokerRouter = new BrokerRouter(_options);
var protocolGateway = new ProtocolGateway(_kafkaUri);
Producer producer = new Producer(brokerRouter);
ManualConsumer consumer = new ManualConsumer(_partitionId, _topic, protocolGateway, "TestClient", DefaultMaxMessageSetSize);
// Record the tail offset before producing, so fetches below are anchored
// at the start of our own batch.
var offset = await consumer.FetchLastOffset();
// Creating 10 messages (fetched below in two windows of 5)
List<Message> messages = CreateTestMessages(10, 1);
await producer.SendMessageAsync(_topic, messages, partition: _partitionId, timeout: TimeSpan.FromSeconds(3));
// Now let's consume the first 5 messages
var result = (await consumer.FetchMessages(5, offset)).ToList();
CheckMessages(messages.Take(5).ToList(), result);
// Now let's consume the remaining 5, starting 5 past the anchor offset
result = (await consumer.FetchMessages(5, offset + 5)).ToList();
CheckMessages(messages.Skip(5).ToList(), result);
}
示例9: ConsumerShouldConsumeInSameOrderAsProduced
// Produces 20 numbered messages with a shared key and verifies the consumer
// yields them in exactly the order they were produced.
public void ConsumerShouldConsumeInSameOrderAsProduced()
{
    var expected = Enumerable.Range(0, 20).Select(i => i.ToString()).ToList();
    var testId = Guid.NewGuid().ToString();

    using (var router = new BrokerRouter(new KafkaOptions(IntegrationConfig.IntegrationUri)))
    using (var producer = new Producer(router))
    {
        // Anchor the consumer at the current tail so only our messages show up.
        var offsets = producer.GetTopicOffsetAsync(IntegrationConfig.IntegrationTopic).Result;
        using (var consumer = new Consumer(new ConsumerOptions(IntegrationConfig.IntegrationTopic, router),
            offsets.Select(x => new OffsetPosition(x.PartitionId, x.Offsets.Max())).ToArray()))
        {
            // Sequential, awaited sends preserve production order.
            for (int i = 0; i < 20; i++)
            {
                producer.SendMessageAsync(IntegrationConfig.IntegrationTopic, new[] { new Message(i.ToString(), testId) }).Wait();
            }

            var results = consumer.Consume().Take(20).ToList();

            //ensure the produced messages arrived
            Console.WriteLine("Message order: {0}", string.Join(", ", results.Select(x => x.Value.ToUtf8String()).ToList()));

            Assert.That(results.Count, Is.EqualTo(20));
            Assert.That(results.Select(x => x.Value.ToUtf8String()).ToList(), Is.EqualTo(expected), "Expected the message list in the correct order.");
            // Every message must carry this run's key.
            Assert.That(results.Any(x => x.Key.ToUtf8String() != testId), Is.False);
        }
    }
}
示例10: ConsumerShouldBeAbleToSeekBackToEarlierOffset
// Verifies the consumer can seek back to an earlier offset position and
// re-consume the same messages.
//
// Fix: the original's second verification loop re-asserted the ORIGINAL
// `results` list instead of consuming again after SetOffsetPosition, so the
// seek was never actually tested. We now consume a second batch after the
// seek and assert on that.
public void ConsumerShouldBeAbleToSeekBackToEarlierOffset()
{
    var producer = new Producer(_router);
    // Capture the tail offsets before producing: this is the seek target.
    var offsets = producer.GetTopicOffsetAsync("LoadTest").Result
        .Select(x => new OffsetPosition(x.PartitionId, x.Offsets.Max())).ToArray();
    var consumer = new Consumer(new ConsumerOptions("LoadTest", _router), offsets);

    // Produce 20 numbered messages in parallel, all with the same key so they
    // land on the same partition and preserve relative order.
    var tasks = new List<Task<List<ProduceResponse>>>();
    for (int i = 0; i < 20; i++)
    {
        tasks.Add(producer.SendMessageAsync("LoadTest", new[] { new Message { Value = i.ToString(), Key = "1" } }));
    }
    Task.WaitAll(tasks.ToArray());

    var results = consumer.Consume().Take(20).ToList();

    //ensure the produced messages arrived
    for (int i = 0; i < 20; i++)
    {
        Assert.That(results[i].Value == i.ToString());
    }

    //seek back to initial offset
    consumer.SetOffsetPosition(offsets);

    //ensure all produced messages arrive again by consuming a fresh batch
    var replayed = consumer.Consume().Take(20).ToList();
    for (int i = 0; i < 20; i++)
    {
        Assert.That(replayed[i].Value == i.ToString());
    }
}
示例11: ConsumerShouldBeAbleToGetCurrentOffsetInformation
// After consuming 20 messages, the consumer's reported offset positions
// should be exactly 20 ahead (summed across partitions) of where it started.
public void ConsumerShouldBeAbleToGetCurrentOffsetInformation()
{
    const int messageCount = 20;

    var producer = new Producer(_router);
    var startOffsets = producer.GetTopicOffsetAsync("LoadTest").Result
        .Select(x => new OffsetPosition(x.PartitionId, x.Offsets.Max())).ToArray();
    var consumer = new Consumer(new ConsumerOptions("LoadTest", _router), startOffsets);

    // Produce the messages in parallel; a shared key keeps them on one partition.
    var sendTasks = new List<Task<List<ProduceResponse>>>();
    for (int i = 0; i < messageCount; i++)
    {
        sendTasks.Add(producer.SendMessageAsync("LoadTest", new[] { new Message { Value = i.ToString(), Key = "1" } }));
    }
    Task.WaitAll(sendTasks.ToArray());

    var consumed = consumer.Consume().Take(messageCount).ToList();

    //ensure the produced messages arrived
    for (int i = 0; i < messageCount; i++)
    {
        Assert.That(consumed[i].Value == i.ToString());
    }

    //the current offsets should be 20 positions higher than start
    var currentOffsets = consumer.GetOffsetPosition();
    Assert.That(currentOffsets.Sum(x => x.Offset) - startOffsets.Sum(x => x.Offset), Is.EqualTo(20));
}
示例12: DestinationKafka
// Builds a batching producer over the given broker URIs (flushes every 1000
// messages or 1 second) and subscribes to statistics heartbeats.
public DestinationKafka(params Uri[] servers)
{
    var kafkaOptions = new KafkaOptions(servers) { Log = new ConsoleLogger() };
    _router = new BrokerRouter(kafkaOptions);
    _producer = new Producer(_router, maximumMessageBuffer: 5000, maximumAsyncRequests: 10)
    {
        BatchSize = 1000,
        BatchDelayTime = TimeSpan.FromSeconds(1)
    };
    StatisticsTracker.OnStatisticsHeartbeat += StatisticsTracker_OnStatisticsHeartbeat;
}
示例13: SendAsyncShouldGetOneResultForMessage
// Sending a single message should yield exactly one produce result.
//
// Fix: changed `async void` to `async Task` — async void test methods let
// exceptions escape the test runner unobserved, so a failing await could pass
// silently (or crash the process) instead of failing the test.
public async Task SendAsyncShouldGetOneResultForMessage()
{
    using (var router = new BrokerRouter(new KafkaOptions(IntegrationConfig.IntegrationUri)))
    using (var producer = new Producer(router))
    {
        var result = await producer.SendMessageAsync(IntegrationConfig.IntegrationTopic, new[] { new Message(Guid.NewGuid().ToString()) });
        Assert.That(result.Count, Is.EqualTo(1));
    }
}
示例14: SendAsyncShouldGetAResultForEachPartitionSentTo
// Three keyless messages spread across the topic's two partitions should
// yield one produce result per partition written to.
//
// Fix: changed `async void` to `async Task` — async void test methods let
// exceptions escape the test runner unobserved, so a failing await could pass
// silently (or crash the process) instead of failing the test.
public async Task SendAsyncShouldGetAResultForEachPartitionSentTo()
{
    using (var router = new BrokerRouter(new KafkaOptions(IntegrationConfig.IntegrationUri)))
    using (var producer = new Producer(router))
    {
        var result = await producer.SendMessageAsync(IntegrationConfig.IntegrationTopic, new[] { new Message("1"), new Message("2"), new Message("3") });
        // Assumes the integration topic has exactly 2 partitions — TODO confirm.
        Assert.That(result.Count, Is.EqualTo(2));
    }
}
示例15: ProducerAckLevel
// With acks=0 the broker never reports an offset (-1 sentinel); with acks=1
// it acknowledges the write and returns a real (non-negative) offset.
//
// Fix: Assert.AreEqual's parameter order is (expected, actual); the original
// passed them reversed, which produces misleading "expected X but was Y"
// failure messages.
public async Task ProducerAckLevel()
{
    using (var router = new BrokerRouter(new KafkaOptions(IntegrationConfig.IntegrationUri) { Log = IntegrationConfig.NoDebugLog }))
    using (var producer = new Producer(router))
    {
        var responseAckLevel0 = await producer.SendMessageAsync(new Message("Ack Level 0"), IntegrationConfig.IntegrationTopic, acks: 0, partition: 0);
        Assert.AreEqual(-1, responseAckLevel0.Offset);

        var responseAckLevel1 = await producer.SendMessageAsync(new Message("Ack Level 1"), IntegrationConfig.IntegrationTopic, acks: 1, partition: 0);
        Assert.That(responseAckLevel1.Offset, Is.GreaterThan(-1));
    }
}