This article collects typical usage examples of the ReplaySubject.Select method in C#. If you are wondering what ReplaySubject.Select does, how to call it, or what it looks like in real code, the hand-picked examples below may help. You can also explore further usage examples of the containing class, ReplaySubject.
The following shows four code examples of the ReplaySubject.Select method, sorted by popularity by default.
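Before the examples, here is a minimal self-contained sketch of the pattern they all rely on (this sketch is not taken from any of the examples below; it assumes only the System.Reactive NuGet package): a ReplaySubject<T> buffers the values pushed into it and replays them to late subscribers, and Select projects each buffered or live value, much like LINQ's Select.

// Minimal sketch, assuming the System.Reactive package.
using System;
using System.Reactive.Linq;
using System.Reactive.Subjects;

class ReplaySubjectSelectSketch
{
    static void Main()
    {
        var subject = new ReplaySubject<int>();
        subject.OnNext(1);
        subject.OnNext(2);

        // Select is the standard Rx projection operator; because this is a
        // ReplaySubject, the late subscriber still receives 1 and 2 (as 10 and 20).
        subject.Select(x => x * 10)
               .Subscribe(x => Console.WriteLine(x)); // prints 10, 20

        subject.OnNext(3); // prints 30
        subject.OnCompleted();
    }
}

The examples below use the same idea: progress notifications or received messages are recorded in a ReplaySubject, then Select projects out the value of interest before the sequence is awaited as a list.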
Example 1: GetProgressFromASyncOperation
public async Task GetProgressFromASyncOperation()
{
    // Credentials handler invoked when the remote requires authentication.
    CredentialsHandler credentials = (url, usernameFromUrl, types) =>
        new UsernamePasswordCredentials
        {
            Username = "shiftkey-tester",
            Password = "haha-password"
        };

    var repository = new ObservableRepository(
        @"C:\Users\brendanforster\Documents\GìtHūb\testing-pushspecs",
        credentials);

    Func<int, int> translate = x => x / 3;

    // ReplaySubjects record every progress notification raised by the pull and push operations.
    var pullObserver = new ReplaySubject<Tuple<string, int>>();
    var pushObserver = new ReplaySubject<Tuple<string, int>>();

    var pullResult = await repository.Pull(pullObserver);
    Assert.NotEqual(MergeStatus.Conflicts, pullResult.Status);

    await repository.Push(pushObserver);

    // Project pull progress into the lower range and push progress into the upper range;
    // the combined sequence should finish at 100.
    var list = await pullObserver.Select(x => translate(x.Item2) * 2)
        .Concat(pushObserver.Select(x => 67 + translate(x.Item2)))
        .ToList();

    Assert.NotEmpty(list);
    Assert.Equal(100, list.Last());
}
Example 2: CanCloneARepository
public async Task CanCloneARepository()
{
    using (var directory = TestDirectory.Create())
    {
        // The ReplaySubject records every progress notification raised during the clone.
        var cloneObserver = new ReplaySubject<Tuple<string, int>>();

        using (await ObservableRepository.Clone(
            "https://github.com/shiftkey/rxui-design-guidelines.git",
            directory.Path,
            cloneObserver))
        {
            Assert.NotEmpty(Directory.GetFiles(directory.Path));

            // Project out just the percentage component; the clone should finish at 100%.
            var progressList = await cloneObserver.Select(x => x.Item2).ToList();
            Assert.Equal(100, progressList.Last());
        }
    }
}
Example 3: ConsumerFollowsRebalancingPartitions
public async void ConsumerFollowsRebalancingPartitions()
{
    kafka4net.Tracing.EtwTrace.Marker("ConsumerFollowsRebalancingPartitions");

    // create a topic
    var topic = "topic33." + _rnd.Next();
    VagrantBrokerUtil.CreateTopic(topic, 11, 3);

    // Stop two brokers to let leadership shift to broker1.
    VagrantBrokerUtil.StopBroker("broker2");
    VagrantBrokerUtil.StopBroker("broker3");
    await Task.Delay(TimeSpan.FromSeconds(5));

    // now start back up
    VagrantBrokerUtil.StartBroker("broker2");
    VagrantBrokerUtil.StartBroker("broker3");

    // wait a little for everything to start
    await Task.Delay(TimeSpan.FromSeconds(5));

    // we should have all of them with leader 1
    var cluster = new Cluster(_seed2Addresses);
    await cluster.ConnectAsync();
    var partitionMeta = await cluster.GetOrFetchMetaForTopicAsync(topic);

    // make sure they're all on a single leader
    Assert.AreEqual(1, partitionMeta.GroupBy(p => p.Leader).Count());

    // now publish messages
    const int count = 25000;
    var producer = new Producer(cluster, new ProducerConfiguration(topic));
    _log.Debug("Connecting");
    await producer.ConnectAsync();

    _log.Debug("Filling out {0} with {1} messages", topic, count);
    var sentList = await Enumerable.Range(0, count)
        .Select(i => new Message { Value = BitConverter.GetBytes(i) })
        .ToObservable()
        .Do(producer.Send)
        .Select(msg => BitConverter.ToInt32(msg.Value, 0))
        .ToList();

    await Task.Delay(TimeSpan.FromSeconds(1));
    _log.Info("Done sending messages. Closing producer.");
    await producer.CloseAsync(TimeSpan.FromSeconds(5));
    _log.Info("Producer closed, starting consumer subscription.");
    await Task.Delay(TimeSpan.FromSeconds(1));

    var heads = await cluster.FetchPartitionOffsetsAsync(topic, ConsumerLocation.TopicStart);
    var tails = await cluster.FetchPartitionOffsetsAsync(topic, ConsumerLocation.TopicEnd);
    var messagesInTopic = (int)tails.MessagesSince(heads);
    _log.Info("Topic offsets indicate producer sent {0} messages.", messagesInTopic);

    var consumer = new Consumer(new ConsumerConfiguration(_seed2Addresses, topic, new StartPositionTopicStart(), maxBytesPerFetch: 4 * 8));
    var current = 0;
    var received = new ReplaySubject<ReceivedMessage>();
    Task rebalanceTask = null;
    var consumerSubscription = consumer.OnMessageArrived.
        Subscribe(async msg =>
        {
            current++;
            if (current == 18)
            {
                rebalanceTask = Task.Factory.StartNew(VagrantBrokerUtil.RebalanceLeadership, CancellationToken.None, TaskCreationOptions.None, TaskScheduler.Default);
            }
            received.OnNext(msg);
            //_log.Info("Got: {0}", BitConverter.ToInt32(msg.Value, 0));
        });
    await consumer.IsConnected;

    _log.Info("Waiting for receiver complete");
    var receivedList = await received.Select(msg => BitConverter.ToInt32(msg.Value, 0)).
        Take(messagesInTopic).
        TakeUntil(DateTime.Now.AddMinutes(3)).
        ToList().
        ToTask();

    if (rebalanceTask != null)
    {
        _log.Info("Waiting for rebalance complete");
        await rebalanceTask; //.TimeoutAfter(TimeSpan.FromSeconds(10));
        _log.Info("Rebalance complete");
    }

    _log.Info("Receiver complete. Disposing Subscription");
    consumerSubscription.Dispose();
    _log.Info("Consumer subscription disposed. Closing consumer.");
    consumer.Dispose();
    _log.Info("Consumer closed.");

    tails = await cluster.FetchPartitionOffsetsAsync(topic, ConsumerLocation.TopicEnd);
    await cluster.CloseAsync(TimeSpan.FromSeconds(5));

    _log.Info("Sum of offsets: {0}", tails.MessagesSince(heads));
    _log.Info("Offsets: [{0}]", string.Join(",", tails.Partitions.Select(p => string.Format("{0}:{1}", p, tails.NextOffset(p)))));
    //......... part of the code is omitted here .........
Example 4: ListenerRecoveryTest
public async void ListenerRecoveryTest()
{
    kafka4net.Tracing.EtwTrace.Marker("ListenerRecoveryTest");
    const int count = 10000;
    var topic = "part33." + _rnd.Next();
    VagrantBrokerUtil.CreateTopic(topic, 6, 3);

    var producer = new Producer(_seed2Addresses, new ProducerConfiguration(topic));
    _log.Debug("Connecting");
    await producer.ConnectAsync();

    _log.Debug("Filling out {0} with {1} messages", topic, count);
    var sentList = await Enumerable.Range(0, count)
        .Select(i => new Message { Value = BitConverter.GetBytes(i) })
        .ToObservable()
        .Do(producer.Send)
        .Select(msg => BitConverter.ToInt32(msg.Value, 0))
        .ToList();

    await Task.Delay(TimeSpan.FromSeconds(1));
    var heads = await producer.Cluster.FetchPartitionOffsetsAsync(topic, ConsumerLocation.TopicStart);
    var tails = await producer.Cluster.FetchPartitionOffsetsAsync(topic, ConsumerLocation.TopicEnd);
    _log.Info("Done sending messages. Closing producer.");
    await producer.CloseAsync(TimeSpan.FromSeconds(5));
    _log.Info("Producer closed, starting consumer subscription.");

    var messagesInTopic = (int)tails.MessagesSince(heads);
    _log.Info("Topic offsets indicate producer sent {0} messages.", messagesInTopic);

    var consumer = new Consumer(new ConsumerConfiguration(_seed2Addresses, topic, new StartPositionTopicStart(), maxBytesPerFetch: 4 * 8));
    var current = 0;
    var received = new ReplaySubject<ReceivedMessage>();
    Task stopBrokerTask = null;
    var consumerSubscription = consumer.OnMessageArrived.
        Subscribe(async msg =>
        {
            current++;
            if (current == 18)
            {
                stopBrokerTask = Task.Factory.StartNew(() => VagrantBrokerUtil.StopBrokerLeaderForPartition(consumer.Cluster, consumer.Topic, msg.Partition), CancellationToken.None, TaskCreationOptions.None, TaskScheduler.Default);
            }
            received.OnNext(msg);
            //_log.Info("Got: {0}", BitConverter.ToInt32(msg.Value, 0));
        });
    await consumer.IsConnected;

    _log.Info("Waiting for receiver complete");
    var receivedList = await received.Select(msg => BitConverter.ToInt32(msg.Value, 0)).Take(messagesInTopic).
        TakeUntil(DateTime.Now.AddSeconds(60)).ToList().ToTask();
    if (stopBrokerTask != null)
        await stopBrokerTask.TimeoutAfter(TimeSpan.FromSeconds(10));

    tails = await consumer.Cluster.FetchPartitionOffsetsAsync(topic, ConsumerLocation.TopicEnd);
    _log.Info("Receiver complete. Disposing Subscription");
    consumerSubscription.Dispose();
    _log.Info("Consumer subscription disposed. Closing consumer.");
    consumer.Dispose();
    _log.Info("Consumer closed.");

    _log.Info("Sum of offsets: {0}", tails.MessagesSince(heads));
    _log.Info("Offsets: [{0}]", string.Join(",", tails.Partitions.Select(p => string.Format("{0}:{1}", p, tails.NextOffset(p)))));

    if (messagesInTopic != receivedList.Count)
    {
        // log some debug info.
        _log.Error("Did not receive all messages. Messages sent but NOT received: {0}", string.Join(",", sentList.Except(receivedList).OrderBy(i => i)));
    }
    Assert.AreEqual(messagesInTopic, receivedList.Count);
    kafka4net.Tracing.EtwTrace.Marker("/ListenerRecoveryTest");
}