This article collects typical usage examples of the C# Subject.Take method. If you have been wondering what exactly Subject.Take does, how to call it, or how it is used in practice, the hand-picked code samples below should help. You can also explore further usage examples of the containing class, Subject.
The following presents 5 code examples of the Subject.Take method, sorted by popularity by default.
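Before the full examples, here is a minimal, self-contained sketch of the basic Subject.Take behaviour (not taken from the examples below; the names are illustrative): the sequence produced by Take(n) completes as soon as n elements have been pushed through the subject, which is what makes it useful for "wait for N events" patterns.
using System;
using System.Reactive.Linq;
using System.Reactive.Subjects;
using System.Reactive.Threading.Tasks;
using System.Threading.Tasks;

class TakeDemo
{
    static async Task Main()
    {
        var subject = new Subject<int>();

        // Take(2) completes the downstream sequence after two elements,
        // so the task below finishes once two values have been pushed.
        Task<int> lastOfTwo = subject.Take(2).ToTask();

        subject.OnNext(1);
        subject.OnNext(2); // completes the Take(2) sequence
        subject.OnNext(3); // no longer observed by the completed subscription

        Console.WriteLine(await lastOfTwo); // 2 - ToTask yields the last element
    }
}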
Example 1: ReadOffsets
public async void ReadOffsets()
{
    kafka4net.Tracing.EtwTrace.Marker("ReadOffsets");
    var sentEvents = new Subject<Message>();
    var topic = "part12." + _rnd.Next();
    VagrantBrokerUtil.CreateTopic(topic, 1, 1);

    var cluster = new Cluster(_seed2Addresses);
    await cluster.ConnectAsync();
    var producer = new Producer(cluster, new ProducerConfiguration(topic, maxMessageSetSizeInBytes: 1024 * 1024));
    producer.OnSuccess += e => e.ForEach(sentEvents.OnNext);
    await producer.ConnectAsync();

    // read offsets of empty queue
    var heads = await cluster.FetchPartitionOffsetsAsync(topic, ConsumerLocation.TopicStart);
    var tails = await cluster.FetchPartitionOffsetsAsync(topic, ConsumerLocation.TopicEnd);
    Assert.AreEqual(1, heads.Partitions.Count(), "Expected just one head partition");
    Assert.AreEqual(1, tails.Partitions.Count(), "Expected just one tail partition");
    Assert.AreEqual(0L, heads.NextOffset(heads.Partitions.First()), "Expected start at 0");
    Assert.AreEqual(0L, tails.NextOffset(tails.Partitions.First()), "Expected end at 0");

    // log the broker selected as master
    var brokerMeta = cluster.FindBrokerMetaForPartitionId(topic, heads.Partitions.First());
    _log.Info("Partition Leader is {0}", brokerMeta);

    // saw some inconsistency, so run this a few times.
    const int count = 1100;
    const int loops = 10;
    for (int i = 0; i < loops; i++)
    {
        // NOTE that the configuration for the test machines through vagrant are set to 1MB rolling file segments
        // so we need to generate large messages to force multiple segments to be created.

        // send count messages
        var t = sentEvents.Take(count).ToTask();
        Enumerable.Range(1, count).
            Select(_ => new Message { Value = new byte[1024] }).
            ForEach(producer.Send);
        _log.Info("Waiting for {0} sent messages", count);
        await t;

        // re-read offsets after messages published
        await Task.Delay(TimeSpan.FromSeconds(2)); // NOTE: There seems to be a race condition on the Kafka broker that the offsets are not immediately available after getting a successful produce response
        tails = await cluster.FetchPartitionOffsetsAsync(topic, ConsumerLocation.TopicEnd);
        _log.Info("2:After loop {0} of {1} messages, Next Offset is {2}", i + 1, count, tails.NextOffset(tails.Partitions.First()));
        Assert.AreEqual(count * (i + 1), tails.NextOffset(tails.Partitions.First()), "Expected end at " + count * (i + 1));
    }

    _log.Info("Closing producer");
    await producer.CloseAsync(TimeSpan.FromSeconds(5));
    await Task.Delay(TimeSpan.FromSeconds(1));

    // re-read offsets after messages published
    heads = await cluster.FetchPartitionOffsetsAsync(topic, ConsumerLocation.TopicStart);
    tails = await cluster.FetchPartitionOffsetsAsync(topic, ConsumerLocation.TopicEnd);

    Assert.AreEqual(1, heads.Partitions.Count(), "Expected just one head partition");
    Assert.AreEqual(1, tails.Partitions.Count(), "Expected just one tail partition");
    Assert.AreEqual(0L, heads.NextOffset(heads.Partitions.First()), "Expected start at 0");
    Assert.AreEqual(count * loops, tails.NextOffset(tails.Partitions.First()), "Expected end at " + count * loops);

    kafka4net.Tracing.EtwTrace.Marker("/ReadOffsets");
}
Example 2: SaveOffsetsAndResumeConsuming
public async void SaveOffsetsAndResumeConsuming()
{
    kafka4net.Tracing.EtwTrace.Marker("SaveOffsetsAndResumeConsuming");
    var sentEvents = new Subject<Message>();
    var topic = "part12." + _rnd.Next();
    VagrantBrokerUtil.CreateTopic(topic, 5, 2);

    var producer = new Producer(_seed2Addresses, new ProducerConfiguration(topic));
    producer.OnSuccess += e => e.ForEach(sentEvents.OnNext);
    await producer.ConnectAsync();

    // send 100 messages
    Enumerable.Range(1, 100).
        Select(i => new Message { Value = BitConverter.GetBytes(i) }).
        ForEach(producer.Send);
    _log.Info("Waiting for 100 sent messages");
    sentEvents.Subscribe(msg => _log.Debug("Sent {0}", BitConverter.ToInt32(msg.Value, 0)));
    await sentEvents.Take(100).ToTask();

    var offsets1 = await producer.Cluster.FetchPartitionOffsetsAsync(topic, ConsumerLocation.TopicStart);
    _log.Info("Closing producer");
    await producer.CloseAsync(TimeSpan.FromSeconds(5));

    // now consume the "first" 50. Stop, save offsets, and restart.
    var consumer1 = new Consumer(new ConsumerConfiguration(_seed2Addresses, topic, offsets1));
    var receivedEvents = new List<int>(100);
    _log.Info("Consuming first half of messages.");
    await consumer1.OnMessageArrived
        .Do(msg =>
        {
            var value = BitConverter.ToInt32(msg.Value, 0);
            _log.Info("Consumer1 Received value {0} from partition {1} at offset {2}", value, msg.Partition, msg.Offset);
            receivedEvents.Add(value);
            offsets1.UpdateOffset(msg.Partition, msg.Offset);
        })
        .Take(50);
    //await consumer1.IsConnected;
    _log.Info("Closing first consumer");
    consumer1.Dispose();

    // now serialize the offsets.
    var offsetBytes = offsets1.WriteOffsets();

    // load a new set of offsets, and a new consumer
    var offsets2 = new TopicPartitionOffsets(offsetBytes);
    var consumer2 = new Consumer(new ConsumerConfiguration(_seed2Addresses, offsets2.Topic, offsets2));
    await consumer2.OnMessageArrived
        .Do(msg =>
        {
            var value = BitConverter.ToInt32(msg.Value, 0);
            _log.Info("Consumer2 Received value {0} from partition {1} at offset {2}", value, msg.Partition, msg.Offset);
            receivedEvents.Add(value);
            offsets2.UpdateOffset(msg.Partition, msg.Offset);
        })
        .Take(50);
    //await consumer2.IsConnected;
    _log.Info("Closing second consumer");
    consumer2.Dispose();

    Assert.AreEqual(100, receivedEvents.Distinct().Count());
    Assert.AreEqual(100, receivedEvents.Count);
    kafka4net.Tracing.EtwTrace.Marker("/SaveOffsetsAndResumeConsuming");
}
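A detail worth calling out in Example 2: with System.Reactive, an IObservable<T> can be awaited directly; the await completes when the sequence completes and yields its last element, and it is the Take(50) that turns the otherwise endless OnMessageArrived stream into a finite one. A minimal sketch of that pattern using a plain Observable.Range source (illustrative only, no kafka4net types):
using System;
using System.Collections.Generic;
using System.Reactive.Linq;
using System.Threading.Tasks;

class AwaitTakeDemo
{
    static async Task Main()
    {
        var received = new List<int>();

        // Awaiting an IObservable<T> yields its last element once the sequence
        // completes; Take(5) caps the longer stream and unsubscribes upstream,
        // so Do sees exactly five values.
        int last = await Observable.Range(1, 1000)
            .Do(received.Add)
            .Take(5);

        Console.WriteLine($"{received.Count} values received, last = {last}"); // 5 values received, last = 5
    }
}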
Example 3: FullyCancelledDebouncedRequestsGetForRealCancelled
public async Task FullyCancelledDebouncedRequestsGetForRealCancelled()
{
    int messageCount = 0;
    int finalMessageCount = 0;
    Subject<Unit> gate = new Subject<Unit>();
    var fixture = CreateFixture(new TestHttpMessageHandler(_ => {
        var ret = new HttpResponseMessage() {
            Content = new StringContent("foo", Encoding.UTF8),
            StatusCode = HttpStatusCode.OK,
        };
        ret.Headers.ETag = new EntityTagHeaderValue("\"worifjw\"");
        messageCount++;

        return gate.Take(1)
            .Do(__ => finalMessageCount++)
            .Select(__ => ret);
    }));

    var client = new HttpClient(fixture) {
        BaseAddress = new Uri("http://example"),
    };

    var rq1 = new HttpRequestMessage(HttpMethod.Get, "/");
    var rq2 = new HttpRequestMessage(HttpMethod.Get, "/");

    Assert.Equal(0, messageCount);

    /* NB: Here's the thing we're testing for
     *
     * When we issue concurrent requests to the same resource, one of them
     * will actually do the request, and one of them will wait on the other.
     * In this case, rq1 will do the request, and rq2 will just return
     * whatever rq1 will return.
     *
     * The key then, is to only truly cancel rq1 if both rq1 *and* rq2
     * are cancelled, but rq1 should *appear* to be cancelled. This test
     * cancels both requests then makes sure we actually cancel the
     * underlying result */

    var cts = new CancellationTokenSource();
    var resp1Task = client.SendAsync(rq1, cts.Token);
    var resp2Task = client.SendAsync(rq2, cts.Token);
    Assert.Equal(1, messageCount);
    Assert.Equal(0, finalMessageCount);

    cts.Cancel();

    gate.OnNext(Unit.Default);
    gate.OnNext(Unit.Default);

    await Assert.ThrowsAsync<TaskCanceledException>(() => resp1Task);
    await Assert.ThrowsAsync<TaskCanceledException>(() => resp2Task);

    Assert.Equal(1, messageCount);
    Assert.Equal(0, finalMessageCount);
}
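Examples 3 and 5 both use a Subject<Unit> as a manual gate: each handler invocation returns gate.Take(1).Select(...), so no response is produced until the test pushes a Unit into the gate. A stripped-down sketch of that gating pattern, independent of the HttpClient fixture used in the tests (names are illustrative):
using System;
using System.Reactive;
using System.Reactive.Linq;
using System.Reactive.Subjects;
using System.Reactive.Threading.Tasks;
using System.Threading.Tasks;

class GateDemo
{
    static async Task Main()
    {
        var gate = new Subject<Unit>();
        int produced = 0;

        // Each "request" waits on the gate; Take(1) makes its sequence
        // complete after a single signal.
        Task<string> Request(string name) =>
            gate.Take(1)
                .Do(_ => produced++)
                .Select(_ => $"response for {name}")
                .ToTask();

        var r1 = Request("rq1");
        var r2 = Request("rq2");
        Console.WriteLine(produced); // 0 - nothing is produced until the gate fires

        // A Subject multicasts, so one OnNext reaches every subscriber that is
        // currently waiting; both Take(1) pipelines complete here.
        gate.OnNext(Unit.Default);

        Console.WriteLine(await r1); // response for rq1
        Console.WriteLine(await r2); // response for rq2
        Console.WriteLine(produced); // 2
    }
}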
Example 4: StartEnqueuedDownload
private IDownloadCancelHandle StartEnqueuedDownload(QueuedDownload download)
{
    _downloadStartedEvent.OnNext(download);
    App.Engine.StatisticsManager.LogDownloadStart(download);
    Transport.PendingDownload pendingDownload = Transport.StartQueuedDownload(download);
    pendingDownload.Response
        .ObserveOnDispatcher()
        .Subscribe<Transport.RunningDownload>(
            activeDownload =>
            {
                if (activeDownload.Download.DownloadSize == long.MaxValue || activeDownload.Download.DownloadSize == 0)
                {
                    activeDownload.Download.DownloadSize = activeDownload.ContentLength;
                }
                BackgroundWorker worker = new BackgroundWorker() { WorkerReportsProgress = true, WorkerSupportsCancellation = true };
                // change cancel handle
                startedDownloads[activeDownload.Download] = new ActiveDownloadCancelHandle(worker);
                worker.DoWork += (sender, e) =>
                {
                    Transport.RunningDownload dl = (Transport.RunningDownload)e.Argument;
                    BackgroundWorker bw = (BackgroundWorker)sender;
                    long bytesRead = dl.Download.DownloadedBytes;

                    // limited number of progress bar updates
                    var uiUpdates = new Subject<long>();
                    var cancelUiUpdates = uiUpdates
                        .Take(1)
                        .Merge(Observable.Empty<long>().Delay(TimeSpan.FromMilliseconds(KProgressUpdateInterval)))
                        .Repeat()
                        .Subscribe<long>(progress =>
                        {
                            if (bw.IsBusy)
                            {
                                bw.ReportProgress(0, progress);
                            }
                        });

                    if (dl is Transport.ActiveDownload)
                    {
                        string filePath = Utils.MediaFilePath(App.Engine.LoggedUser, dl.Download);
                        using (Stream writer = new IsolatedStorageFileStream(filePath, FileMode.Append, IsolatedStorageFile.GetUserStoreForApplication()))
                        {
                            using (Stream reader = ((Transport.ActiveDownload)dl).Stream)
                            {
                                byte[] buffer = new byte[16 * 1024];
                                int readCount;
                                while ((readCount = reader.Read(buffer, 0, buffer.Length)) > 0)
                                {
                                    bytesRead += readCount;
                                    writer.Write(buffer, 0, readCount);
                                    uiUpdates.OnNext(bytesRead);
                                    if (bw.CancellationPending)
                                    {
                                        pendingDownload.Cancel();
                                        e.Cancel = true;
                                        break;
                                    }
                                }
                                bw.ReportProgress(0, bytesRead);
                                e.Result = activeDownload.Download;
                            }
                        }
                        cancelUiUpdates.Dispose();
                    }

                    if (dl is Transport.BackgroundDownload)
                    {
                        BackgroundTransferRequest downloadRequest = ((Transport.BackgroundDownload)dl).Request;
                        IObservable<IEvent<BackgroundTransferEventArgs>> requestObserver = Observable.FromEvent<BackgroundTransferEventArgs>(downloadRequest, "TransferStatusChanged");
                        if (downloadRequest.TransferStatus != TransferStatus.Completed)
                        {
                            if (downloadRequest.TransferStatus == TransferStatus.None)
                            {
                                downloadRequest.DownloadLocation = new Uri(Utils.BackgroundFilePath(App.Engine.LoggedUser, dl.Download), UriKind.RelativeOrAbsolute);
                                downloadRequest.TransferPreferences = TransferPreferences.AllowCellularAndBattery;
                                e.Result = activeDownload.Download;
                                BackgroundTransferService.Add(downloadRequest);
                            }
                            downloadRequest.TransferProgressChanged += (senderBackground, eventBackground) =>
                            {
                                if (activeDownload.Download.DownloadSize == long.MaxValue || activeDownload.Download.DownloadSize == 0)
                                {
                                    activeDownload.Download.DownloadSize =
                                        activeDownload.ContentLength == -1 ?
                                            0 :
                                            activeDownload.ContentLength;
                                }
                                uiUpdates.OnNext(eventBackground.Request.BytesReceived);
                            };
                            IDisposable cancelOnStop = DownloadStopPendingEvent.Subscribe(stoppedDownload =>
                            {
                                if (dl.Download == stoppedDownload)
                                {
                                    BackgroundTransferService.Remove(downloadRequest);
                                    dl.Download.State = QueuedDownload.DownloadState.Stopped;
                                    dl.Download.DownloadedBytes = 0;
//......... remainder of this method omitted .........
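Aside from the "wait for one signal" uses above, Example 4 also uses Subject.Take for rate-limiting: uiUpdates.Take(1).Merge(Observable.Empty<long>().Delay(...)).Repeat() forwards at most one progress value per interval, because Take(1) grabs the first value of the window, the delayed empty sequence keeps the window open for the full interval, and Repeat() starts the next window once both have completed. A rough, timing-dependent sketch of that pattern (the interval and values are illustrative):
using System;
using System.Reactive.Linq;
using System.Reactive.Subjects;
using System.Threading;

class ProgressThrottleDemo
{
    static void Main()
    {
        var updates = new Subject<long>();

        // At most one update per 500 ms window.
        var subscription = updates
            .Take(1)
            .Merge(Observable.Empty<long>().Delay(TimeSpan.FromMilliseconds(500)))
            .Repeat()
            .Subscribe(v => Console.WriteLine($"progress: {v}"));

        for (int burst = 0; burst < 3; burst++)
        {
            for (long i = 0; i < 1000; i++)
                updates.OnNext(burst * 1000 + i); // a burst of rapid updates...
            Thread.Sleep(600);                    // ...then a pause longer than the window
        }

        // Roughly one value per window reaches the console: 0, 1000, 2000.
        subscription.Dispose();
        updates.OnCompleted();
    }
}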
Example 5: RequestsToDifferentPathsArentDebounced
public async Task RequestsToDifferentPathsArentDebounced()
{
    int messageCount = 0;
    Subject<Unit> gate = new Subject<Unit>();
    var fixture = CreateFixture(new TestHttpMessageHandler(_ => {
        var ret = new HttpResponseMessage() {
            Content = new StringContent("foo", Encoding.UTF8),
            StatusCode = HttpStatusCode.OK,
        };
        ret.Headers.ETag = new EntityTagHeaderValue("\"worifjw\"");
        messageCount++;

        return gate.Take(1).Select(__ => ret);
    }));

    var client = new HttpClient(fixture) {
        BaseAddress = new Uri("http://example"),
    };

    var rq1 = new HttpRequestMessage(HttpMethod.Get, "/foo");
    var rq2 = new HttpRequestMessage(HttpMethod.Get, "/bar");

    Assert.Equal(0, messageCount);

    var resp1Task = client.SendAsync(rq1);
    var resp2Task = client.SendAsync(rq2);
    Assert.Equal(2, messageCount);

    gate.OnNext(Unit.Default);
    gate.OnNext(Unit.Default);

    var resp1 = await resp1Task;
    var resp2 = await resp2Task;

    Assert.Equal(HttpStatusCode.OK, resp1.StatusCode);
    Assert.Equal(HttpStatusCode.OK, resp2.StatusCode);
    Assert.Equal(2, messageCount);
}