本文整理汇总了C#中System.Threading.Tasks.Task.All方法的典型用法代码示例。如果您正苦于以下问题:C# Task.All方法的具体用法?C# Task.All怎么用?C# Task.All使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类System.Threading.Tasks.Task的用法示例。
在下文中一共展示了Task.All方法的5个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C#代码示例。
示例1: WaitAndPump
// Blocks until every task in <paramref name="tasks"/> has completed,
// pumping the message loop between polls so the UI stays responsive.
public void WaitAndPump(Task[] tasks)
{
    for (;;)
    {
        bool pending = false;
        foreach (Task task in tasks)
        {
            if (!task.IsCompleted)
            {
                pending = true;
                break;
            }
        }
        if (!pending)
        {
            return;
        }
        DoEvents();
    }
}
示例2: FromCommandLineArgsRaceCondition
// https://pytools.codeplex.com/workitem/1429
// Hammers VisualStudioProxy.FromProcessId from 100 concurrent tasks that are
// all released at once by a shared event, to surface the race condition.
public void FromCommandLineArgsRaceCondition() {
    var mre = new ManualResetEvent(false);
    var tasks = new Task<bool>[100];
    try {
        for (int i = 0; i < tasks.Length; i += 1) {
            tasks[i] = Task.Run(() => {
                // Every task parks here so they all hit FromProcessId together.
                mre.WaitOne();
                using (var arg = VisualStudioProxy.FromProcessId(123)) {
                    return arg is VisualStudioProxy;
                }
            });
        }
        mre.Set();
        Assert.IsTrue(Task.WaitAll(tasks, TimeSpan.FromSeconds(30.0)));
        Assert.IsTrue(tasks.All(t => t.Result));
    } finally {
        // Release any tasks still blocked on the event (Set is idempotent if
        // the try block already signaled it), drain the tasks, and only THEN
        // dispose the event. The original code disposed first, which could
        // pull the wait handle out from under a task still inside WaitOne.
        mre.Set();
        Task.WaitAll(tasks, TimeSpan.FromSeconds(30.0));
        mre.Dispose();
    }
}
示例3: ConcurrentOperationsAreSerialized
// Verifies that BufferedStream serializes concurrent WriteAsync calls:
// four overlapping writes are issued while the inner stream is held back,
// and once released they must land in the order they were issued.
public async Task ConcurrentOperationsAreSerialized()
{
    const int chunkSize = 250;
    byte[] payload = new byte[1000];
    for (int b = 0; b < payload.Length; b++)
    {
        payload[b] = (byte)b;
    }
    var inner = new ManuallyReleaseAsyncOperationsStream();
    var buffered = new BufferedStream(inner, 1);
    var writes = new Task[4];
    for (int w = 0; w < writes.Length; w++)
    {
        writes[w] = buffered.WriteAsync(payload, chunkSize * w, chunkSize);
    }
    // The inner stream has not been released, so the writes cannot all be done.
    Assert.False(writes.All(t => t.IsCompleted));
    inner.Release();
    await Task.WhenAll(writes);
    buffered.Position = 0;
    // Serialized writes mean the first byte of each chunk appears in order.
    for (int w = 0; w < writes.Length; w++)
    {
        Assert.Equal(w, buffered.ReadByte());
    }
}
示例4: Should_control_executions_queuing_and_rejections_per_specification_with_cancellations
public void Should_control_executions_queuing_and_rejections_per_specification_with_cancellations(
int maxParallelization, int maxQueuingActions, int totalActions, string because, bool cancelQueuing,
bool cancelExecuting)
{
if (totalActions < 0) throw new ArgumentOutOfRangeException(nameof(totalActions));
because = String.Format("MaxParallelization {0}; MaxQueuing {1}; TotalActions {2}; CancelQueuing {3}; CancelExecuting {4}: {5}", maxParallelization, maxQueuingActions, totalActions, cancelQueuing, cancelExecuting, because);
BulkheadPolicy<ResultPrimitive> bulkhead = Policy.BulkheadAsync<ResultPrimitive>(maxParallelization, maxQueuingActions);
// Set up delegates which we can track whether they've started; and control when we allow them to complete (to release their semaphore slot).
actions = new TraceableAction[totalActions];
for (int i = 0; i < totalActions; i++) { actions[i] = new TraceableAction(i, statusChanged, testOutputHelper); }
// Throw all the delegates at the bulkhead simultaneously.
Task<ResultPrimitive>[] tasks = new Task<ResultPrimitive>[totalActions];
for (int i = 0; i < totalActions; i++) { tasks[i] = actions[i].ExecuteOnBulkheadAsync<ResultPrimitive>(bulkhead); }
testOutputHelper.WriteLine("Immediately after queueing...");
testOutputHelper.WriteLine("Bulkhead: {0} slots out of {1} available.", bulkhead.BulkheadAvailableCount, maxParallelization);
testOutputHelper.WriteLine("Bulkhead queue: {0} slots out of {1} available.", bulkhead.QueueAvailableCount, maxQueuingActions);
OutputActionStatuses();
// Assert the expected distributions of executing, queuing, rejected and completed - when all delegates thrown at bulkhead.
int expectedCompleted = 0;
int expectedCancelled = 0;
int expectedExecuting = Math.Min(totalActions, maxParallelization);
int expectedRejects = Math.Max(0, totalActions - maxParallelization - maxQueuingActions);
int expectedQueuing = Math.Min(maxQueuingActions, Math.Max(0, totalActions - maxParallelization));
int expectedBulkheadFree = maxParallelization - expectedExecuting;
int expectedQueueFree = maxQueuingActions - expectedQueuing;
try
{
actions.Count(a => a.Status == TraceableActionStatus.Faulted).Should().Be(0);
Within(shimTimeSpan, () => actions.Count(a => a.Status == TraceableActionStatus.Executing).Should().Be(expectedExecuting, because + ", when checking expectedExecuting"));
Within(shimTimeSpan, () => actions.Count(a => a.Status == TraceableActionStatus.QueueingForSemaphore).Should().Be(expectedQueuing, because + ", when checking expectedQueuing"));
Within(shimTimeSpan, () => actions.Count(a => a.Status == TraceableActionStatus.Rejected).Should().Be(expectedRejects, because + ", when checking expectedRejects"));
actions.Count(a => a.Status == TraceableActionStatus.Completed).Should().Be(expectedCompleted, because + ", when checking expectedCompleted");
actions.Count(a => a.Status == TraceableActionStatus.Canceled).Should().Be(expectedCancelled, because + ", when checking expectedCancelled");
Within(shimTimeSpan, () => bulkhead.BulkheadAvailableCount.Should().Be(expectedBulkheadFree, because + ", when checking expectedBulkheadFree"));
Within(shimTimeSpan, () => bulkhead.QueueAvailableCount.Should().Be(expectedQueueFree, because + ", when checking expectedQueueFree"));
}
finally
{
testOutputHelper.WriteLine("Expected initial state verified...");
testOutputHelper.WriteLine("Bulkhead: {0} slots out of {1} available.", bulkhead.BulkheadAvailableCount, maxParallelization);
testOutputHelper.WriteLine("Bulkhead queue: {0} slots out of {1} available.", bulkhead.QueueAvailableCount, maxQueuingActions);
OutputActionStatuses();
}
// Complete or cancel delegates one by one, and expect others to take their place (if a slot released and others remain queueing); until all work is done.
while (expectedExecuting > 0)
{
if (cancelQueuing)
{
testOutputHelper.WriteLine("Cancelling a queueing task...");
actions.First(a => a.Status == TraceableActionStatus.QueueingForSemaphore).Cancel();
expectedCancelled++;
expectedQueuing--;
expectedQueueFree++;
cancelQueuing = false;
}
else if (cancelExecuting)
{
testOutputHelper.WriteLine("Cancelling an executing task...");
actions.First(a => a.Status == TraceableActionStatus.Executing).Cancel();
expectedCancelled++;
if (expectedQueuing > 0)
{
expectedQueuing--;
expectedQueueFree++;
}
else
{
expectedExecuting--;
expectedBulkheadFree++;
}
cancelExecuting = false;
}
else // Complete an executing delegate.
{
testOutputHelper.WriteLine("Completing a task...");
actions.First(a => a.Status == TraceableActionStatus.Executing).AllowCompletion();
expectedCompleted++;
if (expectedQueuing > 0)
{
expectedQueuing--;
expectedQueueFree++;
}
else
{
//.........这里部分代码省略.........
示例5: TestMaxNumberOfGroups
// Exercises BatchBlock's MaxNumberOfGroups limit: feeds exactly the allowed
// number of batches (Post when sync, SendAsync otherwise), then verifies any
// further message is declined both before and after the batches are produced.
// Fix: corrected the misspelled assertion messages ("declind" -> "declined").
private static bool TestMaxNumberOfGroups(bool greedy, bool sync)
{
    Contract.Assert(greedy || !sync, "Non-greedy sync doesn't make sense.");
    bool passed = true;
    // Cover a single group and a larger group count (1 and 21), one item per batch.
    for (int maxNumberOfGroups = 1; maxNumberOfGroups <= 21; maxNumberOfGroups += 20)
    {
        for (int itemsPerBatch = 1; itemsPerBatch <= 1; itemsPerBatch++)
        {
            var options = new GroupingDataflowBlockOptions { MaxNumberOfGroups = maxNumberOfGroups, Greedy = greedy };
            var batch = new BatchBlock<int>(itemsPerBatch, options);
            // Feed all N batches; all should succeed
            for (int batchNum = 0; batchNum < maxNumberOfGroups; batchNum++)
            {
                var sendAsyncs = new Task<bool>[itemsPerBatch];
                for (int itemNum = 0; itemNum < itemsPerBatch; itemNum++)
                {
                    if (sync)
                    {
                        Assert.True(batch.Post(itemNum), string.Format("FAILED batch.Post({0}) on MaxNOG {1}", itemNum, batchNum));
                    }
                    else
                    {
                        sendAsyncs[itemNum] = batch.SendAsync(itemNum);
                    }
                }
                if (!sync)
                {
                    Assert.True(Task.WaitAll(sendAsyncs, 4000),
                        string.Format("FAILED batch.SendAsyncs should have been completed in batch num {0}", batchNum));
                    if (passed)
                    {
                        Assert.True(sendAsyncs.All(t => t.Status == TaskStatus.RanToCompletion && t.Result),
                            string.Format("FAILED batch.SendAsyncs should have been completed in batch num {0}", batchNum));
                    }
                }
            }
            // Next message should fail in greedy mode
            if (greedy)
            {
                if (sync)
                {
                    Assert.False(batch.Post(1), "FAILED batch.Post(1) after completed groups should be declined");
                }
                else
                {
                    var t = batch.SendAsync(1);
                    Assert.True(t != null && t.Status == TaskStatus.RanToCompletion && t.Result == false, "FAILED batch.SendAsync(1) after completed groups should be declined");
                }
            }
            // Wait until the all batches are produced
            Assert.True(SpinWait.SpinUntil(() => batch.OutputCount == maxNumberOfGroups, 4000), "FAILED All batches should have been produced");
            // Next message should fail, even after groups have been produced
            if (sync)
            {
                Assert.False(batch.Post(1), "FAILED batch.Post(1) after completed groups are output should be declined");
            }
            else
            {
                var t = batch.SendAsync(1);
                Assert.True(t != null && t.Status == TaskStatus.RanToCompletion && t.Result == false, "FAILED batch.SendAsync(1) after completed groups are output should be declined");
            }
        }
    }
    // NOTE(review): 'passed' is never set to false anywhere above; failures
    // surface as thrown assertions, so reaching this line means success.
    Assert.True(passed, string.Format("{0}", passed ? "Passed" : "FAILED"));
    return passed;
}