This page collects typical usage examples of the C# ConcurrentQueue.Select method. If you have been wondering how ConcurrentQueue.Select is used in practice, what it is good for, or what real code that calls it looks like, the hand-picked examples below should help. You can also explore further usage examples of the ConcurrentQueue class that this method belongs to.
Ten code examples of ConcurrentQueue.Select are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C# code samples.
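Before the examples, here is a minimal, self-contained sketch that is not taken from any of the projects featured below (class and variable names are illustrative only). It shows the common pattern behind most of the examples: fill a ConcurrentQueue<T> from several threads, then project its contents with LINQ's Select. This works because ConcurrentQueue<T> implements IEnumerable<T>, and its enumeration is documented to be a moment-in-time snapshot of the queue.

using System;
using System.Collections.Concurrent;
using System.Linq;
using System.Threading.Tasks;

class SelectOverConcurrentQueueDemo
{
    static void Main()
    {
        var queue = new ConcurrentQueue<int>();

        // ConcurrentQueue<T> is safe to fill from many threads at once.
        Parallel.For(0, 100, i => queue.Enqueue(i));

        // Select is plain LINQ-to-Objects over the queue's snapshot enumerator.
        var squares = queue.Select(n => n * n).ToList();

        Console.WriteLine("queued: {0}, projected: {1}", queue.Count, squares.Count);
    }
}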
Example 1: ForEachIteratesItemsInParallel
public void ForEachIteratesItemsInParallel()
{
    var strings = LangTestHelpers.RandomStrings(100, 50);
    var results = new ConcurrentQueue<Tuple<int, string>>();
    var threadCount = 10;

    // Note: this ForEach overload (thread count, cancellation token, source, body) appears to be
    // a project-specific helper rather than System.Threading.Tasks.Parallel.ForEach.
    Parallel.ForEach(
        threadCount,
        CancellationToken.None,
        strings,
        it =>
        {
            Thread.Sleep(10);
            results.Enqueue(Tuple.Create(Environment.CurrentManagedThreadId, it));
        }
    );

    Assert.AreEqual(threadCount, results.Select(it => it.Item1).Distinct().Count());
    CollectionAssert.AreEquivalent(strings, results.Select(it => it.Item2));
}
Example 2: ExecuteAsParallel
public static void ExecuteAsParallel(this IEnumerable<Action> actions)
{
    var _exceptions = new ConcurrentQueue<Exception>();
    System.Threading.CancellationToken cts = default(System.Threading.CancellationToken);

    try
    {
        Parallel.ForEach<Action>(actions, new ParallelOptions() { CancellationToken = cts }, a =>
            a.Invoke());
    }
    catch (AggregateException agex)
    {
        agex.InnerExceptions.ToList().ForEach(_exceptions.Enqueue);
    }

    if (_exceptions.Any())
        throw new ApplicationException(string.Format("Error: {0}", string.Join("\r\nError: ", _exceptions.Select(e => e.Message))));
}
Example 3: IndexDocuments
public override void IndexDocuments(
    AbstractViewGenerator viewGenerator,
    IndexingBatch batch,
    IStorageActionsAccessor actions,
    DateTime minimumTimestamp)
{
    var count = 0;
    var sourceCount = 0;
    var sw = Stopwatch.StartNew();
    var start = SystemTime.UtcNow;
    var deleted = new Dictionary<ReduceKeyAndBucket, int>();
    var indexPerfStats = RecordCurrentBatch("Current Map", batch.Docs.Count);
    batch.SetIndexingPerformance(indexPerfStats);

    var documentsWrapped = batch.Docs.Select(doc =>
    {
        sourceCount++;
        var documentId = doc.__document_id;
        actions.MapReduce.DeleteMappedResultsForDocumentId((string)documentId, indexId, deleted);
        return doc;
    })
    .Where(x => x is FilteredDocument == false)
    .ToList();

    var allReferencedDocs = new ConcurrentQueue<IDictionary<string, HashSet<string>>>();
    var allReferenceEtags = new ConcurrentQueue<IDictionary<string, Etag>>();
    var allState = new ConcurrentQueue<Tuple<HashSet<ReduceKeyAndBucket>, IndexingWorkStats, Dictionary<string, int>>>();

    int loadDocumentCount = 0;
    long loadDocumentDuration = 0;

    BackgroundTaskExecuter.Instance.ExecuteAllBuffered(context, documentsWrapped, partition =>
    {
        var localStats = new IndexingWorkStats();
        var localChanges = new HashSet<ReduceKeyAndBucket>();
        var statsPerKey = new Dictionary<string, int>();
        allState.Enqueue(Tuple.Create(localChanges, localStats, statsPerKey));

        using (CurrentIndexingScope.Current = new CurrentIndexingScope(context.Database, PublicName))
        {
            // we are writing to the transactional store from multiple threads here, and in a streaming fashion
            // should result in less memory and better perf
            context.TransactionalStorage.Batch(accessor =>
            {
                var mapResults = RobustEnumerationIndex(partition, viewGenerator.MapDefinitions, localStats);
                var currentDocumentResults = new List<object>();
                string currentKey = null;
                bool skipDocument = false;

                foreach (var currentDoc in mapResults)
                {
                    var documentId = GetDocumentId(currentDoc);
                    if (documentId != currentKey)
                    {
                        count += ProcessBatch(viewGenerator, currentDocumentResults, currentKey, localChanges, accessor, statsPerKey);
                        currentDocumentResults.Clear();
                        currentKey = documentId;
                    }
                    else if (skipDocument)
                    {
                        continue;
                    }

                    currentDocumentResults.Add(new DynamicJsonObject(RavenJObject.FromObject(currentDoc, jsonSerializer)));

                    if (EnsureValidNumberOfOutputsForDocument(documentId, currentDocumentResults.Count) == false)
                    {
                        skipDocument = true;
                        currentDocumentResults.Clear();
                        continue;
                    }

                    Interlocked.Increment(ref localStats.IndexingSuccesses);
                }
                count += ProcessBatch(viewGenerator, currentDocumentResults, currentKey, localChanges, accessor, statsPerKey);
            });

            allReferenceEtags.Enqueue(CurrentIndexingScope.Current.ReferencesEtags);
            allReferencedDocs.Enqueue(CurrentIndexingScope.Current.ReferencedDocuments);
            Interlocked.Add(ref loadDocumentCount, CurrentIndexingScope.Current.LoadDocumentCount);
            Interlocked.Add(ref loadDocumentDuration, CurrentIndexingScope.Current.LoadDocumentDuration.ElapsedMilliseconds);
        }
    });

    UpdateDocumentReferences(actions, allReferencedDocs, allReferenceEtags);

    var changed = allState.SelectMany(x => x.Item1).Concat(deleted.Keys)
        .Distinct()
        .ToList();

    var stats = new IndexingWorkStats(allState.Select(x => x.Item2));
    var reduceKeyStats = allState.SelectMany(x => x.Item3)
        .GroupBy(x => x.Key)
        .Select(g => new { g.Key, Count = g.Sum(x => x.Value) })
        .ToList();

    BackgroundTaskExecuter.Instance.ExecuteAllBuffered(context, reduceKeyStats, enumerator => context.TransactionalStorage.Batch(accessor =>
    {
        while (enumerator.MoveNext())
        {
            var reduceKeyStat = enumerator.Current;
            accessor.MapReduce.IncrementReduceKeyCounter(indexId, reduceKeyStat.Key, reduceKeyStat.Count);
        }
        //......... the remainder of this method is omitted .........
Example 4: IndexDocuments
public override IndexingPerformanceStats IndexDocuments(AbstractViewGenerator viewGenerator, IndexingBatch batch, IStorageActionsAccessor actions, DateTime minimumTimestamp, CancellationToken token)
{
    token.ThrowIfCancellationRequested();

    var count = 0;
    var sourceCount = 0;
    var deleted = new Dictionary<ReduceKeyAndBucket, int>();
    var performance = RecordCurrentBatch("Current Map", "Map", batch.Docs.Count);
    var performanceStats = new List<BasePerformanceStats>();

    var usedStorageAccessors = new ConcurrentSet<IStorageActionsAccessor>();

    if (usedStorageAccessors.TryAdd(actions))
    {
        var storageCommitDuration = new Stopwatch();

        actions.BeforeStorageCommit += storageCommitDuration.Start;
        actions.AfterStorageCommit += () =>
        {
            storageCommitDuration.Stop();
            performanceStats.Add(PerformanceStats.From(IndexingOperation.StorageCommit, storageCommitDuration.ElapsedMilliseconds));
        };
    }

    var deleteMappedResultsDuration = new Stopwatch();

    var documentsWrapped = batch.Docs.Select(doc =>
    {
        token.ThrowIfCancellationRequested();

        sourceCount++;
        var documentId = doc.__document_id;

        using (StopwatchScope.For(deleteMappedResultsDuration))
        {
            actions.MapReduce.DeleteMappedResultsForDocumentId((string)documentId, indexId, deleted);
        }

        return doc;
    })
    .Where(x => x is FilteredDocument == false)
    .ToList();

    performanceStats.Add(new PerformanceStats
    {
        Name = IndexingOperation.Map_DeleteMappedResults,
        DurationMs = deleteMappedResultsDuration.ElapsedMilliseconds,
    });

    var allReferencedDocs = new ConcurrentQueue<IDictionary<string, HashSet<string>>>();
    var allReferenceEtags = new ConcurrentQueue<IDictionary<string, Etag>>();
    var allState = new ConcurrentQueue<Tuple<HashSet<ReduceKeyAndBucket>, IndexingWorkStats, Dictionary<string, int>>>();

    var parallelOperations = new ConcurrentQueue<ParallelBatchStats>();
    var parallelProcessingStart = SystemTime.UtcNow;

    BackgroundTaskExecuter.Instance.ExecuteAllBuffered(context, documentsWrapped, partition =>
    {
        token.ThrowIfCancellationRequested();
        var parallelStats = new ParallelBatchStats
        {
            StartDelay = (long)(SystemTime.UtcNow - parallelProcessingStart).TotalMilliseconds
        };

        var localStats = new IndexingWorkStats();
        var localChanges = new HashSet<ReduceKeyAndBucket>();
        var statsPerKey = new Dictionary<string, int>();

        var linqExecutionDuration = new Stopwatch();
        var reduceInMapLinqExecutionDuration = new Stopwatch();
        var putMappedResultsDuration = new Stopwatch();
        var convertToRavenJObjectDuration = new Stopwatch();

        allState.Enqueue(Tuple.Create(localChanges, localStats, statsPerKey));

        using (CurrentIndexingScope.Current = new CurrentIndexingScope(context.Database, PublicName))
        {
            // we are writing to the transactional store from multiple threads here, and in a streaming fashion
            // should result in less memory and better perf
            context.TransactionalStorage.Batch(accessor =>
            {
                if (usedStorageAccessors.TryAdd(accessor))
                {
                    var storageCommitDuration = new Stopwatch();

                    accessor.BeforeStorageCommit += storageCommitDuration.Start;
                    accessor.AfterStorageCommit += () =>
                    {
                        storageCommitDuration.Stop();
                        parallelStats.Operations.Add(PerformanceStats.From(IndexingOperation.StorageCommit, storageCommitDuration.ElapsedMilliseconds));
                    };
                }

                var mapResults = RobustEnumerationIndex(partition, viewGenerator.MapDefinitions, localStats, linqExecutionDuration);
                var currentDocumentResults = new List<object>();
                string currentKey = null;
                //......... the remainder of this method is omitted .........
Example 5: SubmitRequestsAsync
public async Task<TimeSpan> SubmitRequestsAsync(int numRequests, int tps)
{
    var delayBetweenRequests = TimeSpan.FromMilliseconds(1000.0/tps);
    var requestTimes = new ConcurrentQueue<TimeSpan>();
    var responseTasks = new List<Task>();

    for (int i = 0; i < numRequests; i++)
    {
        var stopwatch = Stopwatch.StartNew();
        var request = new TestRequest
        {
            RequestNumber = i,
            StartTime = DateTimeOffset.UtcNow
        };

        Debug.WriteLine("Submitting request {0}", i);
        var responseTask = serviceClient.SubmitRequestAsync(request, requestTimeout);
        Task continueWith = responseTask.ContinueWith(task =>
        {
            var testResponse = task.Result;
            var now = DateTimeOffset.UtcNow;
            var processingTime = now - testResponse.StartTime;
            Debug.WriteLine("Request {0} took {1}", testResponse.RequestNumber, processingTime);
            requestTimes.Enqueue(processingTime);
        });
        responseTasks.Add(continueWith);
        responseTasks.Add(responseTask);

        stopwatch.Stop();
        var delayTime = delayBetweenRequests - stopwatch.Elapsed;
        if (delayTime > TimeSpan.Zero)
        {
            await Task.Delay(delayTime);
        }
    }

    await Task.WhenAll(responseTasks.ToArray());

    if (!requestTimes.Any())
    {
        throw new Exception("Failed to retrieve any results. Are you sure your queue names are configured correctly?");
    }

    double averageMs = requestTimes.Select(t => t.TotalMilliseconds).Average();
    return TimeSpan.FromMilliseconds(averageMs);
}
Example 6: GetTaskIdsForExecutionsOptions
static IEnumerable<int[]> GetTaskIdsForExecutionsOptions (
    ExecutionDataflowBlockOptions options)
{
    var blockFactories =
        new Func<ConcurrentQueue<Tuple<int, int>>, ITargetBlock<int>>[]
        {
            q => new ActionBlock<int> (
                i => q.Enqueue (Tuple.Create (i, Task.CurrentId.Value)), options),
            q => new TransformBlock<int, int> (i =>
            {
                q.Enqueue (Tuple.Create (i, Task.CurrentId.Value));
                return i;
            }, options),
            q => new TransformManyBlock<int, int> (i =>
            {
                q.Enqueue (Tuple.Create (i, Task.CurrentId.Value));
                return new[] { i };
            }, options)
        };

    foreach (var factory in blockFactories) {
        var queue = new ConcurrentQueue<Tuple<int, int>> ();
        var block = factory (queue);
        Assert.IsEmpty (queue);

        for (int i = 0; i < 100; i++)
            block.Post (i);

        block.Complete ();

        var source = block as ISourceBlock<int>;
        if (source != null) {
            Assert.IsFalse (block.Completion.Wait (100));
            source.LinkTo (new BufferBlock<int> ());
        }
        Assert.IsTrue (block.Completion.Wait (1000));

        CollectionAssert.AreEquivalent (
            Enumerable.Range (0, 100), queue.Select (t => t.Item1));

        yield return queue.Select (t => t.Item2).ToArray ();
    }
}
Example 7: InstallSatellitePackages
private bool InstallSatellitePackages(IFileSystem packagesFolderFileSystem, ConcurrentQueue<JObject> satellitePackages)
{
    if (satellitePackages.Count == 0)
    {
        return false;
    }

    var packageManager = CreatePackageManager(packagesFolderFileSystem, useSideBySidePaths: true);
    var filesystemInstallationTarget = new FilesystemInstallationTarget(packageManager);

    var packageActions = satellitePackages.Select(packageJSON => new NewPackageAction(NuGet.Client.PackageActionType.Download,
        new PackageIdentity(packageJSON[Properties.PackageId].ToString(), new NuGetVersion (packageJSON[Properties.Version].ToString())), packageJSON, filesystemInstallationTarget, SourceRepository, null));

    // BUGBUG: See PackageExtractor.cs for locking mechanism used to handle concurrency
    NuGet.Client.Installation.ActionExecutor actionExecutor = new Client.Installation.ActionExecutor();
    actionExecutor.ExecuteActions(packageActions, Console);
    return true;
}
Example 8: CanDistributeWorkAmongAddedWorkers
public void CanDistributeWorkAmongAddedWorkers(int numberOfMessages, int numberOfWorkerEndpoints)
{
    var workerQueueNames = Enumerable
        .Range(1, numberOfWorkerEndpoints)
        .Select(workerNumber => string.Format("test.loadbalancer.worker.{0:00}", workerNumber))
        .ToList();

    Console.WriteLine(@"Load balancer test running - will send {0} messages to load balancer configured with endpoints:
{1}
",
        numberOfMessages, string.Join(Environment.NewLine, workerQueueNames.Select(name => " " + name)));

    var workDone = new ConcurrentQueue<WorkDone>();

    foreach (var queueName in workerQueueNames)
    {
        StartWorkerBus(queueName, workDone);
        service.AddDestinationQueue(queueName);
        queuesToReset.Add(queueName);
    }

    service.Start();

    var sender = Configure.With(TrackDisposable(new BuiltinContainerAdapter()))
        .MessageOwnership(o => o.Use(this))
        .Transport(t => t.UseMsmqInOneWayClientMode())
        .CreateBus().Start();

    var messagesToSend = Enumerable.Range(0, numberOfMessages)
        .Select(id => new Work {MessageId = id})
        .ToList();

    messagesToSend.ForEach(sender.Send);

    var waitStartTime = DateTime.UtcNow;
    while (waitStartTime.ElapsedUntilNow() < TimeSpan.FromSeconds(5 + (numberOfMessages/100)))
    {
        Thread.Sleep(100);
        if (workDone.Count >= numberOfMessages) break;
    }

    Thread.Sleep(2.Seconds());

    workDone.Count.ShouldBe(numberOfMessages);
    workDone.Select(w => w.MessageId).OrderBy(w => w)
        .ShouldBe(Enumerable.Range(0, numberOfMessages));

    var groupedByWorkers = workDone.GroupBy(w => w.WorkerQueueName);
    Console.WriteLine(@"Messages were processed like this:
{0}", string.Join(Environment.NewLine, groupedByWorkers.Select(g => string.Format(" " + g.Key + ": " + new string('*', g.Count())))));

    groupedByWorkers.Count().ShouldBe(numberOfWorkerEndpoints);
}
Example 9: SubmitRequestsAsync
public async Task<TimeSpan> SubmitRequestsAsync(int numRequests, int tps)
{
    var delayBetweenRequests = TimeSpan.FromMilliseconds(1000.0 / tps);
    var requestTimes = new ConcurrentQueue<TimeSpan>();
    var responseTasks = new List<Task>();

    using (var client = new HttpClient())
    {
        client.BaseAddress = serviceUri;
        client.DefaultRequestHeaders.Accept.Clear();
        client.DefaultRequestHeaders.Accept.Add(new MediaTypeWithQualityHeaderValue("application/json"));

        for (int i = 0; i < numRequests; i++)
        {
            var stopwatch = Stopwatch.StartNew();
            var request = new TestRequest
            {
                RequestNumber = i,
                StartTime = DateTimeOffset.UtcNow
            };

            Debug.WriteLine("Submitting request {0}", i);

            // New code:
            Task<HttpResponseMessage> responseTask = client.PostAsJsonAsync("api/service/", request);
            Task continueWith = responseTask.ContinueWith(
                async task =>
                {
                    var response = task.Result;
                    if (response.IsSuccessStatusCode)
                    {
                        var testResponse = await response.Content.ReadAsAsync<TestResponse>();
                        var now = DateTimeOffset.UtcNow;
                        var processingTime = now - testResponse.StartTime;
                        Debug.WriteLine("Request {0} took {1}", testResponse.RequestNumber, processingTime);
                        requestTimes.Enqueue(processingTime);
                    }
                });
            responseTasks.Add(continueWith);
            responseTasks.Add(responseTask);

            stopwatch.Stop();
            var delayTime = delayBetweenRequests - stopwatch.Elapsed;
            if (delayTime > TimeSpan.Zero)
            {
                await Task.Delay(delayTime);
            }
        }

        await Task.WhenAll(responseTasks.ToArray());
    }

    if (!requestTimes.Any())
    {
        throw new Exception("Failed to retrieve any results. Are you sure your queue names are configured correctly?");
    }

    double averageMs = requestTimes.Select(t => t.TotalMilliseconds).Average();
    return TimeSpan.FromMilliseconds(averageMs);
}
Example 10: InstallSatellitePackages
private bool InstallSatellitePackages(IFileSystem packagesFolderFileSystem, ConcurrentQueue<IPackage> satellitePackages)
{
    if (satellitePackages.Count == 0)
    {
        return false;
    }

    var packageManager = CreatePackageManager(packagesFolderFileSystem, useSideBySidePaths: true);
    var executor = new ActionExecutor();
    var operations = satellitePackages.Select(package =>
        new Resolver.PackageSolutionAction(PackageActionType.AddToPackagesFolder, package, packageManager));
    executor.Execute(operations);
    return true;
}