

C# ConcurrentQueue.SelectMany Method Code Examples

This article collects typical usage examples of the ConcurrentQueue.SelectMany method in C#. If you are wondering what exactly ConcurrentQueue.SelectMany does, or how to use it in practice, the curated examples below may help. You can also explore further usage examples of ConcurrentQueue, the type these calls are made on.


The section below shows 4 code examples of the ConcurrentQueue.SelectMany method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C# code examples.
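Note that SelectMany is not actually a member of ConcurrentQueue<T>: it is the standard LINQ extension method on IEnumerable<T>, which ConcurrentQueue<T> implements. Enumerating a ConcurrentQueue (as SelectMany does) operates on a moment-in-time snapshot of the queue's contents. Before the real-world examples, here is a minimal, self-contained sketch of the flattening behavior (the type names and values are illustrative only):

using System;
using System.Collections.Concurrent;
using System.Linq;

class SelectManySketch
{
	static void Main()
	{
		// each worker might enqueue one batch of results
		var queue = new ConcurrentQueue<int[]>();
		queue.Enqueue(new[] { 1, 2 });
		queue.Enqueue(new[] { 3, 4 });

		// SelectMany flattens the queued batches into a single sequence;
		// enumeration works on a snapshot, so Enqueue calls made concurrently
		// while iterating may not be observed
		var flattened = queue.SelectMany(batch => batch).ToList();
		Console.WriteLine(string.Join(", ", flattened)); // 1, 2, 3, 4
	}
}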

Example 1: SingleStepReduce

		private void SingleStepReduce(IndexToWorkOn index, string[] keysToReduce, AbstractViewGenerator viewGenerator,
												ConcurrentSet<object> itemsToDelete)
		{
			var needToMoveToSingleStepQueue = new ConcurrentQueue<HashSet<string>>();

			Log.Debug(() => string.Format("Executing single step reducing for {0} keys [{1}]", keysToReduce.Length, string.Join(", ", keysToReduce)));
			var batchTimeWatcher = Stopwatch.StartNew();
			var count = 0;
			var size = 0;
			var state = new ConcurrentQueue<Tuple<HashSet<string>, List<MappedResultInfo>>>();
			var reducingBatchThrottlerId = Guid.NewGuid();
			try
			{
				BackgroundTaskExecuter.Instance.ExecuteAllBuffered(context, keysToReduce, enumerator =>
				{
					var localNeedToMoveToSingleStep = new HashSet<string>();
					needToMoveToSingleStepQueue.Enqueue(localNeedToMoveToSingleStep);
					var localKeys = new HashSet<string>();
					while (enumerator.MoveNext())
					{
						localKeys.Add(enumerator.Current);
					}

					transactionalStorage.Batch(actions =>
					{
						var getItemsToReduceParams = new GetItemsToReduceParams(index: index.IndexName, reduceKeys: localKeys, level: 0,
							loadData: false,
							itemsToDelete: itemsToDelete)
						{
							Take = int.MaxValue // just get all; we do the rate limiting when we load the number of keys to reduce anyway
						};

						var scheduledItems = actions.MapReduce.GetItemsToReduce(getItemsToReduceParams).ToList();
						autoTuner.CurrentlyUsedBatchSizes.GetOrAdd(reducingBatchThrottlerId, scheduledItems.Sum(x => x.Size));
						if (scheduledItems.Count == 0)
						{
							if (Log.IsWarnEnabled)
							{
								Log.Warn("Found single reduce items ({0}) that didn't have any items to reduce. Deleting level 1 & level 2 items for those keys. (If you can reproduce this, please contact [email protected])",
									string.Join(", ", keysToReduce));
							}
							// Here we have an interesting issue. We have scheduled reductions, because GetReduceTypesPerKeys() returned them
							// and at the same time, we don't have any at level 0. That probably means that we have them at level 1 or 2.
							// They shouldn't be here, and indeed, we remove them just a little down from here in this function.
							// That said, they might have been smuggled in between versions, or something happened to cause them to be here.
							// In order to avoid that, we forcibly delete those extra items from the scheduled reductions, and move on
							foreach (var reduceKey in keysToReduce)
							{
								actions.MapReduce.DeleteScheduledReduction(index.IndexName, 1, reduceKey);
								actions.MapReduce.DeleteScheduledReduction(index.IndexName, 2, reduceKey);
							}
						}

						foreach (var reduceKey in localKeys)
						{
							var lastPerformedReduceType = actions.MapReduce.GetLastPerformedReduceType(index.IndexName, reduceKey);

							if (lastPerformedReduceType != ReduceType.SingleStep)
								localNeedToMoveToSingleStep.Add(reduceKey);

							if (lastPerformedReduceType != ReduceType.MultiStep)
								continue;

							Log.Debug("Key {0} was moved from multi step to single step reduce, removing existing reduce results records",
								reduceKey);

							// now we are in single step but previously multi step reduce was performed for the given key
							var mappedBuckets = actions.MapReduce.GetMappedBuckets(index.IndexName, reduceKey).ToList();

							// add scheduled items too to be sure we will delete reduce results of already deleted documents
							mappedBuckets.AddRange(scheduledItems.Select(x => x.Bucket));

							foreach (var mappedBucket in mappedBuckets.Distinct())
							{
								actions.MapReduce.RemoveReduceResults(index.IndexName, 1, reduceKey, mappedBucket);
								actions.MapReduce.RemoveReduceResults(index.IndexName, 2, reduceKey, mappedBucket / 1024);
							}
						}

						var mappedResults = actions.MapReduce.GetMappedResults(
							index.IndexName,
							localKeys,
							loadData: true
							).ToList();

						Interlocked.Add(ref count, mappedResults.Count);
						Interlocked.Add(ref size, mappedResults.Sum(x => x.Size));

						mappedResults.ApplyIfNotNull(x => x.Bucket = 0);

						state.Enqueue(Tuple.Create(localKeys, mappedResults));
					});
				});

				var reduceKeys = new HashSet<string>(state.SelectMany(x => x.Item1));

				var results = state.SelectMany(x => x.Item2)
					.Where(x => x.Data != null)
					.GroupBy(x => x.Bucket, x => JsonToExpando.Convert(x.Data))
//......... some code omitted .........
Developer: 925coder, Project: ravendb, Lines: 101, Source: ReducingExecuter.cs
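The pattern Example 1 relies on is worth isolating: each parallel worker enqueues a Tuple of its local keys and results into a shared ConcurrentQueue, and after the parallel phase the coordinating thread merges everything with SelectMany. Below is a minimal sketch of that fan-in pattern, with simple stand-in types replacing RavenDB's HashSet<string> and List<MappedResultInfo>:

using System;
using System.Collections.Concurrent;
using System.Collections.Generic;
using System.Linq;
using System.Threading.Tasks;

class FanInSketch
{
	static void Main()
	{
		var state = new ConcurrentQueue<Tuple<HashSet<string>, List<int>>>();

		// workers run in parallel; each enqueues its local keys and results once,
		// so no further synchronization is needed on the shared queue
		Parallel.ForEach(Enumerable.Range(0, 4), worker =>
		{
			var localKeys = new HashSet<string> { "key-" + worker };
			var localResults = new List<int> { worker * 10, worker * 10 + 1 };
			state.Enqueue(Tuple.Create(localKeys, localResults));
		});

		// after the parallel phase completes, flatten the per-worker pieces
		var allKeys = new HashSet<string>(state.SelectMany(x => x.Item1));
		var allResults = state.SelectMany(x => x.Item2).ToList();

		Console.WriteLine("{0} keys, {1} results", allKeys.Count, allResults.Count);
	}
}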

Example 2: IndexDocuments


//......... some code omitted .........
							currentDocumentResults.Add(new DynamicJsonObject(currentDocJObject));

							if (EnsureValidNumberOfOutputsForDocument(documentId, currentDocumentResults.Count) == false)
							{
								skipDocument = true;
								currentDocumentResults.Clear();
								continue;
							}

							Interlocked.Increment(ref localStats.IndexingSuccesses);
						}
						count += ProcessBatch(viewGenerator, currentDocumentResults, currentKey, localChanges, accessor, statsPerKey, reduceInMapLinqExecutionDuration, putMappedResultsDuration, convertToRavenJObjectDuration);

						parallelStats.Operations.Add(PerformanceStats.From(IndexingOperation.LoadDocument, CurrentIndexingScope.Current.LoadDocumentDuration.ElapsedMilliseconds));
						parallelStats.Operations.Add(PerformanceStats.From(IndexingOperation.Linq_MapExecution, linqExecutionDuration.ElapsedMilliseconds));
						parallelStats.Operations.Add(PerformanceStats.From(IndexingOperation.Linq_ReduceLinqExecution, reduceInMapLinqExecutionDuration.ElapsedMilliseconds));
						parallelStats.Operations.Add(PerformanceStats.From(IndexingOperation.Map_PutMappedResults, putMappedResultsDuration.ElapsedMilliseconds));
						parallelStats.Operations.Add(PerformanceStats.From(IndexingOperation.Map_ConvertToRavenJObject, convertToRavenJObjectDuration.ElapsedMilliseconds));

						parallelOperations.Enqueue(parallelStats);
					});

					allReferenceEtags.Enqueue(CurrentIndexingScope.Current.ReferencesEtags);
					allReferencedDocs.Enqueue(CurrentIndexingScope.Current.ReferencedDocuments);
				}
			});

			performanceStats.Add(new ParallelPerformanceStats
			{
				NumberOfThreads = parallelOperations.Count,
				DurationMs = (long)(SystemTime.UtcNow - parallelProcessingStart).TotalMilliseconds,
				BatchedOperations = parallelOperations.ToList()
			});

			var updateDocumentReferencesDuration = new Stopwatch();
			using (StopwatchScope.For(updateDocumentReferencesDuration))
			{
				UpdateDocumentReferences(actions, allReferencedDocs, allReferenceEtags);
			}
			performanceStats.Add(PerformanceStats.From(IndexingOperation.UpdateDocumentReferences, updateDocumentReferencesDuration.ElapsedMilliseconds));

			var changed = allState.SelectMany(x => x.Item1).Concat(deleted.Keys)
					.Distinct()
					.ToList();

			var stats = new IndexingWorkStats(allState.Select(x => x.Item2));
			var reduceKeyStats = allState.SelectMany(x => x.Item3)
										 .GroupBy(x => x.Key)
										 .Select(g => new { g.Key, Count = g.Sum(x => x.Value) })
										 .ToList();

			BackgroundTaskExecuter.Instance.ExecuteAllBuffered(context, reduceKeyStats, enumerator => context.TransactionalStorage.Batch(accessor =>
			{
				while (enumerator.MoveNext())
				{
					var reduceKeyStat = enumerator.Current;
					accessor.MapReduce.IncrementReduceKeyCounter(indexId, reduceKeyStat.Key, reduceKeyStat.Count);
				}
			}));


			var parallelReductionOperations = new ConcurrentQueue<ParallelBatchStats>();
			var parallelReductionStart = SystemTime.UtcNow;

			BackgroundTaskExecuter.Instance.ExecuteAllBuffered(context, changed, enumerator => context.TransactionalStorage.Batch(accessor =>
			{
				var parallelStats = new ParallelBatchStats
				{
					StartDelay = (long)(SystemTime.UtcNow - parallelReductionStart).TotalMilliseconds
				};

				var scheduleReductionsDuration = new Stopwatch();

				using (StopwatchScope.For(scheduleReductionsDuration))
				{
					while (enumerator.MoveNext())
					{
						accessor.MapReduce.ScheduleReductions(indexId, 0, enumerator.Current);
					}
				}

				parallelStats.Operations.Add(PerformanceStats.From(IndexingOperation.Map_ScheduleReductions, scheduleReductionsDuration.ElapsedMilliseconds));
				parallelReductionOperations.Enqueue(parallelStats);
			}));

			performanceStats.Add(new ParallelPerformanceStats
			{
				NumberOfThreads = parallelReductionOperations.Count,
				DurationMs = (long)(SystemTime.UtcNow - parallelReductionStart).TotalMilliseconds,
				BatchedOperations = parallelReductionOperations.ToList()
			});

			UpdateIndexingStats(context, stats);

			performance.OnCompleted = () => BatchCompleted("Current Map", "Map", sourceCount, count, performanceStats);

			logIndexing.Debug("Mapped {0} documents for {1}", count, indexId);

			return performance;
		}
Developer: felipeleusin, Project: ravendb, Lines: 101, Source: MapReduceIndex.cs
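Example 2 (and Example 3 below) also merges per-thread statistics with the SelectMany + GroupBy + Sum idiom: each worker counts into a private Dictionary<string, int>, and the dictionaries are flattened and summed per key afterwards, as in the reduceKeyStats query. Here is a standalone sketch of that aggregation, with illustrative key names:

using System;
using System.Collections.Concurrent;
using System.Collections.Generic;
using System.Linq;
using System.Threading.Tasks;

class StatsMergeSketch
{
	static void Main()
	{
		var perThreadStats = new ConcurrentQueue<Dictionary<string, int>>();

		Parallel.ForEach(Enumerable.Range(0, 4), worker =>
		{
			// each worker counts into its own dictionary -- no locking needed
			var local = new Dictionary<string, int> { ["users"] = worker, ["orders"] = 1 };
			perThreadStats.Enqueue(local);
		});

		// flatten all per-thread (key, count) pairs and sum them per key,
		// mirroring the reduceKeyStats query in the examples above
		var merged = perThreadStats.SelectMany(d => d)
								   .GroupBy(kvp => kvp.Key)
								   .Select(g => new { g.Key, Count = g.Sum(kvp => kvp.Value) })
								   .ToList();

		foreach (var stat in merged)
			Console.WriteLine("{0}: {1}", stat.Key, stat.Count);
	}
}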

Example 3: IndexDocuments

		public override void IndexDocuments(
			AbstractViewGenerator viewGenerator,
			IndexingBatch batch,
			IStorageActionsAccessor actions,
			DateTime minimumTimestamp)
		{
			var count = 0;
			var sourceCount = 0;
			var sw = Stopwatch.StartNew();
			var start = SystemTime.UtcNow;
			var deleted = new Dictionary<ReduceKeyAndBucket, int>();
			var indexPerfStats = RecordCurrentBatch("Current Map", batch.Docs.Count);
			batch.SetIndexingPerformance(indexPerfStats);

			var documentsWrapped = batch.Docs.Select(doc =>
			{
				sourceCount++;
				var documentId = doc.__document_id;
				actions.MapReduce.DeleteMappedResultsForDocumentId((string)documentId, indexId, deleted);
				return doc;
			})
				.Where(x => x is FilteredDocument == false)
				.ToList();
			var allReferencedDocs = new ConcurrentQueue<IDictionary<string, HashSet<string>>>();
			var allReferenceEtags = new ConcurrentQueue<IDictionary<string, Etag>>();
			var allState = new ConcurrentQueue<Tuple<HashSet<ReduceKeyAndBucket>, IndexingWorkStats, Dictionary<string, int>>>();

			int loadDocumentCount = 0;
			long loadDocumentDuration = 0;
			BackgroundTaskExecuter.Instance.ExecuteAllBuffered(context, documentsWrapped, partition =>
			{
				var localStats = new IndexingWorkStats();
				var localChanges = new HashSet<ReduceKeyAndBucket>();
				var statsPerKey = new Dictionary<string, int>();
				allState.Enqueue(Tuple.Create(localChanges, localStats, statsPerKey));

				using (CurrentIndexingScope.Current = new CurrentIndexingScope(context.Database, PublicName))
				{
					// we are writing to the transactional store from multiple threads here, and in a streaming fashion
					// should result in less memory and better perf
					context.TransactionalStorage.Batch(accessor =>
					{
						var mapResults = RobustEnumerationIndex(partition, viewGenerator.MapDefinitions, localStats);
						var currentDocumentResults = new List<object>();
						string currentKey = null;
						bool skipDocument = false;
						foreach (var currentDoc in mapResults)
						{
							var documentId = GetDocumentId(currentDoc);
							if (documentId != currentKey)
							{
								count += ProcessBatch(viewGenerator, currentDocumentResults, currentKey, localChanges, accessor, statsPerKey);
								currentDocumentResults.Clear();
								currentKey = documentId;
							}
							else if (skipDocument)
							{
								continue;
							}
							currentDocumentResults.Add(new DynamicJsonObject(RavenJObject.FromObject(currentDoc, jsonSerializer)));

							if (EnsureValidNumberOfOutputsForDocument(documentId, currentDocumentResults.Count) == false)
							{
								skipDocument = true;
								currentDocumentResults.Clear();
								continue;
							}

							Interlocked.Increment(ref localStats.IndexingSuccesses);
						}
						count += ProcessBatch(viewGenerator, currentDocumentResults, currentKey, localChanges, accessor, statsPerKey);
					});
					allReferenceEtags.Enqueue(CurrentIndexingScope.Current.ReferencesEtags);
					allReferencedDocs.Enqueue(CurrentIndexingScope.Current.ReferencedDocuments);
					Interlocked.Add(ref loadDocumentCount, CurrentIndexingScope.Current.LoadDocumentCount);
					Interlocked.Add(ref loadDocumentDuration, CurrentIndexingScope.Current.LoadDocumentDuration.ElapsedMilliseconds);
				}
			});



			UpdateDocumentReferences(actions, allReferencedDocs, allReferenceEtags);

			var changed = allState.SelectMany(x => x.Item1).Concat(deleted.Keys)
					.Distinct()
					.ToList();

			var stats = new IndexingWorkStats(allState.Select(x => x.Item2));
			var reduceKeyStats = allState.SelectMany(x => x.Item3)
										 .GroupBy(x => x.Key)
										 .Select(g => new { g.Key, Count = g.Sum(x => x.Value) })
										 .ToList();

			BackgroundTaskExecuter.Instance.ExecuteAllBuffered(context, reduceKeyStats, enumerator => context.TransactionalStorage.Batch(accessor =>
			{
				while (enumerator.MoveNext())
				{
					var reduceKeyStat = enumerator.Current;
					accessor.MapReduce.IncrementReduceKeyCounter(indexId, reduceKeyStat.Key, reduceKeyStat.Count);
				}
//......... some code omitted .........
Developer: bbqchickenrobot, Project: ravendb, Lines: 101, Source: MapReduceIndex.cs
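Both IndexDocuments examples compute the set of changed reduce keys the same way: flatten the per-thread change sets with SelectMany, concatenate the keys deleted up front, and deduplicate once with Distinct. A minimal sketch of that merge, using plain strings in place of RavenDB's ReduceKeyAndBucket:

using System;
using System.Collections.Concurrent;
using System.Collections.Generic;
using System.Linq;

class ChangedKeysSketch
{
	static void Main()
	{
		// per-thread sets of changed keys, enqueued by parallel workers
		var allState = new ConcurrentQueue<HashSet<string>>();
		allState.Enqueue(new HashSet<string> { "a", "b" });
		allState.Enqueue(new HashSet<string> { "b", "c" });

		// keys whose mapped results were deleted up front
		var deleted = new Dictionary<string, int> { ["c"] = 1, ["d"] = 2 };

		// union of everything that changed, deduplicated once at the end
		var changed = allState.SelectMany(x => x)
							  .Concat(deleted.Keys)
							  .Distinct()
							  .ToList();

		Console.WriteLine(string.Join(", ", changed)); // a, b, c, d
	}
}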

Example 4: SingleStepReduce

		private void SingleStepReduce(IndexToWorkOn index, string[] keysToReduce, AbstractViewGenerator viewGenerator,
												List<object> itemsToDelete)
		{
			var needToMoveToSingleStep = new HashSet<string>();
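			// NOTE: unlike Example 1, this plain HashSet is mutated from the parallel
			// callbacks below without synchronization; HashSet<string> is not thread-safe,
			// which is presumably why the newer version in Example 1 switched to per-worker
			// sets collected in a ConcurrentQueue.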

			Log.Debug(() => string.Format("Executing single step reducing for {0} keys [{1}]", keysToReduce.Length, string.Join(", ", keysToReduce)));
			var batchTimeWatcher = Stopwatch.StartNew();
			var count = 0;
			var size = 0;
			var state = new ConcurrentQueue<Tuple<HashSet<string>, List<MappedResultInfo>>>();
			BackgroundTaskExecuter.Instance.ExecuteAllBuffered(context, keysToReduce, enumerator =>
			{
				var localKeys = new HashSet<string>();
				while (enumerator.MoveNext())
				{
					localKeys.Add(enumerator.Current);
				}
				transactionalStorage.Batch(actions =>
				{
					var getItemsToReduceParams = new GetItemsToReduceParams(index: index.IndexName, reduceKeys: localKeys, level: 0,
																			loadData: false,
																			itemsToDelete: itemsToDelete)
					{
						Take = int.MaxValue // just get all; we do the rate limiting when we load the number of keys to reduce anyway
					};
					var scheduledItems = actions.MapReduce.GetItemsToReduce(getItemsToReduceParams).ToList();

					foreach (var reduceKey in localKeys)
					{
						var lastPerformedReduceType = actions.MapReduce.GetLastPerformedReduceType(index.IndexName, reduceKey);

						if (lastPerformedReduceType != ReduceType.SingleStep)
							needToMoveToSingleStep.Add(reduceKey);

						if (lastPerformedReduceType != ReduceType.MultiStep)
							continue;

						Log.Debug("Key {0} was moved from multi step to single step reduce, removing existing reduce results records",
							reduceKey);

						// now we are in single step but previously multi step reduce was performed for the given key
						var mappedBuckets = actions.MapReduce.GetMappedBuckets(index.IndexName, reduceKey).ToList();

						// add scheduled items too to be sure we will delete reduce results of already deleted documents
						mappedBuckets.AddRange(scheduledItems.Select(x => x.Bucket));

						foreach (var mappedBucket in mappedBuckets.Distinct())
						{
							actions.MapReduce.RemoveReduceResults(index.IndexName, 1, reduceKey, mappedBucket);
							actions.MapReduce.RemoveReduceResults(index.IndexName, 2, reduceKey, mappedBucket / 1024);
						}
					}

					var mappedResults = actions.MapReduce.GetMappedResults(
							index.IndexName,
							localKeys,
							loadData: true
						).ToList();

					Interlocked.Add(ref count, mappedResults.Count);
					Interlocked.Add(ref size, mappedResults.Sum(x => x.Size));

					mappedResults.ApplyIfNotNull(x => x.Bucket = 0);

					state.Enqueue(Tuple.Create(localKeys, mappedResults));
				});
			});

			var reduceKeys = new HashSet<string>(state.SelectMany(x => x.Item1));

			var results = state.SelectMany(x => x.Item2)
						.Where(x => x.Data != null)
						.GroupBy(x => x.Bucket, x => JsonToExpando.Convert(x.Data))
						.ToArray();
			context.ReducedPerSecIncreaseBy(results.Length);

			context.TransactionalStorage.Batch(actions => 
				context.IndexStorage.Reduce(index.IndexName, viewGenerator, results, 2, context, actions, reduceKeys)
				);
			
			autoTuner.AutoThrottleBatchSize(count, size, batchTimeWatcher.Elapsed);

			foreach (var reduceKey in needToMoveToSingleStep)
			{
				string localReduceKey = reduceKey;
				transactionalStorage.Batch(actions =>
					actions.MapReduce.UpdatePerformedReduceType(index.IndexName, localReduceKey, ReduceType.SingleStep));
			}
		}
Developer: samueldjack, Project: ravendb, Lines: 89, Source: ReducingExecuter.cs
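The tail of both SingleStepReduce variants shows the typical end-to-end use of SelectMany here: flatten the per-worker result lists, drop entries without data, and group the survivors by bucket to feed the reducer. The following self-contained sketch mirrors that shape, with MappedResultInfo reduced to a small illustrative class:

using System;
using System.Collections.Concurrent;
using System.Collections.Generic;
using System.Linq;

class ReduceInputSketch
{
	class MappedResult
	{
		public int Bucket;
		public string Data; // null when the mapped result carried no payload
	}

	static void Main()
	{
		var state = new ConcurrentQueue<Tuple<HashSet<string>, List<MappedResult>>>();
		state.Enqueue(Tuple.Create(
			new HashSet<string> { "k1" },
			new List<MappedResult>
			{
				new MappedResult { Bucket = 0, Data = "{\"a\":1}" },
				new MappedResult { Bucket = 0, Data = null },      // filtered out below
				new MappedResult { Bucket = 1, Data = "{\"a\":2}" },
			}));

		// flatten, drop results without data, and group by bucket --
		// the same shape the reducer consumes in Examples 1 and 4
		var results = state.SelectMany(x => x.Item2)
						   .Where(x => x.Data != null)
						   .GroupBy(x => x.Bucket, x => x.Data)
						   .ToArray();

		foreach (var bucket in results)
			Console.WriteLine("bucket {0}: {1} item(s)", bucket.Key, bucket.Count());
	}
}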


Note: the ConcurrentQueue.SelectMany method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by many developers; copyright in the source code belongs to the original authors. For distribution and use, please follow the corresponding project's license; do not republish without permission.