This article collects typical usage examples of the ConcurrentDictionary.Select method in C#. If you are wondering what ConcurrentDictionary.Select does, how to use it, or where to find examples, the curated samples here may help. You can also explore further usage examples of the ConcurrentDictionary class that the method belongs to.
Below are 15 code examples of the ConcurrentDictionary.Select method, sorted by popularity by default.
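Before diving into the examples, here is a minimal, self-contained sketch of the pattern they all share (the type and variable names are illustrative only): because ConcurrentDictionary<TKey, TValue> implements IEnumerable<KeyValuePair<TKey, TValue>>, LINQ's Select projects each key/value pair without any extra locking.
using System;
using System.Collections.Concurrent;
using System.Linq;

class ConcurrentDictionarySelectSketch
{
    static void Main()
    {
        var scores = new ConcurrentDictionary<string, int>();
        scores.TryAdd("alice", 42);
        scores.TryAdd("bob", 17);
        scores["carol"] = 99; // the indexer adds or overwrites atomically

        // Select enumerates KeyValuePair<string, int> entries. Enumeration is
        // thread-safe and lock-free, but it is a moment-in-time view rather than
        // a frozen snapshot: concurrent adds/removes may or may not be observed.
        var summaries = scores
            .Select(kvp => $"{kvp.Key}: {kvp.Value}")
            .OrderBy(s => s, StringComparer.Ordinal)
            .ToList();

        summaries.ForEach(Console.WriteLine);
    }
}
Because the enumerator does not freeze the dictionary, several of the examples below (Example 1, for instance) first materialize a stable copy with ToList() or ToArray() before mutating the dictionary with TryRemove or TryAdd.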
Example 1: PostCreatedFiles
public static void PostCreatedFiles(ConcurrentDictionary<string, LocalFile> files, string uploadUrl)
{
var localFiles = files.ToList();
// Create post content
var content = new MultipartFormDataContent();
// Check we have content to send
if (!LoadFileToDataContent(content, files.Select(x => x.Value).ToList()))
{
Console.WriteLine("Failed to load files or no content was loaded. Cancelling upload");
return;
}
SendPostData(content, uploadUrl);
if (BaseConfig.Synchronize)
{
localFiles.ForEach(x => x.Value.Synchronized = true);
}
else
{
// Remove files from queue
foreach (var file in localFiles)
{
LocalFile deleted;
if (!files.TryRemove(file.Key, out deleted))
{
Console.WriteLine("Failed to remove");
}
}
}
}
Example 2: MissingRepos
public async Task<IActionResult> MissingRepos()
{
var gitHubName = HttpContext.User.Identity.Name;
var gitHubAccessToken = await HttpContext.Authentication.GetTokenAsync("access_token");
var gitHubClient = GitHubUtils.GetGitHubClient(gitHubAccessToken);
var repoDataSet = await RepoSetProvider.GetRepoDataSet();
var repoSetLists = repoDataSet.GetRepoSetLists();
var distinctOrgs =
repoSetLists
.SelectMany(
repoSet => repoSet.Value.Repos.Select(repoDefinition => repoDefinition.Owner))
.Distinct(StringComparer.OrdinalIgnoreCase)
.OrderBy(org => org).ToList();
var allOrgRepos = new ConcurrentDictionary<string, string[]>(StringComparer.OrdinalIgnoreCase);
var result = AsyncParallelUtils.ForEachAsync(distinctOrgs, 5, async org =>
{
var reposInOrg = await gitHubClient.Repository.GetAllForOrg(org);
allOrgRepos[org] = reposInOrg.Where(repo => !repo.Fork).Select(repo => repo.Name).ToArray();
});
await result;
var missingOrgRepos = allOrgRepos.Select(org =>
new MissingRepoSet
{
Org = org.Key,
MissingRepos =
org.Value
.Except(
repoSetLists
.SelectMany(repoSetList => repoSetList.Value.Repos)
.Select(repoDefinition => repoDefinition.Name), StringComparer.OrdinalIgnoreCase)
.OrderBy(repo => repo, StringComparer.OrdinalIgnoreCase)
.ToList(),
})
.OrderBy(missingRepoSet => missingRepoSet.Org, StringComparer.OrdinalIgnoreCase)
.ToList();
return View(new MissingReposViewModel
{
GitHubUserName = gitHubName,
RepoSetNames = repoDataSet.GetRepoSetLists().Select(repoSetList => repoSetList.Key).ToArray(),
MissingRepos = missingOrgRepos,
});
}
Example 3: RemoveObjectsFromTenantAsync
private async Task RemoveObjectsFromTenantAsync()
{
var client = TestClients.GetSAuthc1Client();
var results = new ConcurrentDictionary<string, Exception>();
// Delete applications
var deleteApplicationTasks = this.CreatedApplicationHrefs.Select(async href =>
{
try
{
var application = await client.GetResourceAsync<IApplication>(href);
var deleteResult = await application.DeleteAsync();
results.TryAdd(href, null);
}
catch (ResourceException rex)
{
if (rex.Code == 404)
{
// Already deleted
results.TryAdd(href, null);
}
}
catch (Exception e)
{
results.TryAdd(href, e);
}
});
// Delete directories
var deleteDirectoryTasks = this.CreatedDirectoryHrefs.Select(async href =>
{
try
{
var directory = await client.GetResourceAsync<IDirectory>(href);
var deleteResult = await directory.DeleteAsync();
results.TryAdd(href, null);
}
catch (ResourceException rex)
{
if (rex.Code == 404)
{
// Already deleted
results.TryAdd(href, null);
}
}
catch (Exception e)
{
results.TryAdd(href, e);
}
});
// Delete organizations
var deleteOrganizationTasks = this.CreatedOrganizationHrefs.Select(async href =>
{
try
{
var org = await client.GetResourceAsync<IOrganization>(href);
var deleteResult = await org.DeleteAsync();
results.TryAdd(href, null);
}
catch (ResourceException rex)
{
if (rex.Code == 404)
{
// Already deleted
results.TryAdd(href, null);
}
}
catch (Exception e)
{
results.TryAdd(href, e);
}
});
await Task.WhenAll(
Task.WhenAll(deleteApplicationTasks),
Task.WhenAll(deleteDirectoryTasks),
Task.WhenAll(deleteOrganizationTasks));
// All done! Throw errors if any occurred
bool anyErrors = results.Any(kvp => kvp.Value != null);
if (anyErrors)
{
throw new ApplicationException(
"Errors occurred during test cleanup. Full log: " + Environment.NewLine
+ string.Join(Environment.NewLine, results.Select(kvp => $"{kvp.Key} : '{(kvp.Value == null ? "Good" : kvp.Value.Message)}'")));
}
}
Example 4: GetData
public BermudaResult GetData(string domain, IEnumerable<string> blobs, string query, string mapreduce, string merge, DateTime minDate, DateTime maxDate, int remdepth, object[] parameters, string command)
{
var args = ParseCommand(command);
if (remdepth > 0)
{
//map
var blobInterfaces = blobs == null ? AzureInterface.Instance.ListBlobs(domain, minDate.Ticks, maxDate.Ticks) : AzureInterface.Instance.GetBlobInterfacesByNames(domain, blobs);
var blobSetKey = GetQueryChecksum(domain, string.Join(",", blobInterfaces.Select(x => x.Name)), query, mapreduce, minDate, maxDate, parameters, null);
//reduce
BermudaResult cachedDatapoints;
if (CachedData.TryGetValue(blobSetKey, out cachedDatapoints) && (DateTime.Now.Ticks - cachedDatapoints.CreatedOn) < CacheLifetime)
{
if (CacheTraceMessageLevel < 3) Trace.WriteLine("returned CACHED BLOBS DATAPOINTS results FOR ENTIRE BLOB SET [REMDEPTH:" + remdepth + "]");
return new BermudaResult { DataType = cachedDatapoints.DataType, Data = cachedDatapoints.Data, MetadataObject = new BermudaNodeStatistic { Notes = "Cache_Hit_1" } };
}
else
{
var assignments = PartitionBlobs(domain, blobInterfaces, minDate, maxDate, false, true);
if (!assignments.Any()) throw new Exception("Specified dataset not loaded: " + domain);
ConcurrentDictionary<IPEndPoint, BermudaResult> results = new ConcurrentDictionary<IPEndPoint, BermudaResult>();
Stopwatch sw = new Stopwatch();
sw.Start();
List<Task> tasks = new List<Task>();
foreach (var ass in assignments)
{
Task t = new Task((assObj) =>
{
ZipMetadata assignment = assObj as ZipMetadata;
var initiated = DateTime.Now;
var blobSubsetKey = GetQueryChecksum(domain, string.Join(",", assignment.Blobs.Select(x => x.Name)), query, mapreduce, minDate, maxDate, parameters, assignment.PeerEndpoint.ToString());
Stopwatch sw3 = new Stopwatch();
sw3.Start();
//see if the cache contains a matching result and return it if it's not outdated
BermudaResult cachedDatapoints2;
if (CachedData.TryGetValue(blobSubsetKey, out cachedDatapoints2) && (DateTime.Now.Ticks - cachedDatapoints2.CreatedOn) < CacheLifetime)
{
if (CacheTraceMessageLevel < 2) Trace.WriteLine("returned CACHED BLOB DATAPOINT results FOR BLOB SUBSET [REMDEPTH:" + remdepth + "]");
results[assignment.PeerEndpoint] = new BermudaResult { DataType = cachedDatapoints2.DataType, Data = cachedDatapoints2.Data, MetadataObject = new BermudaNodeStatistic { Notes = "Cache_Hit_2" } };
}
else
{
try
{
Stopwatch sw2 = new Stopwatch();
sw2.Start();
BermudaResult subresult = null;
if (assignment.PeerEndpoint.Equals(Endpoint))
{
subresult = GetData(domain, assignment.Blobs.Select(x => x.Name), query, mapreduce, merge, minDate, maxDate, remdepth - 1, parameters, command);
}
else
{
using (var client = AzureInterface.Instance.GetServiceClient(assignment.PeerEndpoint))
{
subresult = client.GetData(domain, query, mapreduce, merge, minDate, maxDate, remdepth - 1, parameters, command);
}
}
sw2.Stop();
subresult.CreatedOn = DateTime.Now.Ticks;
subresult.MetadataObject.Initiated = initiated;
subresult.MetadataObject.Completed = DateTime.Now;
subresult.MetadataObject.OperationTime = sw2.Elapsed;
results[assignment.PeerEndpoint] = CachedData[blobSubsetKey] = subresult;
}
catch (Exception ex)
{
results[assignment.PeerEndpoint] = new BermudaResult { Error = "[Failed Node] " + ex };
}
}
}, ass, TaskCreationOptions.LongRunning);
tasks.Add(t);
t.Start();
}
Task.WaitAll(tasks.ToArray());
sw.Stop();
Trace.WriteLine("Join Time:" + sw.Elapsed);
if (results.All(x => x.Value.Error != null)) throw new Exception("All nodes failed:\r\n" + string.Join("\r\n", results.Select(x => x.Value.Error)));
//if all results are not of the same type, throw an error
if (results.GroupBy(x => x.Value.DataType).Count() > 1) throw new Exception("Subresults must all return the same type");
var dataTypeDescriptor = results.Select(x => x.Value.DataType).FirstOrDefault(x => x != null);
if (dataTypeDescriptor == null) return new BermudaResult { Error = "Could not determine the merge type, none of the nodes provided type info" };
//......... (part of the code is omitted here) .........
Example 5: MigratePermissions0_9
private void MigratePermissions0_9(IUnitOfWork uow)
{
var PermissionsDict = new ConcurrentDictionary<ulong, ServerPermissions0_9>();
if (!Directory.Exists("data/permissions/"))
{
_log.Warn("No data from permissions will be migrated.");
return;
}
foreach (var file in Directory.EnumerateFiles("data/permissions/"))
{
try
{
var strippedFileName = Path.GetFileNameWithoutExtension(file);
if (string.IsNullOrWhiteSpace(strippedFileName)) continue;
var id = ulong.Parse(strippedFileName);
var data = JsonConvert.DeserializeObject<ServerPermissions0_9>(File.ReadAllText(file));
PermissionsDict.TryAdd(id, data);
}
catch { }
}
var i = 0;
PermissionsDict
.Select(p => new { data = p.Value, gconfig = uow.GuildConfigs.For(p.Key) })
.AsParallel()
.ForAll(perms =>
{
try
{
var data = perms.data;
var gconfig = perms.gconfig;
gconfig.PermissionRole = data.PermissionsControllerRole;
gconfig.VerbosePermissions = data.Verbose;
gconfig.FilteredWords = new HashSet<FilteredWord>(data.Words.Select(w => w.ToLowerInvariant())
.Distinct()
.Select(w => new FilteredWord() { Word = w }));
gconfig.FilterWords = data.Permissions.FilterWords;
gconfig.FilterInvites = data.Permissions.FilterInvites;
gconfig.FilterInvitesChannelIds = new HashSet<FilterChannelId>();
gconfig.FilterInvitesChannelIds.AddRange(data.ChannelPermissions.Where(kvp => kvp.Value.FilterInvites)
.Select(cp => new FilterChannelId()
{
ChannelId = cp.Key
}));
gconfig.FilterWordsChannelIds = new HashSet<FilterChannelId>();
gconfig.FilterWordsChannelIds.AddRange(data.ChannelPermissions.Where(kvp => kvp.Value.FilterWords)
.Select(cp => new FilterChannelId()
{
ChannelId = cp.Key
}));
gconfig.CommandCooldowns = new HashSet<CommandCooldown>(data.CommandCooldowns
.Where(cc => !string.IsNullOrWhiteSpace(cc.Key) && cc.Value > 0)
.Select(cc => new CommandCooldown()
{
CommandName = cc.Key,
Seconds = cc.Value
}));
_log.Info("Migrating data from permissions folder for {0} done ({1})", gconfig.GuildId, ++i);
}
catch (Exception ex)
{
_log.Error(ex);
}
});
try { Directory.Move("data/permissions", "data/DELETE_ME_permissions"); } catch { }
}
Example 6: MigrateServerSpecificConfigs0_9
private void MigrateServerSpecificConfigs0_9(IUnitOfWork uow)
{
const string specificConfigsPath = "data/ServerSpecificConfigs.json";
if (!File.Exists(specificConfigsPath))
{
_log.Warn($"No data from {specificConfigsPath} will be migrated.");
return;
}
var configs = new ConcurrentDictionary<ulong, ServerSpecificConfig>();
try
{
configs = JsonConvert
.DeserializeObject<ConcurrentDictionary<ulong, ServerSpecificConfig>>(
File.ReadAllText(specificConfigsPath), new JsonSerializerSettings()
{
Error = (s, e) =>
{
if (e.ErrorContext.Member.ToString() == "GenerateCurrencyChannels")
{
e.ErrorContext.Handled = true;
}
}
});
}
catch (Exception ex)
{
_log.Warn(ex, "ServerSpecificConfig deserialization failed");
return;
}
var i = 0;
var selfAssRoles = new ConcurrentHashSet<SelfAssignedRole>();
configs
.Select(p => new { data = p.Value, gconfig = uow.GuildConfigs.For(p.Key) })
.AsParallel()
.ForAll(config =>
{
try
{
var guildConfig = config.gconfig;
var data = config.data;
guildConfig.AutoAssignRoleId = data.AutoAssignedRole;
guildConfig.DeleteMessageOnCommand = data.AutoDeleteMessagesOnCommand;
guildConfig.DefaultMusicVolume = data.DefaultMusicVolume;
guildConfig.ExclusiveSelfAssignedRoles = data.ExclusiveSelfAssignedRoles;
guildConfig.GenerateCurrencyChannelIds = new HashSet<GCChannelId>(data.GenerateCurrencyChannels.Select(gc => new GCChannelId() { ChannelId = gc.Key }));
selfAssRoles.AddRange(data.ListOfSelfAssignableRoles.Select(r => new SelfAssignedRole() { GuildId = guildConfig.GuildId, RoleId = r }).ToArray());
var logSetting = guildConfig.LogSetting;
guildConfig.LogSetting.IsLogging = data.LogChannel != null;
guildConfig.LogSetting.ChannelId = data.LogChannel ?? 0;
guildConfig.LogSetting.IgnoredChannels = new HashSet<IgnoredLogChannel>(data.LogserverIgnoreChannels.Select(id => new IgnoredLogChannel() { ChannelId = id }));
guildConfig.LogSetting.LogUserPresence = data.LogPresenceChannel != null;
guildConfig.LogSetting.UserPresenceChannelId = data.LogPresenceChannel ?? 0;
guildConfig.FollowedStreams = new HashSet<FollowedStream>(data.ObservingStreams.Select(x =>
{
FollowedStream.FollowedStreamType type = FollowedStream.FollowedStreamType.Twitch;
switch (x.Type)
{
case StreamNotificationConfig0_9.StreamType.Twitch:
type = FollowedStream.FollowedStreamType.Twitch;
break;
case StreamNotificationConfig0_9.StreamType.Beam:
type = FollowedStream.FollowedStreamType.Beam;
break;
case StreamNotificationConfig0_9.StreamType.Hitbox:
type = FollowedStream.FollowedStreamType.Hitbox;
break;
default:
break;
}
return new FollowedStream()
{
ChannelId = x.ChannelId,
GuildId = guildConfig.GuildId,
Username = x.Username.ToLowerInvariant(),
Type = type
};
}));
guildConfig.VoicePlusTextEnabled = data.VoicePlusTextEnabled;
_log.Info("Migrating SpecificConfig for {0} done ({1})", guildConfig.GuildId, ++i);
}
catch (Exception ex)
{
_log.Error(ex);
}
});
uow.SelfAssignedRoles.AddRange(selfAssRoles.ToArray());
try { File.Move("data/ServerSpecificConfigs.json", "data/DELETE_ME_ServerSpecificConfigs.json"); } catch { }
}
Example 7: DashboardViewModel
/// <summary>
/// Initializes a new instance of the <see cref="DashboardViewModel"/> class.
/// </summary>
public DashboardViewModel()
{
var universities = new UniversityBuilder().Build(2).ToList();
this.DataPoints = new ObservableCollection<DataPoint>();
var mongoDbRepository = new MongoDbRepository(ConfigurationManager.AppSettings["MongoDbConnectionString"]);
var entityFrameworkRepository = new EntityFrameworkRepository(ConfigurationManager.AppSettings["EntityFrameworkConnectionString"]);
var adoRepository = new AdoRepository(ConfigurationManager.AppSettings["AdoConnectionString"]);
var redisRepository = new RedisRepository(ConfigurationManager.AppSettings["RedisConnectionString"]);
var repositories = new List<IRepository> { mongoDbRepository, entityFrameworkRepository, adoRepository, redisRepository };
var progress = new ConcurrentDictionary<IRepository, int>(repositories.Select(r => new KeyValuePair<IRepository, int>(r, 0)));
this.AdoNetRepository = new ChartableRepository
{
Title = "ADO.NET",
Key = "Ado",
Brush = new SolidColorBrush((Color)ColorConverter.ConvertFromString("#EC098C")),
Repository = adoRepository
};
this.Ef5Repository = new ChartableRepository
{
Title = "EF5",
Key = "EntityFramework",
Brush = new SolidColorBrush((Color)ColorConverter.ConvertFromString("#00B159")),
Repository = entityFrameworkRepository
};
this.MongoDbRepository = new ChartableRepository
{
Title = "MongoDB",
Key = "MongoDB",
Brush = new SolidColorBrush((Color)ColorConverter.ConvertFromString("#7C4199")),
Repository = mongoDbRepository
};
this.RedisRepository = new ChartableRepository
{
Title = "Redis",
Key = "Redis",
Brush = new SolidColorBrush((Color)ColorConverter.ConvertFromString("#F37735")),
Repository = redisRepository
};
this.ChartableRepositories = new List<ChartableRepository>{ this.AdoNetRepository, this.Ef5Repository, this.MongoDbRepository, this.RedisRepository };
var timer = new TickOnceDispatcherTimer(new TimeSpan(0, 0, 0, 0, 100));
timer.Tick += delegate
{
var datapoint = new DataPoint
{
Ticks = (this.DataPoints.Count + 1) / 10.0,
MongoDB = progress[mongoDbRepository],
EntityFramework = progress[entityFrameworkRepository],
Ado = progress[adoRepository],
Redis = progress[redisRepository]
};
this.DataPoints.Add(datapoint);
if (this.AdoNetRepository.LastDuration == default(TimeSpan) && datapoint.Ado == universities.Count)
{
this.AdoNetRepository.LastDuration = Stopwatch.Elapsed;
}
if (this.Ef5Repository.LastDuration == default(TimeSpan) && datapoint.EntityFramework == universities.Count)
{
this.Ef5Repository.LastDuration = Stopwatch.Elapsed;
}
if (this.MongoDbRepository.LastDuration == default(TimeSpan) && datapoint.MongoDB == universities.Count)
{
this.MongoDbRepository.LastDuration = Stopwatch.Elapsed;
}
if (this.RedisRepository.LastDuration == default(TimeSpan) && datapoint.Redis == universities.Count)
{
this.RedisRepository.LastDuration = Stopwatch.Elapsed;
}
this.OnPropertyChanged("Stopwatch");
};
Action<IRepository> notifyProgress = delegate(IRepository repository)
{
if (!timer.IsEnabled)
{
timer.Start();
}
progress[repository]++;
};
EventHandler commandCompleteEventHandler = delegate
{
Task.Factory.StartNew(timer.StopWhenTicked);
//......... (part of the code is omitted here) .........
Example 8: CanDoCompetingConsumers
public void CanDoCompetingConsumers(int messageCount, int threadCount)
{
using (CreateQueue(InputQueueName).Purge()) { }
var keepRunning = true;
var lastMessageReceivedTime = DateTime.UtcNow;
var receivedMessagesDistribution = new ConcurrentDictionary<int, int>();
var receivers = Enumerable.Range(0, threadCount)
.Select(i =>
{
var queue = TrackDisposable(CreateQueue(InputQueueName));
var number = i + 1;
return new Thread(() =>
{
Console.WriteLine("Receiver {0} started", number);
while (keepRunning)
{
using (var tx = new TransactionScope())
{
var receivedMessage = queue.ReceiveMessage(new AmbientTransactionContext());
if (receivedMessage != null)
{
receivedMessagesDistribution.AddOrUpdate(number, (key) => 1, (key, value) => value + 1);
Console.Write(".");
lastMessageReceivedTime = DateTime.UtcNow;
}
tx.Complete();
}
}
Console.WriteLine("Receiver {0} stopped", number);
});
})
.ToList();
var sender = CreateQueue("test_competing_sender");
Console.WriteLine("Sending {0} messages", messageCount);
messageCount.Times(() => sender.Send(InputQueueName, new TransportMessageToSend
{
Headers = new Dictionary<string, object>(),
Body = Encoding.UTF8.GetBytes("w00000t!")
}, new NoTransaction()));
Console.WriteLine("Starting {0} receivers", receivers.Count);
receivers.ForEach(r => r.Start());
lastMessageReceivedTime = DateTime.UtcNow;
while (lastMessageReceivedTime.ElapsedUntilNow() < 3.Seconds())
{
Console.WriteLine("Waiting...");
Thread.Sleep(2.Seconds());
}
Console.WriteLine("Stopping receivers...");
keepRunning = false;
receivers.ForEach(r => r.Join());
Console.WriteLine("Got {0} messages distributed among workers like this:", receivedMessagesDistribution.Sum(d => d.Value));
Console.WriteLine(string.Join(Environment.NewLine, receivedMessagesDistribution.Select(kvp => string.Format("{0:000}: {1}", kvp.Key, new string('=', kvp.Value)))));
}
Example 9: RunDictionaryTest_Add1
private static bool RunDictionaryTest_Add1(int cLevel, int initSize, int threads, int addsPerThread)
{
TestHarness.TestLog(
"* RunDictionaryTest_Add1(cLevel={0}, initSize={1}, threads={2}, addsPerThread={3})",
cLevel, initSize, threads, addsPerThread);
IDictionary<int, int> dict = new ConcurrentDictionary<int, int>(cLevel, 1);
int count = threads;
using (ManualResetEvent mre = new ManualResetEvent(false))
{
for (int i = 0; i < threads; i++)
{
int ii = i;
ThreadPool.QueueUserWorkItem(
(o) =>
{
for (int j = 0; j < addsPerThread; j++)
{
dict.Add(j + ii * addsPerThread, -(j + ii * addsPerThread));
}
if (Interlocked.Decrement(ref count) == 0) mre.Set();
});
}
mre.WaitOne();
}
if (dict.Any(pair => pair.Key != -pair.Value))
{
TestHarness.TestLog(" > Invalid value for some key in the dictionary.");
return false;
}
var gotKeys = dict.Select(pair => pair.Key).OrderBy(i => i).ToArray();
var expectKeys = Enumerable.Range(0, threads * addsPerThread);
if (!gotKeys.SequenceEqual(expectKeys))
{
TestHarness.TestLog(" > The set of keys in the dictionary is invalid.");
return false;
}
// Finally, let's verify that the count is reported correctly.
int expectedCount = threads * addsPerThread;
if (dict.Count != expectedCount || dict.ToArray().Length != expectedCount || dict.ToList().Count() != expectedCount)
{
TestHarness.TestLog(" > Incorrect count of elements reported for the dictionary.");
return false;
}
return true;
}
Example 10: RunDictionaryTest
private static bool RunDictionaryTest(int cLevel, int initSize, int threads, int addsPerThread, TestMethod testMethod)
{
TestHarness.TestLog("* RunDictionaryTest_{0}, Level={1}, initSize={2}, threads={3}, addsPerThread={4})",
PrintTestMethod(testMethod), cLevel, initSize, threads, addsPerThread);
ConcurrentDictionary<int, int> dict = new ConcurrentDictionary<int, int>(cLevel, 1);
int count = threads;
using (ManualResetEvent mre = new ManualResetEvent(false))
{
for (int i = 0; i < threads; i++)
{
int ii = i;
ThreadPool.QueueUserWorkItem(
(o) =>
{
for (int j = 0; j < addsPerThread; j++)
{
//call either of the two overloads of GetOrAdd
if ((j + ii) % 2 == 0)
{
dict.GetOrAdd(j, -j);
}
else
{
dict.GetOrAdd(j, x => -x);
}
}
if (Interlocked.Decrement(ref count) == 0) mre.Set();
});
}
mre.WaitOne();
}
bool passed = true;
if (dict.Any(pair => pair.Key != -pair.Value))
{
TestHarness.TestLog(" > Invalid value for some key in the dictionary.");
passed = false;
}
var gotKeys = dict.Select(pair => pair.Key).OrderBy(i => i).ToArray();
var expectKeys = Enumerable.Range(0, addsPerThread);
if (!gotKeys.SequenceEqual(expectKeys))
{
TestHarness.TestLog(" > The set of keys in the dictionary is invalid.");
passed = false;
}
// Finally, let's verify that the count is reported correctly.
int expectedCount = addsPerThread;
int count1 = dict.Count, count2 = dict.ToArray().Length,
count3 = dict.ToList().Count;
if (count1 != expectedCount || count2 != expectedCount || count3 != expectedCount)
{
TestHarness.TestLog(" > Incorrect count of elements reported for the dictionary. Expected {0}, Dict.Count {1}, ToArray.Length {2}, ToList.Count {3}",
expectedCount, count1, count2, count3);
passed = false;
}
return passed;
}
Example 11: RunDictionaryTest_Update1
private static bool RunDictionaryTest_Update1(int cLevel, int threads, int updatesPerThread)
{
TestHarness.TestLog("* RunDictionaryTest_Update1(cLevel={0}, threads={1}, updatesPerThread={2})", cLevel, threads, updatesPerThread);
IDictionary<int, int> dict = new ConcurrentDictionary<int, int>(cLevel, 1);
for (int i = 1; i <= updatesPerThread; i++) dict[i] = i;
int running = threads;
using (ManualResetEvent mre = new ManualResetEvent(false))
{
for (int i = 0; i < threads; i++)
{
int ii = i;
ThreadPool.QueueUserWorkItem(
(o) =>
{
for (int j = 1; j <= updatesPerThread; j++)
{
dict[j] = (ii + 2) * j;
}
if (Interlocked.Decrement(ref running) == 0) mre.Set();
});
}
mre.WaitOne();
}
if ((from pair in dict
let div = pair.Value / pair.Key
let rem = pair.Value % pair.Key
select rem != 0 || div < 2 || div > threads + 1)
.Any(res => res))
{
TestHarness.TestLog(" > Invalid value for some key in the dictionary.");
return false;
}
var gotKeys = dict.Select(pair => pair.Key).OrderBy(i => i).ToArray();
var expectKeys = Enumerable.Range(1, updatesPerThread);
if (!gotKeys.SequenceEqual(expectKeys))
{
TestHarness.TestLog(" > The set of keys in the dictionary is invalid.");
return false;
}
return true;
}
Example 12: SuggestEntites
/// <summary>
/// Suggests other potential options based on the entities passed
/// </summary>
/// <param name="rows">The rows being imported</param>
/// <returns>Entities with suggestions</returns>
public Suggestions SuggestEntites(ImportRow[] rows)
{
var rowSuggestionsConcurrentDictionary = new ConcurrentDictionary<int, RowSuggestions>();
var suggestionsToReturn = new Suggestions();
var clients = new ConcurrentDictionary<Guid, FoundOps.Api.Models.Client>();
var locations = new ConcurrentDictionary<Guid, FoundOps.Api.Models.Location>();
var contactInfoSets = new ConcurrentDictionary<Guid, FoundOps.Api.Models.ContactInfo>();
Parallel.For((long)0, rows.Count(), rowIndex =>
{
var row = rows[rowIndex];
var rowSuggestions = new RowSuggestions();
#region Location
if (row.Location != null)
{
//Find all the Locations to be suggested by finding all Locations for the Client of the row
var locationSuggestions = row.Client != null
? _locations.Where(l => l.Value.ClientId == row.Client.Id).ToArray()
: null;
if (locationSuggestions != null)
{
//Add any of the suggestions to the rows suggestions
rowSuggestions.LocationSuggestions.AddRange(locationSuggestions.Select(l => l.Key));
var convertedLocationSuggestions = locationSuggestions.Select(l => l.Value).Select(FoundOps.Api.Models.Location.ConvertModel);
//Add all suggested Locations to the list of Locations to be returned
foreach (var location in convertedLocationSuggestions)
locations.GetOrAdd(location.Id, location);
}
//Add the matched/new location as the first suggestion
rowSuggestions.LocationSuggestions.Add(row.Location.Id);
//Add the location passed to the list of location entities
locations.GetOrAdd(row.Location.Id, row.Location);
}
#endregion
#region Client
if (row.Client != null)
{
//Find all the Clients to be suggested by finding all Clients for the Location of the row
var clientSuggestions = row.Location != null
? _clients.Where(c => c.Key == row.Location.ClientId).ToArray()
: null;
if (clientSuggestions != null)
{
//Add any of the suggestions to the rows suggestions
rowSuggestions.ClientSuggestions.AddRange(clientSuggestions.Select(c => c.Key));
var convertedClientSuggestions = clientSuggestions.Select(c => c.Value).Select(FoundOps.Api.Models.Client.ConvertModel);
//Add all suggested Clients to the list of Clients to be returned
foreach (var client in convertedClientSuggestions)
clients.GetOrAdd(client.Id, client);
}
//Add the matched/new client as the first suggestion
rowSuggestions.ClientSuggestions.Add(row.Client.Id);
//Add the Client passed to the list of client entities
clients.GetOrAdd(row.Client.Id, row.Client);
}
#endregion
//Repeat
if (row.Repeat != null)
rowSuggestions.Repeats.Add(row.Repeat);
//Contact Info
if (row.ContactInfoSet.Count != 0)
{
rowSuggestions.ContactInfoSuggestions.AddRange(row.ContactInfoSet.Select(ci => ci.Id));
foreach (var contactInfoSet in row.ContactInfoSet)
contactInfoSets.GetOrAdd(contactInfoSet.Id, contactInfoSet);
}
//Add this row's suggestions to the list to be returned
rowSuggestionsConcurrentDictionary.GetOrAdd((int)rowIndex, rowSuggestions);
});
//Order the row suggestions by rowIndex
suggestionsToReturn.RowSuggestions.AddRange(rowSuggestionsConcurrentDictionary.OrderBy(kvp => kvp.Key).Select(kvp => kvp.Value));
//Only add distinct Clients
var distinctClients = clients.Distinct();
//......... (part of the code is omitted here) .........
Example 13: ValidateThenSuggestEntities
//......... (part of the code is omitted here) .........
if (dayStrings.Any(s => s == "s" || s == "sa" || s == "sat" || s == "saturday"))
daysOfWeek.Add(DayOfWeek.Saturday);
//Make sure the days include the start date
if (!daysOfWeek.Contains(startDayOfWeek))
daysOfWeek.Add(startDayOfWeek);
repeat.FrequencyDetailAsWeeklyFrequencyDetail = daysOfWeek.OrderBy(e => (int)e).ToArray();
}
}
if (repeat.Frequency == Frequency.Monthly)
{
if (string.IsNullOrEmpty(val) || val == "date")
{
repeat.FrequencyDetailAsMonthlyFrequencyDetail = MonthlyFrequencyDetail.OnDayInMonth;
}
else if (val == "day")
{
var detailsAvailable = repeat.AvailableMonthlyFrequencyDetailTypes.ToList();
if (detailsAvailable.Count() > 1)
detailsAvailable.Remove(MonthlyFrequencyDetail.OnDayInMonth);
repeat.FrequencyDetailAsMonthlyFrequencyDetail = detailsAvailable.First();
}
}
#endregion
repeat.StatusInt = repeat.RepeatEveryTimes == null || repeat.FrequencyInt == null || setError
? (int)ImportStatus.Error
: (int)ImportStatus.New;
importRow.Repeat = repeat;
}
#endregion
#region Contact Info
//Create label and value dictionaries for Phone Number contact information
var phoneValueDictionary = phoneNumberValueCols.ToDictionary(p => Convert.ToInt32(p.Split(' ').ElementAt(2)), p => row[Array.IndexOf(headers, p)]);
var phoneLabelDictionary = phoneNumberLabelCols.ToDictionary(p => Convert.ToInt32(p.Split(' ').ElementAt(2)), p => row[Array.IndexOf(headers, p)]);
//Create label and value dictionaries for Email Address contact information
var emailValueDictionary = emailValueCols.ToDictionary(e => Convert.ToInt32(e.Split(' ').ElementAt(2)), e => row[Array.IndexOf(headers, e)]);
var emailLabelDictionary = emailLabelCols.ToDictionary(e => Convert.ToInt32(e.Split(' ').ElementAt(2)), e => row[Array.IndexOf(headers, e)]);
//Create label and value dictionaries for Website contact information
var websiteValueDictionary = websiteValueCols.ToDictionary(w => Convert.ToInt32(w.Split(' ').ElementAt(2)), w => row[Array.IndexOf(headers, w)]);
var websiteLabelDictionary = websiteLabelCols.ToDictionary(w => Convert.ToInt32(w.Split(' ').ElementAt(2)), w => row[Array.IndexOf(headers, w)]);
//Create label and value dictionaries for any other types of contact information
var otherValueDictionary = otherValueCols.ToDictionary(o => Convert.ToInt32(o.Split(' ').ElementAt(2)), o => row[Array.IndexOf(headers, o)]);
var otherLabelDictionary = otherLabelCols.ToDictionary(o => Convert.ToInt32(o.Split(' ').ElementAt(2)), o => row[Array.IndexOf(headers, o)]);
//Find which type of contact info is being imported the most
//This way we only have one loop
var maxLabel = Math.Max(Math.Max(phoneLabelDictionary.Count, emailLabelDictionary.Count), Math.Max(websiteLabelDictionary.Count, otherLabelDictionary.Count));
var maxValue = Math.Max(Math.Max(phoneValueDictionary.Count, emailValueDictionary.Count), Math.Max(websiteValueDictionary.Count, otherValueDictionary.Count));
var max = Math.Max(maxLabel, maxValue);
//Dictionary of Contact Information to be imported for the row
var concurrentContactInfoDictionary = new ConcurrentDictionary<Guid, ContactInfo>();
Parallel.For((long)1, max + 1, contactIndex =>
{
//Phone
if (phoneLabelDictionary.Count >= contactIndex || phoneValueDictionary.Count >= contactIndex)
MatchContactInfo(phoneLabelDictionary, phoneValueDictionary, contactIndex, concurrentContactInfoDictionary, "Phone Number");
//Email
if (emailLabelDictionary.Count >= contactIndex || emailValueDictionary.Count >= contactIndex)
MatchContactInfo(emailLabelDictionary, emailValueDictionary, contactIndex, concurrentContactInfoDictionary, "Email Address");
//Website
if (websiteLabelDictionary.Count >= contactIndex || websiteValueDictionary.Count >= contactIndex)
MatchContactInfo(websiteLabelDictionary, websiteValueDictionary, contactIndex, concurrentContactInfoDictionary, "Website");
//Other
if (otherLabelDictionary.Count >= contactIndex || otherValueDictionary.Count >= contactIndex)
MatchContactInfo(otherLabelDictionary, otherValueDictionary, contactIndex, concurrentContactInfoDictionary, "Other");
});
//Once all the contact info sets are made or matched, add them to the ImportRow
importRow.ContactInfoSet.AddRange(concurrentContactInfoDictionary.Select(ci => ci.Value));
#endregion
//Add the ImportRow to the concurrent dictionary of ImportRows
importRowsConcurrentDictionary.GetOrAdd((int)rowIndex, importRow);
});
//Order the ImportRows by rowIndex
var importRows = importRowsConcurrentDictionary.OrderBy(kvp => kvp.Key).Select(kvp => kvp.Value).ToArray();
//Send the validated ImportRows to get suggestions and return
return SuggestEntites(importRows);
}
Example 14: GetStatus
public string GetStatus(bool isInternal)
{
var reducers = HostEnvironment.Instance.GetAvailablePeerConnections(10000);
ConcurrentDictionary<string, string> results = new ConcurrentDictionary<string, string>();
List<Task> statusTasks = new List<Task>();
foreach (var reducer in reducers)
{
Task t = new Task((r) =>
{
var rie = r as PeerInfo;
string id = rie.Id.Split('_').LastOrDefault();
try
{
if (rie.Equals(Endpoint))
{
var memStatus = SystemInfo.GetMemoryStatusEx();
var usedMem = Math.Round((double)(memStatus.ullTotalPhys - memStatus.ullAvailPhys) / 1073741824d, 2); //Math.Round( (double)GC.GetTotalMemory(false) / 1073741824d, 4);
var availMem = Math.Round((double)memStatus.ullAvailPhys / 1073741824d, 2);
string sub = string.Format(" Memory Usage: Used={0}GB, Available={1}GB", usedMem, availMem);
long totalMentions = 0;
//foreach (var sql in SqlInterface.StoredSqlInterfaces)
foreach (var sql in ComputeNode.Node.Catalogs.Values)
{
var count = sql.GetCount(null);
totalMentions += count;
sub += "\r\n Subdomain:" + sql.Name + " Mentions:" + count;
}
sub += "\r\n Total Mentions:" + totalMentions;
results[id] = sub;
}
else
{
if (!isInternal)
{
using (var client = HostEnvironment.GetServiceClient(rie))
{
results[id] = client.Ping("status");
}
}
}
}
catch (Exception ex)
{
results[id] = "[Failed Status On Node] " + ex.Message;
}
},
reducer,
TaskCreationOptions.LongRunning);
statusTasks.Add(t);
t.Start();
}
Task.WaitAll(statusTasks.ToArray());
if (isInternal) return string.Join("\r\n", results.Values);
else return string.Join("\r\n", results.Select(x => "[@" + x.Key + "] ==> " + x.Value));
}
Example 15: IndexReplicate
public HttpResponseMessage IndexReplicate([FromBody] ReplicationDestination replicationDestination)
{
var op = GetQueryStringValue("op");
if (string.Equals(op, "replicate-all", StringComparison.InvariantCultureIgnoreCase))
return ReplicateAllIndexes();
if (string.Equals(op, "replicate-all-to-destination", StringComparison.InvariantCultureIgnoreCase))
return ReplicateAllIndexes(dest => dest.IsEqualTo(replicationDestination));
var indexName = GetQueryStringValue("indexName");
if(indexName == null)
throw new InvalidOperationException("indexName query string must be specified if op=replicate-all or op=replicate-all-to-destination isn't specified");
//check for replication document before doing work on getting index definitions.
//if there is no replication set up --> no point in doing any other work
HttpResponseMessage errorResponseMessage;
var replicationDocument = GetReplicationDocument(out errorResponseMessage);
if (replicationDocument == null)
return errorResponseMessage;
if (indexName.EndsWith("/")) //since id is part of the url, perhaps a trailing forward slash appears there
indexName = indexName.Substring(0, indexName.Length - 1);
indexName = HttpUtility.UrlDecode(indexName);
var indexDefinition = Database.IndexDefinitionStorage.GetIndexDefinition(indexName);
if (indexDefinition == null)
{
return GetMessageWithObject(new
{
Message = string.Format("Index with name: {0} not found. Cannot proceed with replication...", indexName)
}, HttpStatusCode.NotFound);
}
var serializedIndexDefinition = RavenJObject.FromObject(indexDefinition);
var httpRavenRequestFactory = new HttpRavenRequestFactory { RequestTimeoutInMs = Database.Configuration.Replication.ReplicationRequestTimeoutInMilliseconds };
var failedDestinations = new ConcurrentDictionary<string, Exception>();
Parallel.ForEach(replicationDocument.Destinations.Where(dest => dest.Disabled == false && dest.SkipIndexReplication == false),
destination =>
{
try
{
ReplicateIndex(indexName, destination, serializedIndexDefinition, httpRavenRequestFactory);
}
catch (Exception e)
{
failedDestinations.TryAdd(destination.Humane ?? "<null?>", e);
log.WarnException("Could not replicate index " + indexName + " to " + destination.Humane, e);
}
});
return GetMessageWithObject(new
{
SuccessfulReplicationCount = (replicationDocument.Destinations.Count - failedDestinations.Count),
FailedDestinationUrls = failedDestinations.Select(x => new { Server = x.Key, Error = x.Value.ToString() }).ToArray()
});
}