本文整理汇总了C++中ConditionVariable::Unlock方法的典型用法代码示例。如果您正苦于以下问题:C++ ConditionVariable::Unlock方法的具体用法?C++ ConditionVariable::Unlock怎么用?C++ ConditionVariable::Unlock使用的例子?那么,这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类ConditionVariable的用法示例。
在下文中一共展示了ConditionVariable::Unlock方法的4个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: RunParallel
// Runs the subset of `algos` that is suitable for G (and can guarantee the
// requested approximation distance / correctness probability) and copies the
// result of whichever algorithm finishes first back into G.
//   algos                     - candidate algorithms to choose from
//   G                         - in/out: input graph; overwritten with the winner's result
//   parameters                - passed through to each algorithm's Run
//   MaxApproximationDistance  - filter: algorithm must guarantee at most this distance
//   MinCorrectnessProbability - filter: algorithm must guarantee at least this probability
// Throws a const char* message when no candidate passes the filters.
void Algorithm::RunParallel(set<Algorithm*> algos, Graph& G, vector<string> parameters,
float MaxApproximationDistance, float MinCorrectnessProbability)
{
// Filter the candidates down to those that can handle G within the requested guarantees.
set<Algorithm*> SelectedAlgorithms;
for(set<Algorithm*>::iterator i = algos.begin(); i != algos.end(); i++)
if((*i)->SuitableFor(G)
&& (*i)->CanGuaranteeApproximationDistance(G, MaxApproximationDistance)
&& (*i)->CanGuaranteeCorrectnessProbability(G, MinCorrectnessProbability))
SelectedAlgorithms.insert(*i);
if(SelectedAlgorithms.size() == 0)
{
// NOTE(review): throws a string literal (const char*), not a std::exception
// subclass — callers must catch const char*.
throw "No suitable algorithm found";
}
else if(SelectedAlgorithms.size() == 1) // we have 1 algorithm => no multithreading needed
{
Algorithm* algo = *SelectedAlgorithms.begin();
algo->Run(G, parameters);
}
else
{
// we have more than 1 algorithm => run them in parallel
// give each algorithm its own copy of G
// The map is keyed by Thread* but indexed with Algorithm* — presumably
// Algorithm derives from Thread; confirm against the class declarations.
map<Thread*, Graph*> GraphCopies;
for(set<Algorithm*>::iterator i = SelectedAlgorithms.begin(); i != SelectedAlgorithms.end(); i++)
GraphCopies[*i] = new Graph(G);
ConditionVariable synchronize;
Thread* finishedAlgorithm = NULL;
// Acquire the condition's lock BEFORE starting any thread, so a thread that
// finishes immediately cannot signal before we reach the Wait below.
synchronize.Lock();
cerr << "starting " << SelectedAlgorithms.size() << " of " << algos.size() << " algorithms\n";
for(set<Algorithm*>::iterator i = SelectedAlgorithms.begin(); i != SelectedAlgorithms.end(); i++)
(*i)->RunInThread(GraphCopies[*i], parameters, &synchronize, &finishedAlgorithm);
while(finishedAlgorithm == NULL) // a mislead interrupt can cause the Wait to stop, therefore
synchronize.Wait(); // this has to be in a loop that checks whether someone has actually finished
// Copy the winner's result into the caller's graph while we still hold the lock.
G = *(GraphCopies[finishedAlgorithm]);
cerr << "someone finished. sending termination requests\n";
for(set<Algorithm*>::iterator i = SelectedAlgorithms.begin(); i != SelectedAlgorithms.end(); i++)
(*i)->Terminate();
// Release the lock so the remaining threads can finish their signalling and exit.
synchronize.Unlock();
cerr << "waiting for threads to join\n";
for(set<Algorithm*>::iterator i = SelectedAlgorithms.begin(); i != SelectedAlgorithms.end(); i++)
{
(*i)->Join();
delete GraphCopies[*i];
}
GraphCopies.clear();
cerr << "everyone joined\n";
}
}
示例2: WaitForAllTasks
// Worker-thread entry point. The opening #if of this conditional (selecting
// the Win32 vs pthreads thread signature) lies above this excerpt.
// Each worker loops: wait for a semaphore post, pop a task, run it, and
// decrement the shared unfinished-task counter, signalling waiters at zero.
static DWORD WINAPI taskEntry(LPVOID arg) {
#else
// `arg` is unused; required by the thread-entry signature.
static void *taskEntry(void *arg) {
#endif
while (true) {
// Block until EnqueueTasks posts work (one post per queued task).
workerSemaphore.Wait();
// Try to get task from task queue
Task *myTask = NULL;
{ MutexLock lock(*taskQueueMutex);
// Waking up to an empty queue is the shutdown signal: exit the loop.
if (taskQueue.size() == 0)
break;
myTask = taskQueue.back();
taskQueue.pop_back();
}
// Do work for _myTask_ (outside the queue mutex, so workers run in parallel).
PBRT_STARTED_TASK(myTask);
myTask->Run();
PBRT_FINISHED_TASK(myTask);
// Decrement the counter under the condition's lock; signal only when the
// last task finishes so WaitForAllTasks can wake up.
tasksRunningCondition.Lock();
int unfinished = --numUnfinishedTasks;
if (unfinished == 0)
tasksRunningCondition.Signal();
tasksRunningCondition.Unlock();
}
// Cleanup from task thread and exit
#ifdef PBRT_HAS_PTHREADS
pthread_exit(NULL);
#endif // PBRT_HAS_PTHREADS
return 0;
}
#endif // !PBRT_USE_GRAND_CENTRAL_DISPATCH
// Blocks the calling thread until every enqueued task has completed.
// On the GCD build this defers to dispatch_group_wait; otherwise it waits on
// the condition variable that taskEntry signals when the counter hits zero.
void WaitForAllTasks() {
#ifdef PBRT_USE_GRAND_CENTRAL_DISPATCH
dispatch_group_wait(gcdGroup, DISPATCH_TIME_FOREVER);
#else
tasksRunningCondition.Lock();
// Re-check the predicate in a loop: condition waits can wake spuriously.
while (numUnfinishedTasks > 0)
tasksRunningCondition.Wait();
tasksRunningCondition.Unlock();
#endif
}
示例3: EnqueueTasks
// Hands the given tasks to the execution backend: serially on the calling
// thread when a single core is configured, via Grand Central Dispatch on the
// GCD build, or through the worker-pool queue otherwise.
void EnqueueTasks(const vector<Task *> &tasks) {
    // Single-core configuration: no pool to feed, run everything inline.
    if (PbrtOptions.nCores == 1) {
        for (unsigned int t = 0; t < tasks.size(); ++t)
            tasks[t]->Run();
        return;
    }
#ifdef PBRT_USE_GRAND_CENTRAL_DISPATCH
    // Submit each task to GCD; lRunTask is the trampoline that invokes Run().
    for (uint32_t t = 0; t < tasks.size(); ++t)
        dispatch_group_async_f(gcdGroup, gcdQueue, tasks[t], lRunTask);
#else
    // Spin up the worker threads lazily on first use.
    if (!threads)
        TasksInit();
    const unsigned int nNewTasks = tasks.size();
    // Publish the tasks under the queue mutex...
    {
        MutexLock lock(*taskQueueMutex);
        for (unsigned int t = 0; t < nNewTasks; ++t)
            taskQueue.push_back(tasks[t]);
    }
    // ...account for them under the condition's lock...
    tasksRunningCondition.Lock();
    numUnfinishedTasks += nNewTasks;
    tasksRunningCondition.Unlock();
    // ...and wake one worker per newly queued task.
    workerSemaphore.Post(nNewTasks);
#endif
}
示例4: EnqueueTasks
// Hands the given tasks to the execution backend (GCD variant of EnqueueTasks):
// on the GCD build, tasks run synchronously when the user forces one thread via
// the PBRT_NTHREADS environment variable; otherwise they are submitted
// asynchronously. The non-GCD build feeds the worker-pool queue.
void EnqueueTasks(const vector<Task *> &tasks) {
#ifdef PBRT_USE_GRAND_CENTRAL_DISPATCH
// Function-local static: the environment variable is consulted only once,
// on the first call.
static bool oneThread = (getenv("PBRT_NTHREADS") &&
atoi(getenv("PBRT_NTHREADS")) == 1);
for (u_int i = 0; i < tasks.size(); ++i)
if (oneThread)
// Run on the calling thread, one task at a time.
dispatch_sync_f(gcdQueue, tasks[i], lRunTask);
else
// lRunTask is the trampoline that invokes the task's Run().
dispatch_group_async_f(gcdGroup, gcdQueue, tasks[i], lRunTask);
#else
// Spin up the worker threads lazily on first use.
if (!threads)
TasksInit();
// Publish the tasks under the queue mutex.
{ MutexLock lock(*taskQueueMutex);
for (unsigned int i = 0; i < tasks.size(); ++i)
taskQueue.push_back(tasks[i]);
}
// Bump the unfinished-task counter under the condition's lock, then wake
// one worker per newly queued task.
tasksRunningCondition.Lock();
numUnfinishedTasks += tasks.size();
tasksRunningCondition.Unlock();
workerSemaphore.Post(tasks.size());
#endif
}