This article collects typical usage examples of the C++ method ThreadPool::available_slave. If you are wondering what ThreadPool::available_slave does, how to call it, or what real-world uses look like, the curated code examples below may help. You can also explore further usage examples of the containing class, ThreadPool.
Three code examples of ThreadPool::available_slave are shown below, sorted by popularity by default.
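The examples below show only call sites of ThreadPool::available_slave. For orientation, here is a minimal sketch of what the method itself might look like, assuming a Stockfish-style ThreadPool that is a container of Thread pointers; the helper is_available_to is an assumption inferred from the comments in Example 1, not code shown on this page:

// Hypothetical sketch, not the verbatim implementation. Returns the first
// thread that is free to act as a slave of 'master', or NULL if every
// thread is busy. Example 3 shows a later variant that takes a SplitPoint*
// instead of the master thread.
Thread* ThreadPool::available_slave(const Thread* master) const {

  for (const_iterator it = begin(); it != end(); ++it)
      if ((*it)->is_available_to(master)) // assumed helper, see Example 1
          return *it;

  return NULL;
}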
Example 1: split
void Thread::split(Position& pos, Stack* ss, Value alpha, Value beta, Value* bestValue,
                   Move* bestMove, Depth depth, Move threatMove, int moveCount,
                   MovePicker* movePicker, int nodeType) {

  assert(pos.pos_is_ok());
  assert(*bestValue <= alpha && alpha < beta && beta <= VALUE_INFINITE);
  assert(*bestValue > -VALUE_INFINITE);
  assert(depth >= Threads.minimumSplitDepth);
  assert(searching);
  assert(splitPointsSize < MAX_SPLITPOINTS_PER_THREAD);

  // Pick the next available split point from the split point stack
  SplitPoint& sp = splitPoints[splitPointsSize];

  sp.masterThread = this;
  sp.parentSplitPoint = activeSplitPoint;
  sp.slavesMask = 1ULL << idx;
  sp.depth = depth;
  sp.bestValue = *bestValue;
  sp.bestMove = *bestMove;
  sp.threatMove = threatMove;
  sp.alpha = alpha;
  sp.beta = beta;
  sp.nodeType = nodeType;
  sp.movePicker = movePicker;
  sp.moveCount = moveCount;
  sp.pos = &pos;
  sp.nodes = 0;
  sp.cutoff = false;
  sp.ss = ss;

  // Try to allocate available threads and ask them to start searching by
  // setting the 'searching' flag. This must be done under lock protection
  // to avoid concurrent allocation of the same slave by another master.
  Threads.mutex.lock();
  sp.mutex.lock();

  splitPointsSize++;
  activeSplitPoint = &sp;
  activePosition = NULL;

  size_t slavesCnt = 1; // This thread is always included
  Thread* slave;

  while (    (slave = Threads.available_slave(this)) != NULL
         && ++slavesCnt <= Threads.maxThreadsPerSplitPoint && !Fake)
  {
      sp.slavesMask |= 1ULL << slave->idx;
      slave->activeSplitPoint = &sp;
      slave->searching = true; // Slave leaves idle_loop()
      slave->notify_one();     // Could be sleeping
  }

  // Everything is set up. The master thread enters the idle loop, from which
  // it will instantly launch a search, because its 'searching' flag is set.
  // The thread will return from the idle loop when all slaves have finished
  // their work at this split point.
  if (slavesCnt > 1 || Fake)
  {
      sp.mutex.unlock();
      Threads.mutex.unlock();

      Thread::idle_loop(); // Force a call to base class idle_loop()

      // In the helpful master concept, a master can help only a sub-tree of
      // its split point, and because everything is finished here, it's not
      // possible for the master to be booked.
      assert(!searching);
      assert(!activePosition);

      // We have returned from the idle loop, which means that all threads are
      // finished. Note that setting 'searching' and decreasing splitPointsSize
      // is done under lock protection to avoid a race with Thread::is_available_to().
      Threads.mutex.lock();
      sp.mutex.lock();
  }

  searching = true;
  splitPointsSize--;
  activeSplitPoint = sp.parentSplitPoint;
  activePosition = &pos;
  pos.set_nodes_searched(pos.nodes_searched() + sp.nodes);
  *bestMove = sp.bestMove;
  *bestValue = sp.bestValue;

  sp.mutex.unlock();
  Threads.mutex.unlock();
}
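The last comment in Example 1 mentions Thread::is_available_to(), which this page does not show. Here is a minimal sketch of the check it plausibly performs, assuming the uint64_t slavesMask and splitPoints stack used above; the body is an assumption based on the "helpful master" concept described in the comments, not the verbatim implementation:

// Hypothetical sketch: a thread can serve 'master' only if it is idle and,
// when it has split points of its own, only if 'master' is already a slave
// of its most recent split point (the "helpful master" concept).
bool Thread::is_available_to(const Thread* master) const {

  if (searching)
      return false;

  // Read splitPointsSize once: it can be decremented concurrently, and a
  // local copy avoids an out-of-bounds access in the test below.
  const int size = splitPointsSize;

  return !size || (splitPoints[size - 1].slavesMask & (1ULL << master->idx));
}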
Example 2: split
void Thread::split(Position& pos, const Stack* ss, Value alpha, Value beta, Value* bestValue,
                   Move* bestMove, Depth depth, int moveCount,
                   MovePicker* movePicker, int nodeType, bool cutNode) {

  assert(pos.pos_is_ok());
  assert(-VALUE_INFINITE < *bestValue && *bestValue <= alpha && alpha < beta && beta <= VALUE_INFINITE);
  assert(depth >= Threads.minimumSplitDepth);
  assert(searching);
  assert(splitPointsSize < MAX_SPLITPOINTS_PER_THREAD);

  // Pick the next available split point from the split point stack
  SplitPoint& sp = splitPoints[splitPointsSize];

  sp.masterThread = this;
  sp.parentSplitPoint = activeSplitPoint;
  sp.slavesMask = 0, sp.slavesMask.set(idx);
  sp.depth = depth;
  sp.bestValue = *bestValue;
  sp.bestMove = *bestMove;
  sp.alpha = alpha;
  sp.beta = beta;
  sp.nodeType = nodeType;
  sp.cutNode = cutNode;
  sp.movePicker = movePicker;
  sp.moveCount = moveCount;
  sp.pos = &pos;
  sp.nodes = 0;
  sp.cutoff = false;
  sp.ss = ss;

  // Try to allocate available threads and ask them to start searching by
  // setting the 'searching' flag. This must be done under lock protection
  // to avoid concurrent allocation of the same slave by another master.
  Threads.mutex.lock();
  sp.mutex.lock();

  sp.allSlavesSearching = true; // Must be set under lock protection
  ++splitPointsSize;
  activeSplitPoint = &sp;
  activePosition = nullptr;

  if (!Fake)
      for (Thread* slave; (slave = Threads.available_slave(this)) != nullptr; )
      {
          sp.slavesMask.set(slave->idx);
          slave->activeSplitPoint = &sp;
          slave->searching = true; // Slave leaves idle_loop()
          slave->notify_one();     // Could be sleeping
      }

  // Everything is set up. The master thread enters the idle loop, from which
  // it will instantly launch a search, because its 'searching' flag is set.
  // The thread will return from the idle loop when all slaves have finished
  // their work at this split point.
  sp.mutex.unlock();
  Threads.mutex.unlock();

  Thread::idle_loop(); // Force a call to base class idle_loop()

  // In the helpful master concept, a master can help only a sub-tree of its
  // split point and because everything is finished here, it's not possible
  // for the master to be booked.
  assert(!searching);
  assert(!activePosition);

  // We have returned from the idle loop, which means that all threads are
  // finished. Note that setting 'searching' and decreasing splitPointsSize is
  // done under lock protection to avoid a race with Thread::available_to().
  Threads.mutex.lock();
  sp.mutex.lock();

  searching = true;
  --splitPointsSize;
  activeSplitPoint = sp.parentSplitPoint;
  activePosition = &pos;
  pos.set_nodes_searched(pos.nodes_searched() + sp.nodes);
  *bestMove = sp.bestMove;
  *bestValue = sp.bestValue;

  sp.mutex.unlock();
  Threads.mutex.unlock();
}
Example 3: split
void Thread::split(Position& pos, Stack* ss, Value alpha, Value beta, Value* bestValue,
                   Move* bestMove, Depth depth, int moveCount,
                   MovePicker* movePicker, int nodeType, bool cutNode) {

  assert(searching);
  assert(-VALUE_INFINITE < *bestValue && *bestValue <= alpha && alpha < beta && beta <= VALUE_INFINITE);
  assert(depth >= Threads.minimumSplitDepth);
  assert(splitPointsSize < MAX_SPLITPOINTS_PER_THREAD);

  // Pick and init the next available split point
  SplitPoint& sp = splitPoints[splitPointsSize];

  sp.spinlock.acquire(); // No contention here until we increment splitPointsSize

  sp.master = this;
  sp.parentSplitPoint = activeSplitPoint;
  sp.slavesMask = 0, sp.slavesMask.set(idx);
  sp.depth = depth;
  sp.bestValue = *bestValue;
  sp.bestMove = *bestMove;
  sp.alpha = alpha;
  sp.beta = beta;
  sp.nodeType = nodeType;
  sp.cutNode = cutNode;
  sp.movePicker = movePicker;
  sp.moveCount = moveCount;
  sp.pos = &pos;
  sp.nodes = 0;
  sp.cutoff = false;
  sp.ss = ss;
  sp.allSlavesSearching = true; // Must be set under lock protection

  ++splitPointsSize;
  activeSplitPoint = &sp;
  activePosition = nullptr;

  // Try to allocate available threads
  Thread* slave;

  while (    sp.slavesMask.count() < Threads.max_slaves_per_splitpoint(depth)
         && (slave = Threads.available_slave(&sp)) != nullptr)
  {
      slave->spinlock.acquire();

      if (slave->can_join(activeSplitPoint))
      {
          activeSplitPoint->slavesMask.set(slave->idx);
          slave->activeSplitPoint = activeSplitPoint;
          slave->searching = true;
      }

      slave->spinlock.release();
  }

  // Everything is set up. The master thread enters the idle loop, from which
  // it will instantly launch a search, because its 'searching' flag is set.
  // The thread will return from the idle loop when all slaves have finished
  // their work at this split point.
  sp.spinlock.release();

  Thread::idle_loop(); // Force a call to base class idle_loop()

  // In the helpful master concept, a master can help only a sub-tree of its
  // split point and because everything is finished here, it's not possible
  // for the master to be booked.
  assert(!searching);
  assert(!activePosition);

  // We have returned from the idle loop, which means that all threads are
  // finished. Note that decreasing splitPointsSize must be done under lock
  // protection to avoid a race with Thread::can_join().
  spinlock.acquire();

  searching = true;
  --splitPointsSize;
  activeSplitPoint = sp.parentSplitPoint;
  activePosition = &pos;

  spinlock.release();

  // Split point data cannot be changed now, so no need to lock protect
  pos.set_nodes_searched(pos.nodes_searched() + sp.nodes);
  *bestMove = sp.bestMove;
  *bestValue = sp.bestValue;
}
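Example 3 passes the new split point itself to available_slave and then re-checks slave->can_join() under the slave's spinlock before booking it. Here is a minimal sketch of what can_join might verify, assuming the std::bitset slavesMask and the sp.master field used above; like the earlier sketches, this is an assumption inferred from the call sites, not the verbatim implementation:

// Hypothetical sketch: the same test as the older is_available_to(), but
// keyed on the split point rather than on the master thread directly.
bool Thread::can_join(const SplitPoint* sp) const {

  if (searching)
      return false;

  // Read splitPointsSize once so a concurrent decrement cannot cause an
  // out-of-bounds access in the test below.
  const size_t size = splitPointsSize;

  // Idle with no split points of its own, or the split point's master is
  // already a slave of this thread's latest split point ("helpful master")
  return !size || splitPoints[size - 1].slavesMask.test(sp->master->idx);
}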