Use thread-specific mutexes instead of a global one.
This is necessary to improve scalability with a high number of cores. There is no functional change in single-threaded mode. Resolves #281
commit 81c7975dcd
parent 4b59347194
committed by Joona Kiiski
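
The change in miniature: instead of funneling every master through the pool-wide Threads.mutex, each split point and each thread carries its own mutex, so masters working on different split points no longer serialize on a single lock. A minimal sketch of the pattern, assuming hypothetical Task and Pool types rather than Stockfish's real classes:

    // Illustrative pattern only: Task and Pool are hypothetical, not Stockfish types.
    #include <array>
    #include <cstddef>
    #include <mutex>

    struct Task {
        std::mutex mutex;     // per-object lock: contention only between users of this task
        int sharedState = 0;
    };

    struct Pool {
        std::mutex mutex;           // global lock: every thread serializes here
        std::array<Task, 8> tasks;
    };

    // Before: any update, to any task, funnels through one pool-wide mutex.
    void update_global(Pool& pool, std::size_t i, int v) {
        std::lock_guard<std::mutex> lock(pool.mutex);
        pool.tasks[i].sharedState = v;
    }

    // After: updates to different tasks proceed in parallel.
    void update_local(Pool& pool, std::size_t i, int v) {
        std::lock_guard<std::mutex> lock(pool.tasks[i].mutex);
        pool.tasks[i].sharedState = v;
    }

Under the global lock, throughput is bounded by that one mutex no matter how many cores are available; with per-object locks, only threads touching the same object contend, which is the scalability claim of the commit message.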
@@ -144,6 +144,8 @@ void Thread::split(Position& pos, Stack* ss, Value alpha, Value beta, Value* bes
   // Pick and init the next available split point
   SplitPoint& sp = splitPoints[splitPointsSize];

+  sp.mutex.lock(); // No contention here until we don't increment splitPointsSize
+
   sp.master = this;
   sp.parentSplitPoint = activeSplitPoint;
   sp.slavesMask = 0, sp.slavesMask.set(idx);
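
Taking sp.mutex this early is cheap: the split point only becomes reachable by other threads once splitPointsSize is incremented, so until that publication point the lock is uncontended, which is what the added comment means. A sketch of the same lock-before-publish idea, assuming hypothetical Slot, slots, and published names; the atomic counter stands in for whatever synchronization the engine applies to splitPointsSize:

    // Lock-before-publish: until the counter is bumped, nobody else can reach the slot.
    #include <array>
    #include <atomic>
    #include <mutex>

    struct Slot {
        std::mutex mutex;
        bool ready = false;
    };

    std::array<Slot, 8> slots;
    std::atomic<int> published{0};  // owner increments; other threads only read

    void publish_next() {
        Slot& s = slots[published.load()];
        s.mutex.lock();   // uncontended: the slot is not yet visible to anyone
        s.ready = true;   // ...initialize the rest of the slot here...
        ++published;      // publication point: other threads may now find the slot
        // keep holding s.mutex while finishing setup, as the commit does
        s.mutex.unlock();
    }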
@@ -160,27 +162,29 @@ void Thread::split(Position& pos, Stack* ss, Value alpha, Value beta, Value* bes
   sp.nodes = 0;
   sp.cutoff = false;
   sp.ss = ss;
-
-  // Try to allocate available threads and ask them to start searching setting
-  // 'searching' flag. This must be done under lock protection to avoid concurrent
-  // allocation of the same slave by another master.
-  Threads.mutex.lock();
-  sp.mutex.lock();
-
   sp.allSlavesSearching = true; // Must be set under lock protection
+
   ++splitPointsSize;
   activeSplitPoint = &sp;
   activePosition = nullptr;

+  // Try to allocate available threads
   Thread* slave;

   while (    sp.slavesMask.count() < MAX_SLAVES_PER_SPLITPOINT
-         && (slave = Threads.available_slave(activeSplitPoint)) != nullptr)
+         && (slave = Threads.available_slave(&sp)) != nullptr)
   {
-      sp.slavesMask.set(slave->idx);
-      slave->activeSplitPoint = activeSplitPoint;
-      slave->searching = true; // Slave leaves idle_loop()
-      slave->notify_one(); // Could be sleeping
+      slave->mutex.lock();
+
+      if (slave->can_join(activeSplitPoint))
+      {
+          activeSplitPoint->slavesMask.set(slave->idx);
+          slave->activeSplitPoint = activeSplitPoint;
+          slave->searching = true;
+          slave->sleepCondition.notify_one(); // Could be sleeping
+      }
+
+      slave->mutex.unlock();
   }

   // Everything is set up. The master thread enters the idle loop, from which
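
The old loop handed out slaves while holding the global Threads.mutex; the new loop instead takes each candidate's own mutex and re-checks can_join() under it, because another master may have claimed the slave between the scan in available_slave() and this point. A sketch of that lock-then-recheck claiming step, assuming a hypothetical Worker type whose can_join() is a stand-in for the real availability test:

    // Claim a worker under its own lock, rechecking availability first.
    #include <condition_variable>
    #include <mutex>

    struct Worker {
        std::mutex mutex;
        std::condition_variable sleepCondition;
        bool searching = false;

        bool can_join() const { return !searching; }  // stand-in for the real test
    };

    bool try_claim(Worker& w) {
        std::lock_guard<std::mutex> lock(w.mutex);
        if (!w.can_join())
            return false;               // another master won the race
        w.searching = true;             // claimed: worker will leave its idle loop
        w.sleepCondition.notify_one();  // it could be sleeping
        return true;
    }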
@@ -188,7 +192,6 @@ void Thread::split(Position& pos, Stack* ss, Value alpha, Value beta, Value* bes
   // The thread will return from the idle loop when all slaves have finished
   // their work at this split point.
   sp.mutex.unlock();
-  Threads.mutex.unlock();

   Thread::idle_loop(); // Force a call to base class idle_loop()

@@ -198,13 +201,13 @@ void Thread::split(Position& pos, Stack* ss, Value alpha, Value beta, Value* bes
   assert(!searching);
   assert(!activePosition);

+  searching = true;
+
   // We have returned from the idle loop, which means that all threads are
-  // finished. Note that setting 'searching' and decreasing splitPointsSize must
-  // be done under lock protection to avoid a race with Thread::available_to().
-  Threads.mutex.lock();
+  // finished. Note that decreasing splitPointsSize must be done under lock
+  // protection to avoid a race with Thread::can_join().
   sp.mutex.lock();

-  searching = true;
   --splitPointsSize;
   activeSplitPoint = sp.parentSplitPoint;
   activePosition = &pos;
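
Per the rewritten comment, only the splitPointsSize decrement still races with Thread::can_join() and must stay under sp.mutex; the master's own 'searching' flag can now be set before the lock is taken. A sketch of the return path under these assumptions, with hypothetical Master and Split types:

    // Master's return path: take only the split point's own lock.
    #include <mutex>

    struct Split {
        std::mutex mutex;
        int bestValue = 0;   // written by helpers while they hold Split::mutex
    };

    struct Master {
        bool searching = false;
        int splitCount = 0;  // analogue of splitPointsSize, read by can_join()
    };

    int rejoin(Master& m, Split& sp) {
        m.searching = true;              // no longer needs lock protection here
        std::lock_guard<std::mutex> lock(sp.mutex);
        --m.splitCount;                  // racing readers must see a consistent value
        return sp.bestValue;             // read back the search result
    }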
@@ -213,7 +216,6 @@ void Thread::split(Position& pos, Stack* ss, Value alpha, Value beta, Value* bes
   *bestValue = sp.bestValue;

   sp.mutex.unlock();
-  Threads.mutex.unlock();
 }