Introduce yielding spin locks

Idea and original implementation by Stephane Nicolet

7 threads 15+0.05
ELO: 3.54 +-2.9 (95%) LOS: 99.2%
Total: 17971 W: 2976 L: 2793 D: 12202

There is no functional change in single thread mode
Joona Kiiski
2015-03-12 20:36:32 +00:00
parent 558b0c848c
commit d71f707040
3 changed files with 21 additions and 6 deletions

@@ -40,6 +40,19 @@ const size_t MAX_THREADS = 128;
 const size_t MAX_SPLITPOINTS_PER_THREAD = 8;
 const size_t MAX_SLAVES_PER_SPLITPOINT = 4;
+class Spinlock {
+  std::atomic_int _lock;
+public:
+  Spinlock() { _lock = 1; } // Init here to workaround a bug with MSVC 2013
+  void lock() {
+      while (_lock.fetch_sub(1, std::memory_order_acquire) != 1)
+          for (int cnt = 0; _lock.load(std::memory_order_relaxed) <= 0; ++cnt)
+              if (cnt >= 10000) std::this_thread::yield(); // Be nice to hyperthreading
+  }
+  void unlock() { _lock.store(1, std::memory_order_release); }
+};
 /// SplitPoint struct stores information shared by the threads searching in
 /// parallel below the same split point. It is populated at splitting time.
@@ -60,7 +73,7 @@ struct SplitPoint {
   SplitPoint* parentSplitPoint;
   // Shared variable data
-  Mutex mutex;
+  Spinlock mutex;
   std::bitset<MAX_THREADS> slavesMask;
   volatile bool allSlavesSearching;
   volatile uint64_t nodes;
@@ -114,6 +127,7 @@ struct Thread : public ThreadBase {
   SplitPoint* volatile activeSplitPoint;
   volatile size_t splitPointsSize;
   volatile bool searching;
+  Spinlock allocMutex;
 };
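
For readers unfamiliar with this spinlock idiom, the sketch below shows the new class in isolation. The Spinlock body is copied from the hunk above; the counter, thread count, and main() driver are illustrative only and are not part of the commit. lock() decrements _lock: the thread that sees the previous value 1 owns the lock, while every other thread spins on cheap relaxed loads and calls std::this_thread::yield() after 10000 iterations so a waiting thread does not starve its hyperthreaded sibling. unlock() publishes 1 with a release store, which pairs with the acquire on the winning fetch_sub and makes critical-section writes visible to the next owner.

#include <atomic>
#include <iostream>
#include <thread>
#include <vector>

// Spinlock as added to thread.h by this commit.
class Spinlock {
  std::atomic_int _lock;
public:
  Spinlock() { _lock = 1; } // Init here to workaround a bug with MSVC 2013
  void lock() {
      while (_lock.fetch_sub(1, std::memory_order_acquire) != 1)
          for (int cnt = 0; _lock.load(std::memory_order_relaxed) <= 0; ++cnt)
              if (cnt >= 10000) std::this_thread::yield(); // Be nice to hyperthreading
  }
  void unlock() { _lock.store(1, std::memory_order_release); }
};

// Illustrative driver, not part of the commit: a few threads bump a shared
// counter under the lock; the final total confirms mutual exclusion.
int main() {
  Spinlock lock;
  long counter = 0;
  std::vector<std::thread> threads;
  for (int t = 0; t < 4; ++t)
      threads.emplace_back([&] {
          for (int i = 0; i < 100000; ++i) {
              lock.lock();
              ++counter;
              lock.unlock();
          }
      });
  for (std::thread& th : threads)
      th.join();
  std::cout << counter << std::endl; // Prints 400000 if the lock works
  return 0;
}

In the search code the same pattern now guards SplitPoint::mutex and the new Thread::allocMutex, presumably because those critical sections are short enough that spinning (with an occasional yield) is cheaper than blocking on a kernel mutex.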