From 8559c439148d0f183a5d67375c12abe92d63975e Mon Sep 17 00:00:00 2001
From: Unai Corzo
Date: Sun, 20 Sep 2020 09:03:37 +0200
Subject: [PATCH 1/4] Simplify reduced depth search

Simplification in reduced depth search.

STC
https://tests.stockfishchess.org/tests/view/5f64c72fbb0cae038ca8f531
LLR: 2.94 (-2.94,2.94) {-1.25,0.25}
Total: 28320 W: 3475 L: 3359 D: 21486
Ptnml(0-2): 170, 2485, 8773, 2523, 209

LTC
https://tests.stockfishchess.org/tests/view/5f650cfabb0cae038ca8f585
LLR: 2.95 (-2.94,2.94) {-0.75,0.25}
Total: 58392 W: 3354 L: 3285 D: 51753
Ptnml(0-2): 74, 2826, 23336, 2877, 83

closes https://github.com/official-stockfish/Stockfish/pull/3139

bench: 4201295
---
 src/search.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/search.cpp b/src/search.cpp
index 9c5fb58b..22cb8577 100644
--- a/src/search.cpp
+++ b/src/search.cpp
@@ -1151,7 +1151,7 @@ moves_loop: // When in check, search starts from here
       // Step 16. Reduced depth search (LMR, ~200 Elo). If the move fails high it will be
       // re-searched at full depth.
       if (    depth >= 3
-          && moveCount > 1 + 2 * rootNode + 2 * (PvNode && abs(bestValue) < 2)
+          && moveCount > 1 + 2 * rootNode
           && (  !captureOrPromotion
               || moveCountPruning
               || ss->staticEval + PieceValue[EG][pos.captured_piece()] <= alpha

From 16b4578cc1bb0cc0dead19e7d9248553c977f8ca Mon Sep 17 00:00:00 2001
From: Stefan Geschwentner
Date: Sun, 20 Sep 2020 22:25:19 +0200
Subject: [PATCH 2/4] Tweak hybrid threshold.

Increase the first hybrid threshold with more material.
Rewrite the hybrid rules for clarity.

STC:
LLR: 2.94 (-2.94,2.94) {-0.25,1.25}
Total: 24416 W: 3039 L: 2848 D: 18529
Ptnml(0-2): 135, 2136, 7503, 2271, 163
https://tests.stockfishchess.org/tests/view/5f6451efbb0cae038ca8f4dc

LTC:
LLR: 2.95 (-2.94,2.94) {0.25,1.25}
Total: 65016 W: 3702 L: 3455 D: 57859
Ptnml(0-2): 66, 2991, 26157, 3218, 76
https://tests.stockfishchess.org/tests/view/5f64b143bb0cae038ca8f51f

closes https://github.com/official-stockfish/Stockfish/pull/3140

Bench: 3973739
---
 src/evaluate.cpp | 34 +++++++++++++++++++++-------------
 1 file changed, 21 insertions(+), 13 deletions(-)

diff --git a/src/evaluate.cpp b/src/evaluate.cpp
index faf71d27..a9159477 100644
--- a/src/evaluate.cpp
+++ b/src/evaluate.cpp
@@ -1015,20 +1015,28 @@ make_v:
 
 Value Eval::evaluate(const Position& pos) {
 
-  // Use classical eval if there is a large imbalance
-  // If there is a moderate imbalance, use classical eval with probability (1/8),
-  // as derived from the node counter.
-  bool useClassical = abs(eg_value(pos.psq_score())) * 16 > NNUEThreshold1 * (16 + pos.rule50_count());
-  bool classical = !Eval::useNNUE
-                || useClassical
-                || (abs(eg_value(pos.psq_score())) > PawnValueMg / 4 && !(pos.this_thread()->nodes & 0xB));
-  Value v = classical ? Evaluation<NO_TRACE>(pos).value()
-                      : NNUE::evaluate(pos) * 5 / 4 + Tempo;
+  Value v;
 
-  if (   useClassical
-      && Eval::useNNUE
-      && abs(v) * 16 < NNUEThreshold2 * (16 + pos.rule50_count()))
-      v = NNUE::evaluate(pos) * 5 / 4 + Tempo;
+  if (!Eval::useNNUE)
+      v = Evaluation<NO_TRACE>(pos).value();
+  else
+  {
+      // scale and shift NNUE for compatibility with search and classical evaluation
+      auto adjusted_NNUE = [&](){ return NNUE::evaluate(pos) * 5 / 4 + Tempo; };
+
+      // if there is PSQ imbalance use classical eval, with small probability if it is small
+      Value psq = Value(abs(eg_value(pos.psq_score())));
+      int   r50 = 16 + pos.rule50_count();
+      bool  largePsq = psq * 16 > (NNUEThreshold1 + pos.non_pawn_material() / 64) * r50;
+      bool  classical = largePsq || (psq > PawnValueMg / 4 && !(pos.this_thread()->nodes & 0xB));
+
+      v = classical ? Evaluation<NO_TRACE>(pos).value() : adjusted_NNUE();
+
+      // if the classical eval is small and imbalance large, use NNUE nevertheless.
+      if (   largePsq
+          && abs(v) * 16 < NNUEThreshold2 * r50)
+          v = adjusted_NNUE();
+  }
 
   // Damp down the evaluation linearly when shuffling
   v = v * (100 - pos.rule50_count()) / 100;

From 485d517c687a2d3cb0b88cc8c198483759eaf2c7 Mon Sep 17 00:00:00 2001
From: Sami Kiminki
Date: Sun, 30 Aug 2020 19:41:30 +0300
Subject: [PATCH 3/4] Add large page support for NNUE weights and simplify TT
 mem management

Use TT memory functions to allocate memory for the NNUE weights. This
should provide a small speed-up on systems where large pages are not
automatically used, including Windows and some Linux distributions.

Further, since we now have a wrapper for std::aligned_alloc(), we can
simplify the TT memory management a bit:

- We no longer need to store separate pointers to the hash table and
  its underlying memory allocation.
- We also get to merge the Linux-specific and default implementations
  of aligned_ttmem_alloc().

Finally, we'll enable the VirtualAlloc code path with large page
support also for Win32.

STC: https://tests.stockfishchess.org/tests/view/5f66595823a84a47b9036fba
LLR: 2.94 (-2.94,2.94) {-0.25,1.25}
Total: 14896 W: 1854 L: 1686 D: 11356
Ptnml(0-2): 65, 1224, 4742, 1312, 105

closes https://github.com/official-stockfish/Stockfish/pull/3081

No functional change.
---
 README.md                  |  2 +-
 src/misc.cpp               | 57 +++++++++++++++++---------------
 src/misc.h                 |  4 +--
 src/nnue/evaluate_nnue.cpp | 18 ++++++++----
 src/nnue/evaluate_nnue.h   | 11 ++++++++
 src/tt.cpp                 |  7 +++--
 src/tt.h                   |  3 +-
 7 files changed, 57 insertions(+), 45 deletions(-)

diff --git a/README.md b/README.md
index 96a495ae..255ebce2 100644
--- a/README.md
+++ b/README.md
@@ -152,7 +152,7 @@ to find the best move. The classical evaluation computes this value as a functio
 of various chess concepts, handcrafted by experts, tested and tuned using fishtest.
 The NNUE evaluation computes this value with a neural network based on basic
 inputs (e.g. piece positions only). The network is optimized and trained
-on the evalutions of millions of positions at moderate search depth.
+on the evaluations of millions of positions at moderate search depth.
 
 The NNUE evaluation was first introduced in shogi, and ported to Stockfish afterward.
 It can be evaluated efficiently on CPUs, and exploits the fact that only parts

diff --git a/src/misc.cpp b/src/misc.cpp
index 3fbdea35..d9bc47e3 100644
--- a/src/misc.cpp
+++ b/src/misc.cpp
@@ -357,27 +357,11 @@ void std_aligned_free(void* ptr) {
 #endif
 }
 
-/// aligned_ttmem_alloc() will return suitably aligned memory, if possible using large pages.
-/// The returned pointer is the aligned one, while the mem argument is the one that needs
-/// to be passed to free. With c++17 some of this functionality could be simplified.
+/// aligned_large_pages_alloc() will return suitably aligned memory, if possible using large pages.
 
-#if defined(__linux__) && !defined(__ANDROID__)
+#if defined(_WIN32)
 
-void* aligned_ttmem_alloc(size_t allocSize, void*& mem) {
-
-  constexpr size_t alignment = 2 * 1024 * 1024; // assumed 2MB page sizes
-  size_t size = ((allocSize + alignment - 1) / alignment) * alignment; // multiple of alignment
-  if (posix_memalign(&mem, alignment, size))
-     mem = nullptr;
-#if defined(MADV_HUGEPAGE)
-  madvise(mem, allocSize, MADV_HUGEPAGE);
-#endif
-  return mem;
-}
-
-#elif defined(_WIN64)
-
-static void* aligned_ttmem_alloc_large_pages(size_t allocSize) {
+static void* aligned_large_pages_alloc_win(size_t allocSize) {
 
   HANDLE hProcessToken { };
   LUID luid { };
@@ -422,12 +406,13 @@ static void* aligned_ttmem_alloc_large_pages(size_t allocSize) {
   return mem;
 }
 
-void* aligned_ttmem_alloc(size_t allocSize, void*& mem) {
+void* aligned_large_pages_alloc(size_t allocSize) {
 
   static bool firstCall = true;
+  void* mem;
 
   // Try to allocate large pages
-  mem = aligned_ttmem_alloc_large_pages(allocSize);
+  mem = aligned_large_pages_alloc_win(allocSize);
 
   // Suppress info strings on the first call. The first call occurs before 'uci'
   // is received and in that case this output confuses some GUIs.
@@ -449,23 +434,31 @@ void* aligned_ttmem_alloc(size_t allocSize, void*& mem) {
 
 #else
 
-void* aligned_ttmem_alloc(size_t allocSize, void*& mem) {
+void* aligned_large_pages_alloc(size_t allocSize) {
 
-  constexpr size_t alignment = 64;         // assumed cache line size
-  size_t size = allocSize + alignment - 1; // allocate some extra space
-  mem = malloc(size);
-  void* ret = reinterpret_cast<void*>((uintptr_t(mem) + alignment - 1) & ~uintptr_t(alignment - 1));
-  return ret;
+#if defined(__linux__)
+  constexpr size_t alignment = 2 * 1024 * 1024; // assumed 2MB page size
+#else
+  constexpr size_t alignment = 4096; // assumed small page size
+#endif
+
+  // round up to multiples of alignment
+  size_t size = ((allocSize + alignment - 1) / alignment) * alignment;
+  void *mem = std_aligned_alloc(alignment, size);
+#if defined(MADV_HUGEPAGE)
+  madvise(mem, size, MADV_HUGEPAGE);
+#endif
+  return mem;
 }
 
 #endif
 
 
-/// aligned_ttmem_free() will free the previously allocated ttmem
+/// aligned_large_pages_free() will free the previously allocated ttmem
 
-#if defined(_WIN64)
+#if defined(_WIN32)
 
-void aligned_ttmem_free(void* mem) {
+void aligned_large_pages_free(void* mem) {
 
   if (mem && !VirtualFree(mem, 0, MEM_RELEASE))
   {
@@ -478,8 +471,8 @@ void aligned_ttmem_free(void* mem) {
 
 #else
 
-void aligned_ttmem_free(void *mem) {
-  free(mem);
+void aligned_large_pages_free(void *mem) {
+  std_aligned_free(mem);
 }
 
 #endif

diff --git a/src/misc.h b/src/misc.h
index 68b9c884..bc48f303 100644
--- a/src/misc.h
+++ b/src/misc.h
@@ -33,8 +33,8 @@ void prefetch(void* addr);
 void start_logger(const std::string& fname);
 void* std_aligned_alloc(size_t alignment, size_t size);
 void std_aligned_free(void* ptr);
-void* aligned_ttmem_alloc(size_t size, void*& mem);
-void aligned_ttmem_free(void* mem); // nop if mem == nullptr
+void* aligned_large_pages_alloc(size_t size); // memory aligned by page size, min alignment: 4096 bytes
+void aligned_large_pages_free(void* mem); // nop if mem == nullptr
 
 void dbg_hit_on(bool b);
 void dbg_hit_on(bool c, bool b);

diff --git a/src/nnue/evaluate_nnue.cpp b/src/nnue/evaluate_nnue.cpp
index ed138881..72d18200 100644
--- a/src/nnue/evaluate_nnue.cpp
+++ b/src/nnue/evaluate_nnue.cpp
@@ -52,7 +52,7 @@ namespace Eval::NNUE {
   };
 
   // Input feature converter
-  AlignedPtr<FeatureTransformer> feature_transformer;
+  LargePagePtr<FeatureTransformer> feature_transformer;
 
   // Evaluation function
   AlignedPtr<Network> network;
@@ -70,14 +70,22 @@ namespace Eval::NNUE {
     std::memset(pointer.get(), 0, sizeof(T));
   }
 
+  template <typename T>
+  void Initialize(LargePagePtr<T>& pointer) {
+
+    static_assert(alignof(T) <= 4096, "aligned_large_pages_alloc() may fail for such a big alignment requirement of T");
+    pointer.reset(reinterpret_cast<T*>(aligned_large_pages_alloc(sizeof(T))));
+    std::memset(pointer.get(), 0, sizeof(T));
+  }
+
   // Read evaluation function parameters
   template <typename T>
-  bool ReadParameters(std::istream& stream, const AlignedPtr<T>& pointer) {
+  bool ReadParameters(std::istream& stream, T& reference) {
 
     std::uint32_t header;
     header = read_little_endian<std::uint32_t>(stream);
     if (!stream || header != T::GetHashValue()) return false;
-    return pointer->ReadParameters(stream);
+    return reference.ReadParameters(stream);
   }
 
 }  // namespace Detail
@@ -110,8 +118,8 @@ namespace Eval::NNUE {
     std::string architecture;
     if (!ReadHeader(stream, &hash_value, &architecture)) return false;
     if (hash_value != kHashValue) return false;
-    if (!Detail::ReadParameters(stream, feature_transformer)) return false;
-    if (!Detail::ReadParameters(stream, network)) return false;
+    if (!Detail::ReadParameters(stream, *feature_transformer)) return false;
+    if (!Detail::ReadParameters(stream, *network)) return false;
     return stream && stream.peek() == std::ios::traits_type::eof();
   }
 

diff --git a/src/nnue/evaluate_nnue.h b/src/nnue/evaluate_nnue.h
index 5f0d1855..459a93de 100644
--- a/src/nnue/evaluate_nnue.h
+++ b/src/nnue/evaluate_nnue.h
@@ -40,9 +40,20 @@ namespace Eval::NNUE {
     }
   };
 
+  template <typename T>
+  struct TtmemDeleter {
+    void operator()(T* ptr) const {
+      ptr->~T();
+      aligned_large_pages_free(ptr);
+    }
+  };
+
   template <typename T>
   using AlignedPtr = std::unique_ptr<T, AlignedDeleter<T>>;
 
+  template <typename T>
+  using LargePagePtr = std::unique_ptr<T, TtmemDeleter<T>>;
+
 }  // namespace Eval::NNUE
 
 #endif // #ifndef NNUE_EVALUATE_NNUE_H_INCLUDED

diff --git a/src/tt.cpp b/src/tt.cpp
index 60a3a5f1..dea7c712 100644
--- a/src/tt.cpp
+++ b/src/tt.cpp
@@ -62,11 +62,12 @@ void TranspositionTable::resize(size_t mbSize) {
 
   Threads.main()->wait_for_search_finished();
 
-  aligned_ttmem_free(mem);
+  aligned_large_pages_free(table);
 
   clusterCount = mbSize * 1024 * 1024 / sizeof(Cluster);
-  table = static_cast<Cluster*>(aligned_ttmem_alloc(clusterCount * sizeof(Cluster), mem));
-  if (!mem)
+
+  table = static_cast<Cluster*>(aligned_large_pages_alloc(clusterCount * sizeof(Cluster)));
+  if (!table)
   {
       std::cerr << "Failed to allocate " << mbSize
                 << "MB for transposition table." << std::endl;

diff --git a/src/tt.h b/src/tt.h
index fdfd6769..6aa066c5 100644
--- a/src/tt.h
+++ b/src/tt.h
@@ -73,7 +73,7 @@ class TranspositionTable {
   static_assert(sizeof(Cluster) == 32, "Unexpected Cluster size");
 
 public:
-  ~TranspositionTable() { aligned_ttmem_free(mem); }
+  ~TranspositionTable() { aligned_large_pages_free(table); }
   void new_search() { generation8 += 8; } // Lower 3 bits are used by PV flag and Bound
   TTEntry* probe(const Key key, bool& found) const;
   int hashfull() const;
@@ -89,7 +89,6 @@ private:
 
   size_t clusterCount;
   Cluster* table;
-  void* mem;
   uint8_t generation8; // Size must be not bigger than TTEntry::genBound8
 };
 

From 9a64e737cfef639f202787161498ba94466ad730 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Ste=CC=81phane=20Nicolet?=
Date: Wed, 9 Sep 2020 10:49:31 +0200
Subject: [PATCH 4/4] Small cleanups 12

- Clean signature of functions in namespace NNUE
- Add comment for countermove based pruning
- Remove bestMoveCount variable
- Add const qualifier to kpp_board_index array
- Fix spaces in get_best_thread()
- Fix indentation in capture LMR code in search.cpp
- Rename TtmemDeleter to LargePageDeleter

Closes https://github.com/official-stockfish/Stockfish/pull/3063

No functional change
---
 src/evaluate.cpp           |  8 ++++----
 src/evaluate.h             |  8 +++-----
 src/main.cpp               |  2 +-
 src/nnue/evaluate_nnue.cpp |  6 +++---
 src/nnue/evaluate_nnue.h   |  4 ++--
 src/nnue/nnue_common.h     |  2 +-
 src/search.cpp             | 20 +++++++++-----------
 src/search.h               |  1 -
 src/thread.cpp             | 20 ++++++++++----------
 src/uci.cpp                |  2 +-
 src/ucioption.cpp          |  4 ++--
 11 files changed, 36 insertions(+), 41 deletions(-)

diff --git a/src/evaluate.cpp b/src/evaluate.cpp
index a9159477..d3937823 100644
--- a/src/evaluate.cpp
+++ b/src/evaluate.cpp
@@ -60,7 +60,7 @@ namespace Eval {
   bool useNNUE;
   string eval_file_loaded = "None";
 
-  /// init_NNUE() tries to load a nnue network at startup time, or when the engine
+  /// NNUE::init() tries to load a nnue network at startup time, or when the engine
   /// receives a UCI command "setoption name EvalFile value nn-[a-z0-9]{12}.nnue"
   /// The name of the nnue network is always retrieved from the EvalFile option.
   /// We search the given network in three locations: internally (the default
@@ -68,7 +68,7 @@ namespace Eval {
   /// in the engine directory. Distro packagers may define the DEFAULT_NNUE_DIRECTORY
   /// variable to have the engine search in a special directory in their distro.
 
-  void init_NNUE() {
+  void NNUE::init() {
 
     useNNUE = Options["Use NNUE"];
     if (!useNNUE)
@@ -111,8 +111,8 @@ namespace Eval {
     }
   }
 
-  /// verify_NNUE() verifies that the last net used was loaded successfully
-  void verify_NNUE() {
+  /// NNUE::verify() verifies that the last net used was loaded successfully
+  void NNUE::verify() {
 
     string eval_file = string(Options["EvalFile"]);
 

diff --git a/src/evaluate.h b/src/evaluate.h
index c723bd8f..56354cf5 100644
--- a/src/evaluate.h
+++ b/src/evaluate.h
@@ -32,8 +32,6 @@ namespace Eval {
   extern bool useNNUE;
   extern std::string eval_file_loaded;
 
-  void init_NNUE();
-  void verify_NNUE();
 
   // The default net name MUST follow the format nn-[SHA256 first 12 digits].nnue
   // for the build process (profile-build and fishtest) to work. Do not change the
@@ -43,9 +41,9 @@ namespace Eval {
   namespace NNUE {
 
     Value evaluate(const Position& pos);
-    Value compute_eval(const Position& pos);
-    void  update_eval(const Position& pos);
-    bool  load_eval(std::string streamName, std::istream& stream);
+    bool  load_eval(std::string name, std::istream& stream);
+    void  init();
+    void  verify();
 
   } // namespace NNUE
 

diff --git a/src/main.cpp b/src/main.cpp
index f95db1c2..e6dff918 100644
--- a/src/main.cpp
+++ b/src/main.cpp
@@ -45,7 +45,7 @@ int main(int argc, char* argv[]) {
   Endgames::init();
   Threads.set(size_t(Options["Threads"]));
   Search::clear(); // After threads are up
-  Eval::init_NNUE();
+  Eval::NNUE::init();
 
   UCI::loop(argc, argv);
 

diff --git a/src/nnue/evaluate_nnue.cpp b/src/nnue/evaluate_nnue.cpp
index 72d18200..b5dcd992 100644
--- a/src/nnue/evaluate_nnue.cpp
+++ b/src/nnue/evaluate_nnue.cpp
@@ -30,7 +30,7 @@
 
 namespace Eval::NNUE {
 
-  uint32_t kpp_board_index[PIECE_NB][COLOR_NB] = {
+  const uint32_t kpp_board_index[PIECE_NB][COLOR_NB] = {
    // convention: W - us, B - them
    // viewed from other side, W and B are reversed
     { PS_NONE,     PS_NONE     },
@@ -136,10 +136,10 @@ namespace Eval::NNUE {
   }
 
   // Load eval, from a file stream or a memory stream
-  bool load_eval(std::string streamName, std::istream& stream) {
+  bool load_eval(std::string name, std::istream& stream) {
 
     Initialize();
-    fileName = streamName;
+    fileName = name;
     return ReadParameters(stream);
   }
 

diff --git a/src/nnue/evaluate_nnue.h b/src/nnue/evaluate_nnue.h
index 459a93de..6cacf37e 100644
--- a/src/nnue/evaluate_nnue.h
+++ b/src/nnue/evaluate_nnue.h
@@ -41,7 +41,7 @@ namespace Eval::NNUE {
   };
 
   template <typename T>
-  struct TtmemDeleter {
+  struct LargePageDeleter {
     void operator()(T* ptr) const {
       ptr->~T();
       aligned_large_pages_free(ptr);
@@ -52,7 +52,7 @@ namespace Eval::NNUE {
   using AlignedPtr = std::unique_ptr<T, AlignedDeleter<T>>;
 
   template <typename T>
-  using LargePagePtr = std::unique_ptr<T, TtmemDeleter<T>>;
+  using LargePagePtr = std::unique_ptr<T, LargePageDeleter<T>>;
 
 }  // namespace Eval::NNUE
 

diff --git a/src/nnue/nnue_common.h b/src/nnue/nnue_common.h
index 7bc905dc..8afea186 100644
--- a/src/nnue/nnue_common.h
+++ b/src/nnue/nnue_common.h
@@ -113,7 +113,7 @@ namespace Eval::NNUE {
     PS_END2 = 12 * SQUARE_NB + 1
   };
 
-  extern uint32_t kpp_board_index[PIECE_NB][COLOR_NB];
+  extern const uint32_t kpp_board_index[PIECE_NB][COLOR_NB];
 
   // Type of input feature after conversion
   using TransformedFeatureType = std::uint8_t;

diff --git a/src/search.cpp b/src/search.cpp
index 22cb8577..edc020fd 100644
--- a/src/search.cpp
+++ b/src/search.cpp
@@ -225,7 +225,7 @@ void MainThread::search() {
   Time.init(Limits, us, rootPos.game_ply());
   TT.new_search();
 
-  Eval::verify_NNUE();
+  Eval::NNUE::verify();
 
   if (rootMoves.empty())
   {
@@ -462,10 +462,7 @@ void Thread::search() {
               ++failedHighCnt;
           }
           else
-          {
-              ++rootMoves[pvIdx].bestMoveCount;
               break;
-          }
 
           delta += delta / 4 + 5;
 
@@ -1218,14 +1215,14 @@ moves_loop: // When in check, search starts from here
           }
           else
           {
-            // Increase reduction for captures/promotions if late move and at low depth
-            if (depth < 8 && moveCount > 2)
-                r++;
+              // Increase reduction for captures/promotions if late move and at low depth
+              if (depth < 8 && moveCount > 2)
+                  r++;
 
-            // Unless giving check, this capture is likely bad
-            if (   !givesCheck
-                && ss->staticEval + PieceValue[EG][pos.captured_piece()] + 213 * depth <= alpha)
-                r++;
+              // Unless giving check, this capture is likely bad
+              if (   !givesCheck
+                  && ss->staticEval + PieceValue[EG][pos.captured_piece()] + 213 * depth <= alpha)
+                  r++;
           }
 
           Depth d = std::clamp(newDepth - r, 1, newDepth);
@@ -1570,6 +1567,7 @@ moves_loop: // When in check, search starts from here
                                                                 [pos.moved_piece(move)]
                                                                 [to_sq(move)];
 
+      // CounterMove based pruning
       if (   !captureOrPromotion
           && moveCount
           && (*contHist[0])[pos.moved_piece(move)][to_sq(move)] < CounterMovePruneThreshold

diff --git a/src/search.h b/src/search.h
index f60da4a5..72d43c31 100644
--- a/src/search.h
+++ b/src/search.h
@@ -71,7 +71,6 @@ struct RootMove {
   Value previousScore = -VALUE_INFINITE;
   int selDepth = 0;
   int tbRank = 0;
-  int bestMoveCount = 0;
   Value tbScore;
   std::vector<Move> pv;
 };

diff --git a/src/thread.cpp b/src/thread.cpp
index b46fce5e..2fbf745d 100644
--- a/src/thread.cpp
+++ b/src/thread.cpp
@@ -224,16 +224,16 @@ Thread* ThreadPool::get_best_thread() const {
       votes[th->rootMoves[0].pv[0]] +=
           (th->rootMoves[0].score - minScore + 14) * int(th->completedDepth);
 
-        if (abs(bestThread->rootMoves[0].score) >= VALUE_TB_WIN_IN_MAX_PLY)
-        {
-            // Make sure we pick the shortest mate / TB conversion or stave off mate the longest
-            if (th->rootMoves[0].score > bestThread->rootMoves[0].score)
-                bestThread = th;
-        }
-        else if (   th->rootMoves[0].score >= VALUE_TB_WIN_IN_MAX_PLY
-                 || (   th->rootMoves[0].score > VALUE_TB_LOSS_IN_MAX_PLY
-                     && votes[th->rootMoves[0].pv[0]] > votes[bestThread->rootMoves[0].pv[0]]))
-            bestThread = th;
+      if (abs(bestThread->rootMoves[0].score) >= VALUE_TB_WIN_IN_MAX_PLY)
+      {
+          // Make sure we pick the shortest mate / TB conversion or stave off mate the longest
+          if (th->rootMoves[0].score > bestThread->rootMoves[0].score)
+              bestThread = th;
+      }
+      else if (   th->rootMoves[0].score >= VALUE_TB_WIN_IN_MAX_PLY
+               || (   th->rootMoves[0].score > VALUE_TB_LOSS_IN_MAX_PLY
+                   && votes[th->rootMoves[0].pv[0]] > votes[bestThread->rootMoves[0].pv[0]]))
+          bestThread = th;
   }
 
   return bestThread;

diff --git a/src/uci.cpp b/src/uci.cpp
index 3f3cc458..b63e55ad 100644
--- a/src/uci.cpp
+++ b/src/uci.cpp
@@ -85,7 +85,7 @@ namespace {
     Position p;
     p.set(pos.fen(), Options["UCI_Chess960"], &states->back(), Threads.main());
 
-    Eval::verify_NNUE();
+    Eval::NNUE::verify();
 
     sync_cout << "\n" << Eval::trace(p) << sync_endl;
   }

diff --git a/src/ucioption.cpp b/src/ucioption.cpp
index 5e747a7f..bb0b8311 100644
--- a/src/ucioption.cpp
+++ b/src/ucioption.cpp
@@ -41,8 +41,8 @@ void on_hash_size(const Option& o) { TT.resize(size_t(o)); }
 void on_logger(const Option& o) { start_logger(o); }
 void on_threads(const Option& o) { Threads.set(size_t(o)); }
 void on_tb_path(const Option& o) { Tablebases::init(o); }
-void on_use_NNUE(const Option& ) { Eval::init_NNUE(); }
-void on_eval_file(const Option& ) { Eval::init_NNUE(); }
+void on_use_NNUE(const Option& ) { Eval::NNUE::init(); }
+void on_eval_file(const Option& ) { Eval::NNUE::init(); }
 
 /// Our case insensitive less() function as required by UCI protocol
 bool CaseInsensitiveLess::operator() (const string& s1, const string& s2) const {
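
For readers who want to experiment with the hybrid rule from PATCH 2/4 outside the engine, here is a minimal standalone C++ sketch of the classical-versus-NNUE selection. It is illustrative only: the constants NNUEThreshold1, NNUEThreshold2 and PawnValueMg below are hypothetical stand-ins (the real values live in evaluate.cpp and are not shown in these patches), and PositionInfo is a simplified stand-in for Stockfish's Position class. Only the shape of the largePsq test and the 1/8 node-counter fallback mirrors the patch.

// hybrid_rule_sketch.cpp -- illustrative sketch, not part of the patches above.
#include <cstdlib>
#include <iostream>

using Value = int;

// Stand-in constants; the real thresholds and piece values are defined in Stockfish.
constexpr Value NNUEThreshold1 = 550;
constexpr Value NNUEThreshold2 = 176;
constexpr Value PawnValueMg    = 128;

// Simplified stand-in for the Position fields the rule reads.
struct PositionInfo {
    Value psqEg;              // eg_value(pos.psq_score())
    Value nonPawnMaterial;    // pos.non_pawn_material()
    int   rule50;             // pos.rule50_count()
    unsigned long long nodes; // pos.this_thread()->nodes
};

// Returns true when the classical evaluation should be used instead of NNUE.
bool use_classical(const PositionInfo& p) {
    Value psq = std::abs(p.psqEg);
    int   r50 = 16 + p.rule50;

    // PATCH 2/4 raises the first threshold with non-pawn material, so positions
    // with lots of material tolerate a larger PSQ imbalance before leaving NNUE.
    bool largePsq = psq * 16 > (NNUEThreshold1 + p.nonPawnMaterial / 64) * r50;

    // Moderate imbalance: fall back to classical on roughly 1/8 of nodes
    // (the three node-counter bits selected by 0xB must all be zero).
    return largePsq || (psq > PawnValueMg / 4 && !(p.nodes & 0xB));
}

int main() {
    PositionInfo balanced { 50, 5000, 0, 12345 };   // small imbalance, much material
    PositionInfo lopsided { 900, 2000, 0, 12345 };  // large imbalance, little material
    std::cout << use_classical(balanced) << ' ' << use_classical(lopsided) << '\n'; // prints "0 1"
}

The patch also re-checks with NNUE when largePsq holds but the classical value stays below NNUEThreshold2 scaled by the rule-50 counter; that second step is omitted from the sketch for brevity.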