Reintroduce doEvenDeeperSearch

This patch is basically the same as a previously reverted patch,
but now includes some guarding against the search getting stuck — the same
way as we do with double extensions. This should help with
search explosions: they will be resolved slowly, but eventually.

passed STC:
https://tests.stockfishchess.org/tests/view/639733d0b4e52c95053f3485
LLR: 2.95 (-2.94,2.94) <0.00,2.00>
Total: 514048 W: 136423 L: 135435 D: 242190
Ptnml(0-2): 1425, 56945, 139420, 57685, 1549

passed LTC:
https://tests.stockfishchess.org/tests/view/639ab79b93ed41c57eded5c3
LLR: 2.95 (-2.94,2.94) <0.50,2.50>
Total: 113800 W: 30642 L: 30190 D: 52968
Ptnml(0-2): 53, 11092, 34178, 11504, 73

closes https://github.com/official-stockfish/Stockfish/pull/4287

bench 3611278
This commit is contained in:
Michael Chaly
2022-12-17 12:48:03 +03:00
committed by Joost VandeVondele
parent 726e90ccfa
commit 39af98c807
2 changed files with 13 additions and 10 deletions

View File

@@ -1063,7 +1063,7 @@ Value Eval::evaluate(const Position& pos, int* complexity) {
else
{
int nnueComplexity;
int scale = 1064 + 106 * pos.non_pawn_material() / 5120;
int scale = 1076 + 96 * pos.non_pawn_material() / 5120;
Color stm = pos.side_to_move();
Value optimism = pos.this_thread()->optimism[stm];
@@ -1071,8 +1071,8 @@ Value Eval::evaluate(const Position& pos, int* complexity) {
Value nnue = NNUE::evaluate(pos, true, &nnueComplexity);
// Blend nnue complexity with (semi)classical complexity
nnueComplexity = ( 416 * nnueComplexity
+ 424 * abs(psq - nnue)
nnueComplexity = ( 412 * nnueComplexity
+ 428 * abs(psq - nnue)
+ (optimism > 0 ? int(optimism) * int(psq - nnue) : 0)
) / 1024;
@@ -1080,12 +1080,12 @@ Value Eval::evaluate(const Position& pos, int* complexity) {
if (complexity)
*complexity = nnueComplexity;
optimism = optimism * (269 + nnueComplexity) / 256;
v = (nnue * scale + optimism * (scale - 754)) / 1024;
optimism = optimism * (278 + nnueComplexity) / 256;
v = (nnue * scale + optimism * (scale - 755)) / 1024;
}
// Damp down the evaluation linearly when shuffling
v = v * (195 - pos.rule50_count()) / 211;
v = v * (197 - pos.rule50_count()) / 214;
// Guarantee evaluation does not hit the tablebase range
v = std::clamp(v, VALUE_TB_LOSS_IN_MAX_PLY + 1, VALUE_TB_WIN_IN_MAX_PLY - 1);

View File

@@ -81,7 +81,7 @@ namespace {
// History and stats update bonus, based on depth
int stat_bonus(Depth d) {
return std::min((12 * d + 282) * d - 349 , 1594);
return std::min((12 * d + 282) * d - 349 , 1480);
}
// Add a small random component to draw evaluations to avoid 3-fold blindness
@@ -1066,7 +1066,7 @@ moves_loop: // When in check, search starts here
// Avoid search explosion by limiting the number of double extensions
if ( !PvNode
&& value < singularBeta - 25
&& ss->doubleExtensions <= 9)
&& ss->doubleExtensions <= 10)
{
extension = 2;
depth += depth < 12;
@@ -1175,7 +1175,7 @@ moves_loop: // When in check, search starts here
- 4433;
// Decrease/increase reduction for moves with a good/bad history (~30 Elo)
r -= ss->statScore / (13628 + 4000 * (depth > 7 && depth < 19));
r -= ss->statScore / (13000 + 4152 * (depth > 7 && depth < 19));
// In general we want to cap the LMR depth search at newDepth, but when
// reduction is negative, we allow this move a limited search extension
@@ -1190,9 +1190,12 @@ moves_loop: // When in check, search starts here
// Adjust full depth search based on LMR results - if result
// was good enough search deeper, if it was bad enough search shallower
const bool doDeeperSearch = value > (alpha + 64 + 11 * (newDepth - d));
const bool doEvenDeeperSearch = value > alpha + 582 && ss->doubleExtensions <= 5;
const bool doShallowerSearch = value < bestValue + newDepth;
newDepth += doDeeperSearch - doShallowerSearch;
ss->doubleExtensions = ss->doubleExtensions + doEvenDeeperSearch;
newDepth += doDeeperSearch - doShallowerSearch + doEvenDeeperSearch;
if (newDepth > d)
value = -search<NonPV>(pos, ss+1, -(alpha+1), -alpha, newDepth, !cutNode);