VVLTC Search Tune

Search parameter values were tuned with 118k VVLTC games.

Tested against #5764.

Passed VVLTC 1st SPRT:
https://tests.stockfishchess.org/tests/view/678331226ddf09c0b4b6fd78
LLR: 2.94 (-2.94,2.94) <0.00,2.00>
Total: 43556 W: 11219 L: 10942 D: 21395
Ptnml(0-2): 2, 3975, 13549, 4248, 4

Passed VVLTC 2nd SPRT:
https://tests.stockfishchess.org/tests/view/67834aa06ddf09c0b4b6fe34
LLR: 2.95 (-2.94,2.94) <0.50,2.50>
Total: 37150 W: 9577 L: 9285 D: 18288
Ptnml(0-2): 2, 3344, 11593, 3632, 4
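
The implied strength difference can be sanity-checked directly from the game totals. Below is a minimal sketch (not part of the patch) that converts the 2nd SPRT totals into a score and a naive logistic Elo estimate; note that fishtest's <0.50,2.50> bounds are expressed in normalized Elo computed from the pentanomial counts, which this simple conversion ignores.

// sanity_check.cpp -- illustrative only, assumes a plain logistic Elo model
#include <cmath>
#include <cstdio>

int main() {
    const double W = 9577, L = 9285, D = 18288;        // totals from the 2nd SPRT
    const double score = (W + 0.5 * D) / (W + L + D);  // ~0.504
    const double elo   = -400.0 * std::log10(1.0 / score - 1.0);
    std::printf("score %.4f, naive Elo %+.2f\n", score, elo);
    return 0;
}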

closes https://github.com/official-stockfish/Stockfish/pull/5765

Bench: 1258128
Author:    Muzhen Gaming
Date:      2025-01-12 10:54:59 +08:00
Committer: Disservin
Parent:    8b32e4825f
Commit:    93edf7a74c

@@ -66,7 +66,7 @@ namespace {
  // Futility margin
  Value futility_margin(Depth d, bool noTtCutNode, bool improving, bool oppWorsening) {
- Value futilityMult = 109 - 27 * noTtCutNode;
+ Value futilityMult = 112 - 26 * noTtCutNode;
  Value improvingDeduction = improving * futilityMult * 2;
  Value worseningDeduction = oppWorsening * futilityMult / 3;
@@ -89,7 +89,7 @@ int correction_value(const Worker& w, const Position& pos, const Stack* ss) {
  m.is_ok() ? (*(ss - 2)->continuationCorrectionHistory)[pos.piece_on(m.to_sq())][m.to_sq()]
  : 0;
- return (6384 * pcv + 3583 * macv + 6492 * micv + 6725 * (wnpcv + bnpcv) + 5880 * cntcv);
+ return (6922 * pcv + 3837 * macv + 6238 * micv + 7490 * (wnpcv + bnpcv) + 6270 * cntcv);
  }
  // Add correctionHistory value to raw staticEval and guarantee evaluation
@@ -99,10 +99,10 @@ Value to_corrected_static_eval(Value v, const int cv) {
  }
  // History and stats update bonus, based on depth
- int stat_bonus(Depth d) { return std::min(168 * d - 100, 1718); }
+ int stat_bonus(Depth d) { return std::min(154 * d - 102, 1661); }
  // History and stats update malus, based on depth
- int stat_malus(Depth d) { return std::min(768 * d - 257, 2351); }
+ int stat_malus(Depth d) { return std::min(831 * d - 269, 2666); }
  // Add a small random component to draw evaluations to avoid 3-fold blindness
  Value value_draw(size_t nodes) { return VALUE_DRAW - 1 + Value(nodes & 0x2); }
@@ -274,7 +274,7 @@ void Search::Worker::iterative_deepening() {
  int searchAgainCounter = 0;
- lowPlyHistory.fill(106);
+ lowPlyHistory.fill(97);
  // Iterative deepening loop until requested to stop or the target depth is reached
  while (++rootDepth < MAX_PLY && !threads.stop
@@ -310,13 +310,13 @@ void Search::Worker::iterative_deepening() {
  selDepth = 0;
  // Reset aspiration window starting size
- delta = 5 + std::abs(rootMoves[pvIdx].meanSquaredScore) / 13461;
+ delta = 5 + std::abs(rootMoves[pvIdx].meanSquaredScore) / 12991;
  Value avg = rootMoves[pvIdx].averageScore;
  alpha = std::max(avg - delta, -VALUE_INFINITE);
  beta = std::min(avg + delta, VALUE_INFINITE);
  // Adjust optimism based on root move's averageScore (~4 Elo)
- optimism[us] = 150 * avg / (std::abs(avg) + 85);
+ optimism[us] = 141 * avg / (std::abs(avg) + 83);
  optimism[~us] = -optimism[us];
  // Start with a small aspiration window and, in the case of a fail
@@ -498,10 +498,10 @@ void Search::Worker::iterative_deepening() {
  // Reset histories, usually before a new game
  void Search::Worker::clear() {
- mainHistory.fill(61);
- lowPlyHistory.fill(106);
- captureHistory.fill(-598);
- pawnHistory.fill(-1181);
+ mainHistory.fill(63);
+ lowPlyHistory.fill(108);
+ captureHistory.fill(-631);
+ pawnHistory.fill(-1210);
  pawnCorrectionHistory.fill(0);
  majorPieceCorrectionHistory.fill(0);
  minorPieceCorrectionHistory.fill(0);
@@ -516,10 +516,10 @@ void Search::Worker::clear() {
  for (StatsType c : {NoCaptures, Captures})
  for (auto& to : continuationHistory[inCheck][c])
  for (auto& h : to)
- h.fill(-427);
+ h.fill(-479);
  for (size_t i = 1; i < reductions.size(); ++i)
- reductions[i] = int(19.43 * std::log(i));
+ reductions[i] = int(2143 / 100.0 * std::log(i));
  refreshTable.clear(networks[numaAccessToken]);
  }
@@ -636,20 +636,20 @@ Value Search::Worker::search(
  if (!PvNode && !excludedMove && ttData.depth > depth - (ttData.value <= beta)
  && is_valid(ttData.value) // Can happen when !ttHit or when access race in probe()
  && (ttData.bound & (ttData.value >= beta ? BOUND_LOWER : BOUND_UPPER))
- && (cutNode == (ttData.value >= beta) || depth > 8))
+ && (cutNode == (ttData.value >= beta) || depth > 9))
  {
  // If ttMove is quiet, update move sorting heuristics on TT hit (~2 Elo)
  if (ttData.move && ttData.value >= beta)
  {
  // Bonus for a quiet ttMove that fails high (~2 Elo)
  if (!ttCapture)
- update_quiet_histories(pos, ss, *this, ttData.move, stat_bonus(depth) * 747 / 1024);
+ update_quiet_histories(pos, ss, *this, ttData.move, stat_bonus(depth) * 746 / 1024);
  // Extra penalty for early quiet moves of
  // the previous ply (~1 Elo on STC, ~2 Elo on LTC)
  if (prevSq != SQ_NONE && (ss - 1)->moveCount <= 2 && !priorCapture)
  update_continuation_histories(ss - 1, pos.piece_on(prevSq), prevSq,
- -stat_malus(depth + 1) * 1091 / 1024);
+ -stat_malus(depth + 1) * 1042 / 1024);
  }
  // Partial workaround for the graph history interaction problem
@@ -757,11 +757,11 @@ Value Search::Worker::search(
  // Use static evaluation difference to improve quiet move ordering (~9 Elo)
  if (((ss - 1)->currentMove).is_ok() && !(ss - 1)->inCheck && !priorCapture)
  {
- int bonus = std::clamp(-10 * int((ss - 1)->staticEval + ss->staticEval), -1831, 1428) + 623;
- thisThread->mainHistory[~us][((ss - 1)->currentMove).from_to()] << bonus * 1340 / 1024;
+ int bonus = std::clamp(-10 * int((ss - 1)->staticEval + ss->staticEval), -1881, 1413) + 616;
+ thisThread->mainHistory[~us][((ss - 1)->currentMove).from_to()] << bonus * 1151 / 1024;
  if (type_of(pos.piece_on(prevSq)) != PAWN && ((ss - 1)->currentMove).type_of() != PROMOTION)
  thisThread->pawnHistory[pawn_structure_index(pos)][pos.piece_on(prevSq)][prevSq]
- << bonus * 1159 / 1024;
+ << bonus * 1107 / 1024;
  }
  // Set up the improving flag, which is true if current static evaluation is
@@ -776,30 +776,30 @@ Value Search::Worker::search(
  // If eval is really low, check with qsearch if we can exceed alpha. If the
  // search suggests we cannot exceed alpha, return a speculative fail low.
  // For PvNodes, we must have a guard against mates being returned.
- if (!PvNode && eval < alpha - 469 - 307 * depth * depth)
+ if (!PvNode && eval < alpha - 462 - 297 * depth * depth)
  return qsearch<NonPV>(pos, ss, alpha - 1, alpha);
  // Step 8. Futility pruning: child node (~40 Elo)
  // The depth condition is important for mate finding.
  if (!ss->ttPv && depth < 14
  && eval - futility_margin(depth, cutNode && !ss->ttHit, improving, opponentWorsening)
- - (ss - 1)->statScore / 290
+ - (ss - 1)->statScore / 310
  + (ss->staticEval == eval) * (40 - std::abs(correctionValue) / 131072)
  >= beta
  && eval >= beta && (!ttData.move || ttCapture) && !is_loss(beta) && !is_win(eval))
  return beta + (eval - beta) / 3;
- improving |= ss->staticEval >= beta + 100;
+ improving |= ss->staticEval >= beta + 97;
  // Step 9. Null move search with verification search (~35 Elo)
  if (cutNode && (ss - 1)->currentMove != Move::null() && eval >= beta
- && ss->staticEval >= beta - 21 * depth + 421 && !excludedMove && pos.non_pawn_material(us)
+ && ss->staticEval >= beta - 20 * depth + 440 && !excludedMove && pos.non_pawn_material(us)
  && ss->ply >= thisThread->nmpMinPly && !is_loss(beta))
  {
  assert(eval - beta >= 0);
  // Null move dynamic reduction based on depth and eval
- Depth R = std::min(int(eval - beta) / 235, 7) + depth / 3 + 5;
+ Depth R = std::min(int(eval - beta) / 215, 7) + depth / 3 + 5;
  ss->currentMove = Move::null();
  ss->continuationHistory = &thisThread->continuationHistory[0][0][NO_PIECE][0];
@@ -847,7 +847,7 @@ Value Search::Worker::search(
  // Step 11. ProbCut (~10 Elo)
  // If we have a good enough capture (or queen promotion) and a reduced search
  // returns a value much above beta, we can (almost) safely prune the previous move.
- probCutBeta = beta + 187 - 56 * improving;
+ probCutBeta = beta + 174 - 56 * improving;
  if (depth > 3
  && !is_decisive(beta)
  // If value from transposition table is lower than probCutBeta, don't attempt
@@ -909,7 +909,7 @@ Value Search::Worker::search(
  moves_loop: // When in check, search starts here
  // Step 12. A small Probcut idea (~4 Elo)
- probCutBeta = beta + 417;
+ probCutBeta = beta + 412;
  if ((ttData.bound & BOUND_LOWER) && ttData.depth >= depth - 4 && ttData.value >= probCutBeta
  && !is_decisive(beta) && is_valid(ttData.value) && !is_decisive(ttData.value))
  return probCutBeta;
@@ -992,15 +992,15 @@ moves_loop: // When in check, search starts here
  // Futility pruning for captures (~2 Elo)
  if (!givesCheck && lmrDepth < 7 && !ss->inCheck)
  {
- Value futilityValue = ss->staticEval + 287 + 253 * lmrDepth
+ Value futilityValue = ss->staticEval + 271 + 243 * lmrDepth
  + PieceValue[capturedPiece] + captHist / 7;
  if (futilityValue <= alpha)
  continue;
  }
  // SEE based pruning for captures and checks (~11 Elo)
- int seeHist = std::clamp(captHist / 33, -161 * depth, 156 * depth);
- if (!pos.see_ge(move, -162 * depth - seeHist))
+ int seeHist = std::clamp(captHist / 37, -152 * depth, 141 * depth);
+ if (!pos.see_ge(move, -156 * depth - seeHist))
  continue;
  }
  else
@@ -1011,15 +1011,15 @@ moves_loop: // When in check, search starts here
  + thisThread->pawnHistory[pawn_structure_index(pos)][movedPiece][move.to_sq()];
  // Continuation history based pruning (~2 Elo)
- if (history < -3884 * depth)
+ if (history < -3901 * depth)
  continue;
  history += 2 * thisThread->mainHistory[us][move.from_to()];
- lmrDepth += history / 3609;
+ lmrDepth += history / 3459;
  Value futilityValue =
- ss->staticEval + (bestValue < ss->staticEval - 45 ? 140 : 43) + 141 * lmrDepth;
+ ss->staticEval + (bestValue < ss->staticEval - 47 ? 137 : 47) + 142 * lmrDepth;
  // Futility pruning: parent node (~13 Elo)
  if (!ss->inCheck && lmrDepth < 12 && futilityValue <= alpha)
@@ -1060,7 +1060,7 @@ moves_loop: // When in check, search starts here
  && is_valid(ttData.value) && !is_decisive(ttData.value)
  && (ttData.bound & BOUND_LOWER) && ttData.depth >= depth - 3)
  {
- Value singularBeta = ttData.value - (56 + 79 * (ss->ttPv && !PvNode)) * depth / 64;
+ Value singularBeta = ttData.value - (52 + 74 * (ss->ttPv && !PvNode)) * depth / 64;
  Depth singularDepth = newDepth / 2;
  ss->excludedMove = move;
@@ -1070,13 +1070,13 @@ moves_loop: // When in check, search starts here
  if (value < singularBeta)
  {
- int doubleMargin = 249 * PvNode - 194 * !ttCapture;
- int tripleMargin = 94 + 287 * PvNode - 249 * !ttCapture + 99 * ss->ttPv;
+ int doubleMargin = 259 * PvNode - 194 * !ttCapture;
+ int tripleMargin = 90 + 266 * PvNode - 272 * !ttCapture + 107 * ss->ttPv;
  extension = 1 + (value < singularBeta - doubleMargin)
  + (value < singularBeta - tripleMargin);
- depth += ((!PvNode) && (depth < 14));
+ depth += ((!PvNode) && (depth < 15));
  }
  // Multi-cut pruning
@@ -1109,7 +1109,7 @@ moves_loop: // When in check, search starts here
  else if (PvNode && move.to_sq() == prevSq
  && thisThread->captureHistory[movedPiece][move.to_sq()]
  [type_of(pos.piece_on(move.to_sq()))]
- > 4321)
+ > 4126)
  extension = 1;
  }
@@ -1138,46 +1138,46 @@ moves_loop: // When in check, search starts here
  // Decrease reduction if position is or has been on the PV (~7 Elo)
  if (ss->ttPv)
- r -= 1024 + ((ttData.value > alpha) + (ttData.depth >= depth)) * 1024;
+ r -= 1037 + (ttData.value > alpha) * 965 + (ttData.depth >= depth) * 960;
  // Decrease reduction for PvNodes (~0 Elo on STC, ~2 Elo on LTC)
  if (PvNode)
- r -= 1024;
+ r -= 1018;
  // These reduction adjustments have no proven non-linear scaling
- r += 330;
- r -= std::abs(correctionValue) / 32768;
+ r += 307;
+ r -= std::abs(correctionValue) / 34112;
  // Increase reduction for cut nodes (~4 Elo)
  if (cutNode)
- r += 2518 - (ttData.depth >= depth && ss->ttPv) * 991;
+ r += 2355 - (ttData.depth >= depth && ss->ttPv) * 1141;
  // Increase reduction if ttMove is a capture but the current move is not a capture (~3 Elo)
  if (ttCapture && !capture)
- r += 1043 + (depth < 8) * 999;
+ r += 1087 + (depth < 8) * 990;
  // Increase reduction if next ply has a lot of fail high (~5 Elo)
  if ((ss + 1)->cutoffCnt > 3)
- r += 938 + allNode * 960;
+ r += 940 + allNode * 887;
  // For first picked move (ttMove) reduce reduction (~3 Elo)
  else if (move == ttData.move)
- r -= 1879;
+ r -= 1960;
  if (capture)
  ss->statScore =
  7 * int(PieceValue[pos.captured_piece()])
  + thisThread->captureHistory[movedPiece][move.to_sq()][type_of(pos.captured_piece())]
- - 5000;
+ - 4666;
  else
  ss->statScore = 2 * thisThread->mainHistory[us][move.from_to()]
  + (*contHist[0])[movedPiece][move.to_sq()]
- + (*contHist[1])[movedPiece][move.to_sq()] - 3996;
+ + (*contHist[1])[movedPiece][move.to_sq()] - 3874;
  // Decrease/increase reduction for moves with a good/bad history (~8 Elo)
- r -= ss->statScore * 1287 / 16384;
+ r -= ss->statScore * 1451 / 16384;
  // Step 17. Late moves reduction / extension (LMR, ~117 Elo)
  if (depth >= 2 && moveCount > 1)
@@ -1197,7 +1197,7 @@ moves_loop: // When in check, search starts here
  {
  // Adjust full-depth search based on LMR results - if the result was
  // good enough search deeper, if it was bad enough search shallower.
- const bool doDeeperSearch = value > (bestValue + 42 + 2 * newDepth); // (~1 Elo)
+ const bool doDeeperSearch = value > (bestValue + 40 + 2 * newDepth); // (~1 Elo)
  const bool doShallowerSearch = value < bestValue + 10; // (~2 Elo)
  newDepth += doDeeperSearch - doShallowerSearch;
@@ -1216,11 +1216,11 @@ moves_loop: // When in check, search starts here
  {
  // Increase reduction if ttMove is not present (~6 Elo)
  if (!ttData.move)
- r += 2037;
+ r += 2111;
  // Note that if expected reduction is high, we reduce search depth by 1 here (~9 Elo)
  value =
- -search<NonPV>(pos, ss + 1, -(alpha + 1), -alpha, newDepth - (r > 2983), !cutNode);
+ -search<NonPV>(pos, ss + 1, -(alpha + 1), -alpha, newDepth - (r > 3444), !cutNode);
  }
  // For PV nodes only, do a full PV search on the first move or after a fail high,
@@ -1369,25 +1369,25 @@ moves_loop: // When in check, search starts here
  // Bonus for prior countermove that caused the fail low
  else if (!priorCapture && prevSq != SQ_NONE)
  {
- int bonusScale = (117 * (depth > 5) + 39 * !allNode + 168 * ((ss - 1)->moveCount > 8)
- + 115 * (!ss->inCheck && bestValue <= ss->staticEval - 108)
- + 119 * (!(ss - 1)->inCheck && bestValue <= -(ss - 1)->staticEval - 83));
+ int bonusScale = (118 * (depth > 5) + 37 * !allNode + 169 * ((ss - 1)->moveCount > 8)
+ + 128 * (!ss->inCheck && bestValue <= ss->staticEval - 102)
+ + 115 * (!(ss - 1)->inCheck && bestValue <= -(ss - 1)->staticEval - 82));
  // Proportional to "how much damage we have to undo"
- bonusScale += std::min(-(ss - 1)->statScore / 113, 300);
+ bonusScale += std::min(-(ss - 1)->statScore / 106, 318);
  bonusScale = std::max(bonusScale, 0);
  const int scaledBonus = stat_bonus(depth) * bonusScale / 32;
  update_continuation_histories(ss - 1, pos.piece_on(prevSq), prevSq,
- scaledBonus * 416 / 1024);
- thisThread->mainHistory[~us][((ss - 1)->currentMove).from_to()] << scaledBonus * 212 / 1024;
+ scaledBonus * 436 / 1024);
+ thisThread->mainHistory[~us][((ss - 1)->currentMove).from_to()] << scaledBonus * 207 / 1024;
  if (type_of(pos.piece_on(prevSq)) != PAWN && ((ss - 1)->currentMove).type_of() != PROMOTION)
  thisThread->pawnHistory[pawn_structure_index(pos)][pos.piece_on(prevSq)][prevSq]
- << scaledBonus * 1073 / 1024;
+ << scaledBonus * 1195 / 1024;
  }
  else if (priorCapture && prevSq != SQ_NONE)
@@ -1422,14 +1422,14 @@ moves_loop: // When in check, search starts here
  || (bestValue > ss->staticEval && bestMove))) // positive correction & no fail low
  {
  const auto m = (ss - 1)->currentMove;
- constexpr int nonPawnWeight = 154;
+ constexpr int nonPawnWeight = 165;
  auto bonus = std::clamp(int(bestValue - ss->staticEval) * depth / 8,
  -CORRECTION_HISTORY_LIMIT / 4, CORRECTION_HISTORY_LIMIT / 4);
  thisThread->pawnCorrectionHistory[us][pawn_structure_index<Correction>(pos)]
- << bonus * 107 / 128;
- thisThread->majorPieceCorrectionHistory[us][major_piece_index(pos)] << bonus * 162 / 128;
- thisThread->minorPieceCorrectionHistory[us][minor_piece_index(pos)] << bonus * 148 / 128;
+ << bonus * 114 / 128;
+ thisThread->majorPieceCorrectionHistory[us][major_piece_index(pos)] << bonus * 163 / 128;
+ thisThread->minorPieceCorrectionHistory[us][minor_piece_index(pos)] << bonus * 146 / 128;
  thisThread->nonPawnCorrectionHistory[WHITE][us][non_pawn_index<WHITE>(pos)]
  << bonus * nonPawnWeight / 128;
  thisThread->nonPawnCorrectionHistory[BLACK][us][non_pawn_index<BLACK>(pos)]
@@ -1561,7 +1561,7 @@ Value Search::Worker::qsearch(Position& pos, Stack* ss, Value alpha, Value beta)
  if (bestValue > alpha)
  alpha = bestValue;
- futilityBase = ss->staticEval + 306;
+ futilityBase = ss->staticEval + 301;
  }
  const PieceToHistory* contHist[] = {(ss - 1)->continuationHistory,
@@ -1624,11 +1624,11 @@ Value Search::Worker::qsearch(Position& pos, Stack* ss, Value alpha, Value beta)
  + (*contHist[1])[pos.moved_piece(move)][move.to_sq()]
  + thisThread->pawnHistory[pawn_structure_index(pos)][pos.moved_piece(move)]
  [move.to_sq()]
- <= 5095)
+ <= 5228)
  continue;
  // Do not search moves with bad enough SEE values (~5 Elo)
- if (!pos.see_ge(move, -83))
+ if (!pos.see_ge(move, -80))
  continue;
  }
@@ -1696,7 +1696,7 @@ Value Search::Worker::qsearch(Position& pos, Stack* ss, Value alpha, Value beta)
  Depth Search::Worker::reduction(bool i, Depth d, int mn, int delta) const {
  int reductionScale = reductions[d] * reductions[mn];
- return reductionScale - delta * 814 / rootDelta + !i * reductionScale / 3 + 1304;
+ return reductionScale - delta * 768 / rootDelta + !i * reductionScale * 108 / 300 + 1168;
  }
  // elapsed() returns the time elapsed since the search started. If the
@@ -1795,30 +1795,30 @@ void update_all_stats(const Position& pos,
  if (!pos.capture_stage(bestMove))
  {
- update_quiet_histories(pos, ss, workerThread, bestMove, bonus * 1131 / 1024);
+ update_quiet_histories(pos, ss, workerThread, bestMove, bonus * 1216 / 1024);
  // Decrease stats for all non-best quiet moves
  for (Move move : quietsSearched)
- update_quiet_histories(pos, ss, workerThread, move, -malus * 1028 / 1024);
+ update_quiet_histories(pos, ss, workerThread, move, -malus * 1062 / 1024);
  }
  else
  {
  // Increase stats for the best move in case it was a capture move
  captured = type_of(pos.piece_on(bestMove.to_sq()));
- captureHistory[moved_piece][bestMove.to_sq()][captured] << bonus * 1291 / 1024;
+ captureHistory[moved_piece][bestMove.to_sq()][captured] << bonus * 1272 / 1024;
  }
  // Extra penalty for a quiet early move that was not a TT move in
  // previous ply when it gets refuted.
  if (prevSq != SQ_NONE && ((ss - 1)->moveCount == 1 + (ss - 1)->ttHit) && !pos.captured_piece())
- update_continuation_histories(ss - 1, pos.piece_on(prevSq), prevSq, -malus * 919 / 1024);
+ update_continuation_histories(ss - 1, pos.piece_on(prevSq), prevSq, -malus * 966 / 1024);
  // Decrease stats for all non-best capture moves
  for (Move move : capturesSearched)
  {
  moved_piece = pos.moved_piece(move);
  captured = type_of(pos.piece_on(move.to_sq()));
- captureHistory[moved_piece][move.to_sq()][captured] << -malus * 1090 / 1024;
+ captureHistory[moved_piece][move.to_sq()][captured] << -malus * 1205 / 1024;
  }
  }
@@ -1827,7 +1827,7 @@ void update_all_stats(const Position& pos,
  // at ply -1, -2, -3, -4, and -6 with current move.
  void update_continuation_histories(Stack* ss, Piece pc, Square to, int bonus) {
  static constexpr std::array<ConthistBonus, 5> conthist_bonuses = {
- {{1, 1024}, {2, 571}, {3, 339}, {4, 500}, {6, 592}}};
+ {{1, 1025}, {2, 621}, {3, 325}, {4, 512}, {6, 534}}};
  for (const auto [i, weight] : conthist_bonuses)
  {
@@ -1848,12 +1848,12 @@ void update_quiet_histories(
  workerThread.mainHistory[us][move.from_to()] << bonus; // Untuned to prevent duplicate effort
  if (ss->ply < LOW_PLY_HISTORY_SIZE)
- workerThread.lowPlyHistory[ss->ply][move.from_to()] << bonus * 874 / 1024;
- update_continuation_histories(ss, pos.moved_piece(move), move.to_sq(), bonus * 853 / 1024);
+ workerThread.lowPlyHistory[ss->ply][move.from_to()] << bonus * 879 / 1024;
+ update_continuation_histories(ss, pos.moved_piece(move), move.to_sq(), bonus * 888 / 1024);
  int pIndex = pawn_structure_index(pos);
- workerThread.pawnHistory[pIndex][pos.moved_piece(move)][move.to_sq()] << bonus * 628 / 1024;
+ workerThread.pawnHistory[pIndex][pos.moved_piece(move)][move.to_sq()] << bonus * 634 / 1024;
  }
  }