Mirror of https://github.com/HChaZZY/Stockfish.git, synced 2025-12-25 19:46:55 +08:00.
Synchronize printed info regions in the learner and sfen reader.
@@ -138,7 +138,8 @@ namespace Learner
         count = 0.0;
     }
 
-    void print(const std::string& prefix, ostream& s) const
+    template <typename StreamT>
+    void print(const std::string& prefix, StreamT& s) const
     {
         s << "==> " << prefix << "_cross_entropy_eval = " << cross_entropy_eval / count << endl;
         s << "==> " << prefix << "_cross_entropy_win = " << cross_entropy_win / count << endl;
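Making print() a template over the stream type lets the same loss report be written either to a plain std::ostream or to the synchronized output region used later in this commit, since either sink only needs operator<<. A minimal standalone sketch of the idea; DemoLoss and its fields are illustrative stand-ins, not the learner's actual loss type:

    #include <iostream>
    #include <sstream>
    #include <string>

    // Hypothetical loss accumulator; only the shape of print() mirrors the diff.
    struct DemoLoss {
        double cross_entropy_eval = 1.5;
        double cross_entropy_win  = 0.7;
        double count              = 10.0;

        // Works with any sink that supports operator<< for strings and doubles.
        template <typename StreamT>
        void print(const std::string& prefix, StreamT& s) const {
            s << "==> " << prefix << "_cross_entropy_eval = " << cross_entropy_eval / count << '\n';
            s << "==> " << prefix << "_cross_entropy_win = "  << cross_entropy_win  / count << '\n';
        }
    };

    int main() {
        DemoLoss loss;
        loss.print("test", std::cout);   // plain stream

        std::ostringstream region;       // stands in for a buffered output region
        loss.print("learn", region);
        std::cout << region.str();       // the buffered block is emitted in one piece
    }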
@@ -499,8 +500,9 @@ namespace Learner
     if (validation_set_file_name.empty()
         && sfen_for_mse.size() != sfen_for_mse_size)
     {
-        cout
-            << "Error reading sfen_for_mse. Read " << sfen_for_mse.size()
+        auto out = sync_region_cout.new_region();
+        out
+            << "INFO (learn): Error reading sfen_for_mse. Read " << sfen_for_mse.size()
             << " out of " << sfen_for_mse_size << '\n';
 
         return;
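sync_region_cout is defined elsewhere in the learner sources and is not part of this diff, so the following is only an assumption about the pattern the commit relies on: new_region() hands out an object that buffers everything streamed into it and writes the whole buffer to std::cout under a mutex when it is unlocked or destroyed, so multi-line reports from different threads do not interleave. A minimal sketch with hypothetical names:

    #include <iostream>
    #include <mutex>
    #include <sstream>

    // Hypothetical stand-in for the synchronized-region writer used in this diff.
    class SyncRegionCout {
    public:
        class Region {
        public:
            explicit Region(std::mutex& m) : mutex_(m) {}
            ~Region() { unlock(); }                       // flush on scope exit

            template <typename T>
            Region& operator<<(const T& v) { buffer_ << v; return *this; }

            // Accept manipulators such as std::endl as well.
            Region& operator<<(std::ostream& (*manip)(std::ostream&)) {
                buffer_ << manip;
                return *this;
            }

            void unlock() {                               // flush the whole region at once
                if (flushed_) return;
                std::lock_guard<std::mutex> lock(mutex_);
                std::cout << buffer_.str() << std::flush;
                flushed_ = true;
            }

        private:
            std::ostringstream buffer_;
            std::mutex&        mutex_;
            bool               flushed_ = false;
        };

        // Relies on C++17 guaranteed copy elision (Region is not copyable).
        Region new_region() { return Region(mutex_); }

    private:
        std::mutex mutex_;
    };

    SyncRegionCout sync_region_cout;  // illustrative global, mirroring the usage in the diff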
@@ -514,7 +516,8 @@ namespace Learner
         latest_loss_sum = 0.0;
         latest_loss_count = 0;
 
-        cout << "initial loss: " << best_loss << endl;
+        auto out = sync_region_cout.new_region();
+        out << "INFO (learn): initial loss = " << best_loss << endl;
     }
 
     stop_flag = false;
@@ -585,7 +588,8 @@ namespace Learner
         if (pos.set_from_packed_sfen(ps.sfen, &si, &th) != 0)
         {
             // Malformed sfen
-            cout << "Error! : illigal packed sfen = " << pos.fen() << endl;
+            auto out = sync_region_cout.new_region();
+            out << "ERROR: illigal packed sfen = " << pos.fen() << endl;
             goto RETRY_READ;
         }
 
@@ -674,14 +678,16 @@ namespace Learner
     TT.new_search();
     TimePoint elapsed = now() - Search::Limits.startTime + 1;
 
-    cout << "\n";
-    cout << "PROGRESS (calc_loss): " << now_string()
+    auto out = sync_region_cout.new_region();
+
+    out << "\n";
+    out << "PROGRESS (calc_loss): " << now_string()
         << ", " << total_done << " sfens"
         << ", " << total_done * 1000 / elapsed << " sfens/second"
         << ", epoch " << epoch
         << endl;
 
-    cout << "==> learning rate = " << global_learning_rate << endl;
+    out << "==> learning rate = " << global_learning_rate << endl;
 
     // For calculation of verification data loss
     AtomicLoss test_loss_sum{};
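elapsed is measured in milliseconds (Stockfish's TimePoint), so total_done * 1000 / elapsed gives positions per second, and the + 1 added to elapsed above guards against a division by zero right after the timer starts. A tiny worked example of the same arithmetic:

    #include <cstdint>
    #include <iostream>

    int main() {
        // Assumed units: elapsed counts milliseconds, as with Stockfish's now().
        std::int64_t total_done = 250000;     // sfens processed so far
        std::int64_t elapsed_ms = 5000 + 1;   // about 5 seconds; +1 avoids division by zero

        std::cout << total_done * 1000 / elapsed_ms << " sfens/second\n";  // prints 49990
    }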
@@ -694,11 +700,11 @@ namespace Learner
     atomic<int> move_accord_count{0};
 
     auto mainThread = Threads.main();
-    mainThread->execute_with_worker([](auto& th){
+    mainThread->execute_with_worker([&out](auto& th){
         auto& pos = th.rootPos;
         StateInfo si;
         pos.set(StartFEN, false, &si, &th);
-        cout << "==> startpos eval = " << Eval::evaluate(pos) << endl;
+        out << "==> startpos eval = " << Eval::evaluate(pos) << endl;
     });
     mainThread->wait_for_worker_finished();
 
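The worker lambda now captures the region by reference, so the start-position evaluation goes into the same buffered block as the surrounding progress report. That is safe only because out outlives the worker, which the existing wait_for_worker_finished() call guarantees. A standalone sketch of the same capture-by-reference pattern, with std::thread and std::ostringstream standing in for the engine's worker API and the region:

    #include <iostream>
    #include <sstream>
    #include <thread>

    int main() {
        std::ostringstream out;            // stands in for sync_region_cout.new_region()

        // The worker writes into the caller's region through a reference capture.
        std::thread worker([&out] {
            int startpos_eval = 28;        // placeholder for Eval::evaluate(pos)
            out << "==> startpos eval = " << startpos_eval << '\n';
        });

        worker.join();                     // like wait_for_worker_finished(): out must outlive the worker
        std::cout << out.str();            // flush the block after the worker is done
    }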
@@ -721,19 +727,19 @@ namespace Learner
 
     if (psv.size() && test_loss_sum.count > 0.0)
     {
-        test_loss_sum.print("test", cout);
+        test_loss_sum.print("test", out);
 
         if (learn_loss_sum.count > 0.0)
        {
-            learn_loss_sum.print("learn", cout);
+            learn_loss_sum.print("learn", out);
        }
 
-        cout << "==> norm = " << sum_norm << endl;
-        cout << "==> move accuracy = " << (move_accord_count * 100.0 / psv.size()) << "%" << endl;
+        out << "==> norm = " << sum_norm << endl;
+        out << "==> move accuracy = " << (move_accord_count * 100.0 / psv.size()) << "%" << endl;
     }
     else
     {
-        cout << "Error! : psv.size() = " << psv.size() << " , done = " << test_loss_sum.count << endl;
+        out << "ERROR: psv.size() = " << psv.size() << " , done = " << test_loss_sum.count << endl;
     }
 
     learn_loss_sum.reset();
@@ -209,18 +209,19 @@ namespace Learner{
 
     sfen_input_stream = open_sfen_input_file(currentFilename);
 
+    auto out = sync_region_cout.new_region();
     if (sfen_input_stream == nullptr)
     {
-        std::cout << "INFO (sfen_reader): File does not exist: " << currentFilename << '\n';
+        out << "INFO (sfen_reader): File does not exist: " << currentFilename << '\n';
     }
     else
     {
-        std::cout << "INFO (sfen_reader): Opened file for reading: " << currentFilename << '\n';
+        out << "INFO (sfen_reader): Opened file for reading: " << currentFilename << '\n';
 
         // in case the file is empty or was deleted.
         if (sfen_input_stream->eof())
         {
-            std::cout << "INFO (sfen_reader): File empty, nothing to read.\n";
+            out << "==> File empty, nothing to read.\n";
         }
         else
         {
@@ -232,7 +233,8 @@ namespace Learner{
 
     if (sfen_input_stream == nullptr && !open_next_file())
     {
-        std::cout << "INFO (sfen_reader): End of files." << std::endl;
+        auto out = sync_region_cout.new_region();
+        out << "INFO (sfen_reader): End of files." << std::endl;
         end_of_files = true;
         return;
     }
@@ -271,7 +273,8 @@ namespace Learner{
     if(!open_next_file())
     {
         // There was no next file. Abort.
-        std::cout << "INFO (sfen_reader): End of files." << std::endl;
+        auto out = sync_region_cout.new_region();
+        out << "INFO (sfen_reader): End of files." << std::endl;
         end_of_files = true;
         return;
     }
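In the reader, the region is created once, ahead of the branching on whether the file opened, so the messages for a single open attempt reach the console as one uninterrupted block. A simplified sketch of that shape; open_sfen_input_file and the reader's stream type are not reproduced here, std::ifstream and an std::ostringstream region stand in for them:

    #include <fstream>
    #include <iostream>
    #include <sstream>
    #include <string>

    // Sketch: one buffered region per open attempt, shared by every branch.
    void open_and_report(const std::string& filename) {
        std::ifstream stream(filename);

        std::ostringstream region;        // stands in for sync_region_cout.new_region()
        if (!stream.is_open())
            region << "INFO (sfen_reader): File does not exist: " << filename << '\n';
        else {
            region << "INFO (sfen_reader): Opened file for reading: " << filename << '\n';

            // In case the file is empty or was deleted.
            if (stream.peek() == std::ifstream::traits_type::eof())
                region << "==> File empty, nothing to read.\n";
        }

        std::cout << region.str();        // the whole block is emitted at once
    }

    int main() {
        open_and_report("no_such_file.bin");
    }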
@@ -224,7 +224,9 @@ namespace Eval::NNUE {
     const double avg_abs_eval_diff = abs_eval_diff_sum / batch_size;
     const double avg_abs_discrete_eval = abs_discrete_eval_sum / batch_size;
 
-    std::cout << "INFO (update_parameters):"
+    auto out = sync_region_cout.new_region();
+
+    out << "INFO (update_parameters):"
         << " epoch = " << epoch
         << " , avg_abs(trainer_eval-nnue_eval) = " << avg_abs_eval_diff
         << " , avg_abs(nnue_eval) = " << avg_abs_discrete_eval
@@ -94,19 +94,24 @@ namespace Eval::NNUE {
 
     // Check if there are any problems with learning
     void check_health() {
 
         const auto largest_min_activation = *std::max_element(
             std::begin(min_activations_), std::end(min_activations_));
         const auto smallest_max_activation = *std::min_element(
             std::begin(max_activations_), std::end(max_activations_));
 
-        std::cout << "INFO (check_health):"
-            << " layer " << LayerType::kLayerIndex
-            << " - " << LayerType::get_name()
-            << std::endl;
+        auto out = sync_region_cout.new_region();
+
-        std::cout << "==> largest min activation = " << largest_min_activation
-            << " , smallest max activation = " << smallest_max_activation
-            << std::endl;
+        out << "INFO (check_health):"
+            << " layer " << LayerType::kLayerIndex
+            << " - " << LayerType::get_name()
+            << std::endl;
+
+        out << "==> largest min activation = " << largest_min_activation
+            << " , smallest max activation = " << smallest_max_activation
+            << std::endl;
+
+        out.unlock();
 
         std::fill(std::begin(min_activations_), std::end(min_activations_),
             std::numeric_limits<LearnFloatType>::max());
@@ -330,33 +330,38 @@ namespace Eval::NNUE {
 
     // Check if there are any problems with learning
     void check_health() {
-        std::cout << "INFO (check_health):"
-            << " layer " << LayerType::kLayerIndex
-            << " - " << LayerType::get_name()
-            << std::endl;
-
-        std::cout << "==> observed " << observed_features.count()
-            << " (out of " << kInputDimensions << ") features"
-            << std::endl;
-
         constexpr LearnFloatType kPreActivationLimit =
             std::numeric_limits<typename LayerType::WeightType>::max() /
             kWeightScale;
 
-        std::cout << "==> (min, max) of pre-activations = "
-            << min_pre_activation_ << ", "
-            << max_pre_activation_ << " (limit = "
-            << kPreActivationLimit << ")"
-            << std::endl;
 
         const auto largest_min_activation = *std::max_element(
             std::begin(min_activations_), std::end(min_activations_));
         const auto smallest_max_activation = *std::min_element(
             std::begin(max_activations_), std::end(max_activations_));
 
-        std::cout << "==> largest min activation = " << largest_min_activation
-            << " , smallest max activation = " << smallest_max_activation
-            << std::endl;
+        auto out = sync_region_cout.new_region();
+
+        out << "INFO (check_health):"
+            << " layer " << LayerType::kLayerIndex
+            << " - " << LayerType::get_name()
+            << std::endl;
+
+        out << "==> observed " << observed_features.count()
+            << " (out of " << kInputDimensions << ") features"
+            << std::endl;
+
+        out << "==> (min, max) of pre-activations = "
+            << min_pre_activation_ << ", "
+            << max_pre_activation_ << " (limit = "
+            << kPreActivationLimit << ")"
+            << std::endl;
+
+        out << "==> largest min activation = " << largest_min_activation
+            << " , smallest max activation = " << smallest_max_activation
+            << std::endl;
+
+        out.unlock();
 
         std::fill(std::begin(min_activations_), std::end(min_activations_),
             std::numeric_limits<LearnFloatType>::max());
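Both check_health() variants end with an explicit out.unlock() before the activation statistics are reset, so the report is flushed to the console right away instead of waiting for the region to leave scope at the end of the function. A compact, self-contained sketch of that pattern; RegionDemo is hypothetical and only mimics the buffered-region behaviour assumed earlier:

    #include <iostream>
    #include <mutex>
    #include <sstream>

    // Hypothetical buffered region: collects output, flushes it once under a mutex.
    struct RegionDemo {
        std::ostringstream buffer;
        std::mutex&        cout_mutex;
        bool               flushed = false;

        explicit RegionDemo(std::mutex& m) : cout_mutex(m) {}
        ~RegionDemo() { unlock(); }

        void unlock() {                       // flush the whole block exactly once
            if (flushed) return;
            std::lock_guard<std::mutex> lock(cout_mutex);
            std::cout << buffer.str() << std::flush;
            flushed = true;
        }
    };

    std::mutex g_cout_mutex;

    int main() {
        double largest_min_activation = 0.0, smallest_max_activation = 1.0;

        RegionDemo out(g_cout_mutex);
        out.buffer << "INFO (check_health): layer 1 - demo_layer\n"
                   << "==> largest min activation = " << largest_min_activation
                   << " , smallest max activation = " << smallest_max_activation << '\n';

        out.unlock();                         // report is visible now, before the reset below

        largest_min_activation  = 1e9;        // stands in for the std::fill(...) reset
        smallest_max_activation = -1e9;
    }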