Mirror of https://github.com/HChaZZY/Stockfish.git (synced 2025-12-25 03:26:24 +08:00)
Align trainer arrays to cache line.
@@ -353,18 +353,18 @@ namespace Eval::NNUE {
     LayerType* const target_layer_;
 
     // parameter
-    LearnFloatType biases_[kOutputDimensions];
-    LearnFloatType weights_[kOutputDimensions * kInputDimensions];
+    alignas(kCacheLineSize) LearnFloatType biases_[kOutputDimensions];
+    alignas(kCacheLineSize) LearnFloatType weights_[kOutputDimensions * kInputDimensions];
 
     // Buffer used for updating parameters
-    LearnFloatType biases_diff_[kOutputDimensions];
-    LearnFloatType weights_diff_[kOutputDimensions * kInputDimensions];
+    alignas(kCacheLineSize) LearnFloatType biases_diff_[kOutputDimensions];
+    alignas(kCacheLineSize) LearnFloatType weights_diff_[kOutputDimensions * kInputDimensions];
 
     // Forward propagation buffer
-    std::vector<LearnFloatType> output_;
+    std::vector<LearnFloatType, CacheLineAlignedAllocator<LearnFloatType>> output_;
 
     // buffer for back propagation
-    std::vector<LearnFloatType> gradients_;
+    std::vector<LearnFloatType, CacheLineAlignedAllocator<LearnFloatType>> gradients_;
 
     // hyper parameter
     LearnFloatType momentum_;

@@ -149,10 +149,10 @@ namespace Eval::NNUE {
     LayerType* const target_layer_;
 
     // Forward propagation buffer
-    std::vector<LearnFloatType> output_;
+    std::vector<LearnFloatType, CacheLineAlignedAllocator<LearnFloatType>> output_;
 
     // buffer for back propagation
-    std::vector<LearnFloatType> gradients_;
+    std::vector<LearnFloatType, CacheLineAlignedAllocator<LearnFloatType>> gradients_;
 
     // Health check statistics
     LearnFloatType min_activations_[kOutputDimensions];

@@ -421,11 +421,11 @@ namespace Eval::NNUE {
     LearnFloatType weights_[kHalfDimensions * kInputDimensions];
 
     // Buffer used for updating parameters
-    LearnFloatType biases_diff_[kHalfDimensions];
-    std::vector<LearnFloatType> gradients_;
+    alignas(kCacheLineSize) LearnFloatType biases_diff_[kHalfDimensions];
+    std::vector<LearnFloatType, CacheLineAlignedAllocator<LearnFloatType>> gradients_;
 
     // Forward propagation buffer
-    std::vector<LearnFloatType> output_;
+    std::vector<LearnFloatType, CacheLineAlignedAllocator<LearnFloatType>> output_;
 
     // Features that appeared in the training data
     std::bitset<kInputDimensions> observed_features;

@@ -437,8 +437,8 @@ namespace Eval::NNUE {
     // Health check statistics
     LearnFloatType min_pre_activation_;
     LearnFloatType max_pre_activation_;
-    LearnFloatType min_activations_[kHalfDimensions];
-    LearnFloatType max_activations_[kHalfDimensions];
+    alignas(kCacheLineSize) LearnFloatType min_activations_[kHalfDimensions];
+    alignas(kCacheLineSize) LearnFloatType max_activations_[kHalfDimensions];
 };
 
 } // namespace Eval::NNUE

@@ -163,7 +163,7 @@ namespace Eval::NNUE {
     const LearnFloatType* output_;
 
     // buffer for back propagation
-    std::vector<LearnFloatType> gradients_;
+    std::vector<LearnFloatType, CacheLineAlignedAllocator<LearnFloatType>> gradients_;
 };
 
 // Learning: Input layer

@@ -256,10 +256,10 @@ namespace Eval::NNUE {
     const std::shared_ptr<SharedInputTrainer> shared_input_trainer_;
 
     // Forward propagation buffer
-    std::vector<LearnFloatType> output_;
+    std::vector<LearnFloatType, CacheLineAlignedAllocator<LearnFloatType>> output_;
 
     // buffer for back propagation
-    std::vector<LearnFloatType> gradients_;
+    std::vector<LearnFloatType, CacheLineAlignedAllocator<LearnFloatType>> gradients_;
 };
 
 } // namespace Eval::NNUE

@@ -184,7 +184,7 @@ namespace Eval::NNUE {
     LayerType* const target_layer_;
 
     // Forward propagation buffer
-    std::vector<LearnFloatType> output_;
+    std::vector<LearnFloatType, CacheLineAlignedAllocator<LearnFloatType>> output_;
 };
 
 } // namespace Eval::NNUE
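The diff relies on two pieces of supporting code that are not shown here: the kCacheLineSize constant and the CacheLineAlignedAllocator template used as the std::vector allocator. Below is a minimal C++17 sketch of what such an allocator typically looks like, paired with an alignas member for comparison; the 64-byte value and the Example struct are assumptions for illustration, not the repository's actual definitions.

// Sketch only: a cache-line-aligned allocator in the spirit of the one the
// diff references. kCacheLineSize = 64 and struct Example are assumptions.
#include <cstddef>
#include <new>
#include <vector>

constexpr std::size_t kCacheLineSize = 64;  // assumed cache-line width

template <typename T>
struct CacheLineAlignedAllocator {
    using value_type = T;

    CacheLineAlignedAllocator() noexcept = default;
    template <typename U>
    CacheLineAlignedAllocator(const CacheLineAlignedAllocator<U>&) noexcept {}

    // Every allocation uses over-aligned operator new (C++17), so the
    // vector's heap buffer always starts on a cache-line boundary.
    T* allocate(std::size_t n) {
        return static_cast<T*>(
            ::operator new(n * sizeof(T), std::align_val_t(kCacheLineSize)));
    }

    void deallocate(T* p, std::size_t) noexcept {
        ::operator delete(p, std::align_val_t(kCacheLineSize));
    }
};

template <typename T, typename U>
bool operator==(const CacheLineAlignedAllocator<T>&,
                const CacheLineAlignedAllocator<U>&) noexcept { return true; }
template <typename T, typename U>
bool operator!=(const CacheLineAlignedAllocator<T>&,
                const CacheLineAlignedAllocator<U>&) noexcept { return false; }

// Hypothetical member layout mirroring the pattern used in the diff:
// alignas covers fixed-size arrays, the allocator covers heap buffers.
struct Example {
    alignas(kCacheLineSize) float biases_[256];
    std::vector<float, CacheLineAlignedAllocator<float>> output_;
};

Starting each array on its own cache-line boundary is what typically allows SIMD kernels to use aligned loads and stores on these buffers and keeps arrays updated by different threads from sharing a cache line.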