Move SIMD code to a separate header
Passed Non-regression STC:
LLR: 2.94 (-2.94,2.94) <-1.75,0.25>
Total: 115328 W: 29903 L: 29777 D: 55648
Ptnml(0-2): 265, 12293, 32444, 12375, 287
https://tests.stockfishchess.org/tests/view/68300e086ec7634154f9b1d1

closes https://github.com/official-stockfish/Stockfish/pull/6086

No functional change
Committed by: Joost VandeVondele
Parent: 2662d6bf35
Commit: 472cc764be
@@ -60,9 +60,9 @@ SRCS = benchmark.cpp bitboard.cpp evaluate.cpp main.cpp \

HEADERS = benchmark.h bitboard.h evaluate.h misc.h movegen.h movepick.h history.h \
          nnue/nnue_misc.h nnue/features/half_ka_v2_hm.h nnue/layers/affine_transform.h \
-         nnue/layers/affine_transform_sparse_input.h nnue/layers/clipped_relu.h nnue/layers/simd.h \
+         nnue/layers/affine_transform_sparse_input.h nnue/layers/clipped_relu.h \
          nnue/layers/sqr_clipped_relu.h nnue/nnue_accumulator.h nnue/nnue_architecture.h \
-         nnue/nnue_common.h nnue/nnue_feature_transformer.h position.h \
+         nnue/nnue_common.h nnue/nnue_feature_transformer.h nnue/simd.h position.h \
          search.h syzygy/tbprobe.h thread.h thread_win32_osx.h timeman.h \
          tt.h tune.h types.h uci.h ucioption.h perft.h nnue/network.h engine.h score.h numa.h memory.h
src/misc.h (+16 lines)
@@ -317,6 +317,22 @@ void move_to_front(std::vector<T>& vec, Predicate pred) {
    }
}

+#if defined(__GNUC__) && !defined(__clang__)
+    #if __GNUC__ >= 13
+        #define sf_assume(cond) __attribute__((assume(cond)))
+    #else
+        #define sf_assume(cond) \
+            do \
+            { \
+                if (!(cond)) \
+                    __builtin_unreachable(); \
+            } while (0)
+    #endif
+#else
+    // do nothing for other compilers
+    #define sf_assume(cond)
+#endif
+
} // namespace Stockfish

#endif  // #ifndef MISC_H_INCLUDED
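The sf_assume macro added above hands the optimizer a fact it cannot easily prove on its own: with GCC 13+ it expands to __attribute__((assume(cond))), on older GCC it becomes an "unreachable if false" hint, and on other compilers it expands to nothing, so the condition must be free of side effects. A rough usage sketch, not part of this commit — the function name and the bound are invented for illustration, assuming misc.h is included:

// Promise the compiler that count never exceeds 64; nothing is checked at
// runtime, and a false promise is undefined behaviour on GCC.
int sum_of_squares(const int* values, unsigned count) {
    sf_assume(count <= 64);
    int total = 0;
    for (unsigned i = 0; i < count; ++i)
        total += values[i] * values[i];
    return total;
}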
@@ -23,7 +23,7 @@
#include "../../bitboard.h"
#include "../../position.h"
#include "../../types.h"
#include "../nnue_accumulator.h"
#include "../nnue_common.h"

namespace Stockfish::Eval::NNUE::Features {

@@ -25,7 +25,7 @@
#include <iostream>

#include "../nnue_common.h"
-#include "simd.h"
+#include "../simd.h"

/*
  This file contains the definition for a fully connected layer (aka affine transform).

@@ -102,7 +102,7 @@ static void affine_transform_non_ssse3(std::int32_t* output,
            product = vmlal_s8(product, inputVector[j * 2 + 1], row[j * 2 + 1]);
            sum     = vpadalq_s16(sum, product);
        }
-       output[i] = Simd::neon_m128_reduce_add_epi32(sum);
+       output[i] = SIMD::neon_m128_reduce_add_epi32(sum);

#endif
    }
@@ -191,20 +191,20 @@ class AffineTransform {
    #if defined(USE_AVX512)
        using vec_t = __m512i;
        #define vec_set_32 _mm512_set1_epi32
-       #define vec_add_dpbusd_32 Simd::m512_add_dpbusd_epi32
+       #define vec_add_dpbusd_32 SIMD::m512_add_dpbusd_epi32
    #elif defined(USE_AVX2)
        using vec_t = __m256i;
        #define vec_set_32 _mm256_set1_epi32
-       #define vec_add_dpbusd_32 Simd::m256_add_dpbusd_epi32
+       #define vec_add_dpbusd_32 SIMD::m256_add_dpbusd_epi32
    #elif defined(USE_SSSE3)
        using vec_t = __m128i;
        #define vec_set_32 _mm_set1_epi32
-       #define vec_add_dpbusd_32 Simd::m128_add_dpbusd_epi32
+       #define vec_add_dpbusd_32 SIMD::m128_add_dpbusd_epi32
    #elif defined(USE_NEON_DOTPROD)
        using vec_t = int32x4_t;
        #define vec_set_32 vdupq_n_s32
        #define vec_add_dpbusd_32(acc, a, b) \
-           Simd::dotprod_m128_add_dpbusd_epi32(acc, vreinterpretq_s8_s32(a), \
+           SIMD::dotprod_m128_add_dpbusd_epi32(acc, vreinterpretq_s8_s32(a), \
                                                vreinterpretq_s8_s32(b))
    #endif

@@ -245,20 +245,20 @@ class AffineTransform {
    #if defined(USE_AVX2)
        using vec_t = __m256i;
        #define vec_setzero() _mm256_setzero_si256()
-       #define vec_add_dpbusd_32 Simd::m256_add_dpbusd_epi32
-       #define vec_hadd Simd::m256_hadd
+       #define vec_add_dpbusd_32 SIMD::m256_add_dpbusd_epi32
+       #define vec_hadd SIMD::m256_hadd
    #elif defined(USE_SSSE3)
        using vec_t = __m128i;
        #define vec_setzero() _mm_setzero_si128()
-       #define vec_add_dpbusd_32 Simd::m128_add_dpbusd_epi32
-       #define vec_hadd Simd::m128_hadd
+       #define vec_add_dpbusd_32 SIMD::m128_add_dpbusd_epi32
+       #define vec_hadd SIMD::m128_hadd
    #elif defined(USE_NEON_DOTPROD)
        using vec_t = int32x4_t;
        #define vec_setzero() vdupq_n_s32(0)
        #define vec_add_dpbusd_32(acc, a, b) \
-           Simd::dotprod_m128_add_dpbusd_epi32(acc, vreinterpretq_s8_s32(a), \
+           SIMD::dotprod_m128_add_dpbusd_epi32(acc, vreinterpretq_s8_s32(a), \
                                                vreinterpretq_s8_s32(b))
-       #define vec_hadd Simd::neon_m128_hadd
+       #define vec_hadd SIMD::neon_m128_hadd
    #endif

    const auto inputVector = reinterpret_cast<const vec_t*>(input);

@@ -22,14 +22,12 @@
#define NNUE_LAYERS_AFFINE_TRANSFORM_SPARSE_INPUT_H_INCLUDED

#include <algorithm>
#include <array>
#include <cstdint>
#include <iostream>

#include "../../bitboard.h"
+#include "../simd.h"
#include "../nnue_common.h"
#include "affine_transform.h"
-#include "simd.h"

/*
  This file contains the definition for a fully connected layer (aka affine transform) with block sparse input.
@@ -77,53 +75,16 @@ alignas(CacheLineSize) static constexpr struct OffsetIndices {
// Find indices of nonzero numbers in an int32_t array
template<const IndexType InputDimensions>
void find_nnz(const std::int32_t* input, std::uint16_t* out, IndexType& count_out) {
-#if defined(USE_SSSE3)
-    #if defined(USE_AVX512)
-        using vec_t = __m512i;
-        #define vec_nnz(a) _mm512_cmpgt_epi32_mask(a, _mm512_setzero_si512())
-    #elif defined(USE_AVX2)
-        using vec_t = __m256i;
-        #if defined(USE_VNNI) && !defined(USE_AVXVNNI)
-            #define vec_nnz(a) _mm256_cmpgt_epi32_mask(a, _mm256_setzero_si256())
-        #else
-            #define vec_nnz(a) \
-                _mm256_movemask_ps( \
-                  _mm256_castsi256_ps(_mm256_cmpgt_epi32(a, _mm256_setzero_si256())))
-        #endif
-    #elif defined(USE_SSSE3)
-        using vec_t = __m128i;
-        #define vec_nnz(a) \
-            _mm_movemask_ps(_mm_castsi128_ps(_mm_cmpgt_epi32(a, _mm_setzero_si128())))
-    #endif
-    using vec128_t = __m128i;
-    #define vec128_zero _mm_setzero_si128()
-    #define vec128_set_16(a) _mm_set1_epi16(a)
-    #if (USE_SSE41)
-        #define vec128_load(a) _mm_cvtepu8_epi16(_mm_loadl_epi64(a))
-    #else
-        #define vec128_load(a) _mm_load_si128(a)
-    #endif
-    #define vec128_storeu(a, b) _mm_storeu_si128(a, b)
-    #define vec128_add(a, b) _mm_add_epi16(a, b)
-#elif defined(USE_NEON)
-    using vec_t = uint32x4_t;
-    static const std::uint32_t Mask[4] = {1, 2, 4, 8};
-    #define vec_nnz(a) vaddvq_u32(vandq_u32(vtstq_u32(a, a), vld1q_u32(Mask)))
-    using vec128_t = uint16x8_t;
-    #define vec128_zero vdupq_n_u16(0)
-    #define vec128_set_16(a) vdupq_n_u16(a)
-    #define vec128_load(a) vld1q_u16(reinterpret_cast<const std::uint16_t*>(a))
-    #define vec128_storeu(a, b) vst1q_u16(reinterpret_cast<std::uint16_t*>(a), b)
-    #define vec128_add(a, b) vaddq_u16(a, b)
-#endif
-    constexpr IndexType InputSimdWidth = sizeof(vec_t) / sizeof(std::int32_t);
+    using namespace SIMD;
+
+    constexpr IndexType InputSimdWidth = sizeof(vec_uint_t) / sizeof(std::int32_t);
    // Inputs are processed InputSimdWidth at a time and outputs are processed 8 at a time so we process in chunks of max(InputSimdWidth, 8)
    constexpr IndexType ChunkSize       = std::max<IndexType>(InputSimdWidth, 8);
    constexpr IndexType NumChunks       = InputDimensions / ChunkSize;
    constexpr IndexType InputsPerChunk  = ChunkSize / InputSimdWidth;
    constexpr IndexType OutputsPerChunk = ChunkSize / 8;

-   const auto inputVector = reinterpret_cast<const vec_t*>(input);
+   const auto inputVector = reinterpret_cast<const vec_uint_t*>(input);
    IndexType      count     = 0;
    vec128_t       base      = vec128_zero;
    const vec128_t increment = vec128_set_16(8);
@@ -133,7 +94,7 @@ void find_nnz(const std::int32_t* input, std::uint16_t* out, IndexType& count_ou
        unsigned nnz = 0;
        for (IndexType j = 0; j < InputsPerChunk; ++j)
        {
-           const vec_t inputChunk = inputVector[i * InputsPerChunk + j];
+           const vec_uint_t inputChunk = inputVector[i * InputsPerChunk + j];
            nnz |= unsigned(vec_nnz(inputChunk)) << (j * InputSimdWidth);
        }
        for (IndexType j = 0; j < OutputsPerChunk; ++j)
@@ -148,12 +109,7 @@ void find_nnz(const std::int32_t* input, std::uint16_t* out, IndexType& count_ou
    }
    count_out = count;
}
-    #undef vec_nnz
-    #undef vec128_zero
-    #undef vec128_set_16
-    #undef vec128_load
-    #undef vec128_storeu
-    #undef vec128_add

#endif

// Sparse input implementation
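For orientation (not part of this commit): find_nnz fills out[] with the indices of the strictly positive entries of input[] and reports their number; the vectorized code above does this ChunkSize inputs at a time using the vec_nnz lane masks and a precomputed lookup of set-bit positions. A plain scalar equivalent, with the name chosen for illustration (requires <cstdint>):

// Scalar sketch of find_nnz: record the index of every positive input element.
template<unsigned InputDimensions>
void find_nnz_scalar(const std::int32_t* input, std::uint16_t* out, unsigned& count_out) {
    unsigned count = 0;
    for (unsigned i = 0; i < InputDimensions; ++i)
        if (input[i] > 0)  // the SIMD path also tests "greater than zero"
            out[count++] = static_cast<std::uint16_t>(i);
    count_out = count;
}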
@@ -232,27 +188,27 @@ class AffineTransformSparseInput {
        using invec_t  = __m512i;
        using outvec_t = __m512i;
        #define vec_set_32 _mm512_set1_epi32
-       #define vec_add_dpbusd_32 Simd::m512_add_dpbusd_epi32
+       #define vec_add_dpbusd_32 SIMD::m512_add_dpbusd_epi32
    #elif defined(USE_AVX2)
        using invec_t  = __m256i;
        using outvec_t = __m256i;
        #define vec_set_32 _mm256_set1_epi32
-       #define vec_add_dpbusd_32 Simd::m256_add_dpbusd_epi32
+       #define vec_add_dpbusd_32 SIMD::m256_add_dpbusd_epi32
    #elif defined(USE_SSSE3)
        using invec_t  = __m128i;
        using outvec_t = __m128i;
        #define vec_set_32 _mm_set1_epi32
-       #define vec_add_dpbusd_32 Simd::m128_add_dpbusd_epi32
+       #define vec_add_dpbusd_32 SIMD::m128_add_dpbusd_epi32
    #elif defined(USE_NEON_DOTPROD)
        using invec_t  = int8x16_t;
        using outvec_t = int32x4_t;
        #define vec_set_32(a) vreinterpretq_s8_u32(vdupq_n_u32(a))
-       #define vec_add_dpbusd_32 Simd::dotprod_m128_add_dpbusd_epi32
+       #define vec_add_dpbusd_32 SIMD::dotprod_m128_add_dpbusd_epi32
    #elif defined(USE_NEON)
        using invec_t  = int8x16_t;
        using outvec_t = int32x4_t;
        #define vec_set_32(a) vreinterpretq_s8_u32(vdupq_n_u32(a))
-       #define vec_add_dpbusd_32 Simd::neon_m128_add_dpbusd_epi32
+       #define vec_add_dpbusd_32 SIMD::neon_m128_add_dpbusd_epi32
    #endif
    static constexpr IndexType OutputSimdWidth = sizeof(outvec_t) / sizeof(OutputType);

@@ -1,134 +0,0 @@
/*
  Stockfish, a UCI chess playing engine derived from Glaurung 2.1
  Copyright (C) 2004-2025 The Stockfish developers (see AUTHORS file)

  Stockfish is free software: you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation, either version 3 of the License, or
  (at your option) any later version.

  Stockfish is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program. If not, see <http://www.gnu.org/licenses/>.
*/

#ifndef STOCKFISH_SIMD_H_INCLUDED
#define STOCKFISH_SIMD_H_INCLUDED

#if defined(USE_AVX2)
    #include <immintrin.h>

#elif defined(USE_SSE41)
    #include <smmintrin.h>

#elif defined(USE_SSSE3)
    #include <tmmintrin.h>

#elif defined(USE_SSE2)
    #include <emmintrin.h>

#elif defined(USE_NEON)
    #include <arm_neon.h>
#endif

namespace Stockfish::Simd {

#if defined(USE_AVX512)

[[maybe_unused]] static int m512_hadd(__m512i sum, int bias) {
    return _mm512_reduce_add_epi32(sum) + bias;
}

[[maybe_unused]] static void m512_add_dpbusd_epi32(__m512i& acc, __m512i a, __m512i b) {

    #if defined(USE_VNNI)
    acc = _mm512_dpbusd_epi32(acc, a, b);
    #else
    __m512i product0 = _mm512_maddubs_epi16(a, b);
    product0         = _mm512_madd_epi16(product0, _mm512_set1_epi16(1));
    acc              = _mm512_add_epi32(acc, product0);
    #endif
}

#endif

#if defined(USE_AVX2)

[[maybe_unused]] static int m256_hadd(__m256i sum, int bias) {
    __m128i sum128 = _mm_add_epi32(_mm256_castsi256_si128(sum), _mm256_extracti128_si256(sum, 1));
    sum128         = _mm_add_epi32(sum128, _mm_shuffle_epi32(sum128, _MM_PERM_BADC));
    sum128         = _mm_add_epi32(sum128, _mm_shuffle_epi32(sum128, _MM_PERM_CDAB));
    return _mm_cvtsi128_si32(sum128) + bias;
}

[[maybe_unused]] static void m256_add_dpbusd_epi32(__m256i& acc, __m256i a, __m256i b) {

    #if defined(USE_VNNI)
    acc = _mm256_dpbusd_epi32(acc, a, b);
    #else
    __m256i product0 = _mm256_maddubs_epi16(a, b);
    product0         = _mm256_madd_epi16(product0, _mm256_set1_epi16(1));
    acc              = _mm256_add_epi32(acc, product0);
    #endif
}

#endif

#if defined(USE_SSSE3)

[[maybe_unused]] static int m128_hadd(__m128i sum, int bias) {
    sum = _mm_add_epi32(sum, _mm_shuffle_epi32(sum, 0x4E));  //_MM_PERM_BADC
    sum = _mm_add_epi32(sum, _mm_shuffle_epi32(sum, 0xB1));  //_MM_PERM_CDAB
    return _mm_cvtsi128_si32(sum) + bias;
}

[[maybe_unused]] static void m128_add_dpbusd_epi32(__m128i& acc, __m128i a, __m128i b) {

    __m128i product0 = _mm_maddubs_epi16(a, b);
    product0         = _mm_madd_epi16(product0, _mm_set1_epi16(1));
    acc              = _mm_add_epi32(acc, product0);
}

#endif

#if defined(USE_NEON_DOTPROD)

[[maybe_unused]] static void
dotprod_m128_add_dpbusd_epi32(int32x4_t& acc, int8x16_t a, int8x16_t b) {

    acc = vdotq_s32(acc, a, b);
}
#endif

#if defined(USE_NEON)

[[maybe_unused]] static int neon_m128_reduce_add_epi32(int32x4_t s) {
    #if USE_NEON >= 8
    return vaddvq_s32(s);
    #else
    return s[0] + s[1] + s[2] + s[3];
    #endif
}

[[maybe_unused]] static int neon_m128_hadd(int32x4_t sum, int bias) {
    return neon_m128_reduce_add_epi32(sum) + bias;
}

#endif

#if USE_NEON >= 8
[[maybe_unused]] static void neon_m128_add_dpbusd_epi32(int32x4_t& acc, int8x16_t a, int8x16_t b) {

    int16x8_t product0 = vmull_s8(vget_low_s8(a), vget_low_s8(b));
    int16x8_t product1 = vmull_high_s8(a, b);
    int16x8_t sum      = vpaddq_s16(product0, product1);
    acc                = vpadalq_s16(acc, sum);
}
#endif
}

#endif  // STOCKFISH_SIMD_H_INCLUDED
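As a reading aid (not part of this commit): every *_add_dpbusd_epi32 helper in the deleted header above implements the same per-lane operation — each 32-bit accumulator lane receives the dot product of four 8-bit activations with four signed 8-bit weights. A scalar sketch (requires <cstdint>; Lanes would be 16 for __m512i, 8 for __m256i, and 4 for __m128i or int32x4_t); it assumes the activations stay in 0..127, as the clipped-ReLU layers produce, so the x86 maddubs saturation and the NEON signed-times-signed fallback all agree with it:

// acc[i] += dot(a[4i..4i+3], b[4i..4i+3]) for every 32-bit lane i.
template<int Lanes>
void add_dpbusd_epi32_scalar(std::int32_t* acc, const std::uint8_t* a, const std::int8_t* b) {
    for (int i = 0; i < Lanes; ++i)
        for (int j = 0; j < 4; ++j)
            acc[i] += int(a[4 * i + j]) * int(b[4 * i + j]);
}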
@@ -32,6 +32,7 @@
#include "../types.h"
#include "nnue_accumulator.h"
#include "nnue_architecture.h"
#include "nnue_common.h"
#include "nnue_feature_transformer.h"
#include "nnue_misc.h"

@@ -28,25 +28,12 @@
#include "../position.h"
#include "../types.h"
#include "nnue_architecture.h"
-#include "nnue_feature_transformer.h"
+#include "nnue_feature_transformer.h"  // IWYU pragma: keep
+#include "simd.h"

namespace Stockfish::Eval::NNUE {

-#if defined(__GNUC__) && !defined(__clang__)
-    #if __GNUC__ >= 13
-        #define sf_assume(cond) __attribute__((assume(cond)))
-    #else
-        #define sf_assume(cond) \
-            do \
-            { \
-                if (!(cond)) \
-                    __builtin_unreachable(); \
-            } while (0)
-    #endif
-#else
-    // do nothing for other compilers
-    #define sf_assume(cond)
-#endif
+using namespace SIMD;

namespace {

@@ -381,7 +368,7 @@ void update_accumulator_refresh_cache(const FeatureTransformer<Dimensions>& feat
                                      AccumulatorState&                     accumulatorState,
                                      AccumulatorCaches::Cache<Dimensions>& cache) {

-   using Tiling [[maybe_unused]] = SIMDTiling<Dimensions, Dimensions>;
+   using Tiling [[maybe_unused]] = SIMDTiling<Dimensions, Dimensions, PSQTBuckets>;

    const Square ksq   = pos.square<KING>(Perspective);
    auto&        entry = cache[ksq][Perspective];

@@ -37,10 +37,6 @@ class Position;

namespace Stockfish::Eval::NNUE {

-using BiasType       = std::int16_t;
-using PSQTWeightType = std::int32_t;
-using IndexType      = std::uint32_t;

template<IndexType Size>
struct alignas(CacheLineSize) Accumulator;

@@ -49,6 +49,12 @@ constexpr int L3Small = 32;
constexpr IndexType PSQTBuckets = 8;
constexpr IndexType LayerStacks = 8;

+// If vector instructions are enabled, we update and refresh the
+// accumulator tile by tile such that each tile fits in the CPU's
+// vector registers.
+static_assert(PSQTBuckets % 8 == 0,
+              "Per feature PSQT values cannot be processed at granularity lower than 8 at a time.");
+
template<IndexType L1, int L2, int L3>
struct NetworkArchitecture {
    static constexpr IndexType TransformedFeatureDimensions = L1;

@@ -48,6 +48,11 @@

namespace Stockfish::Eval::NNUE {

+using BiasType       = std::int16_t;
+using WeightType     = std::int16_t;
+using PSQTWeightType = std::int32_t;
+using IndexType      = std::uint32_t;
+
// Version of the evaluation file
constexpr std::uint32_t Version = 0x7AF32F20u;

@@ -31,174 +31,10 @@
#include "nnue_accumulator.h"
#include "nnue_architecture.h"
#include "nnue_common.h"
+#include "simd.h"

namespace Stockfish::Eval::NNUE {

-using BiasType       = std::int16_t;
-using WeightType     = std::int16_t;
-using PSQTWeightType = std::int32_t;
-
-// If vector instructions are enabled, we update and refresh the
-// accumulator tile by tile such that each tile fits in the CPU's
-// vector registers.
-#define VECTOR
-
-static_assert(PSQTBuckets % 8 == 0,
-              "Per feature PSQT values cannot be processed at granularity lower than 8 at a time.");
-
-#ifdef USE_AVX512
-using vec_t      = __m512i;
-using psqt_vec_t = __m256i;
-    #define vec_load(a) _mm512_load_si512(a)
-    #define vec_store(a, b) _mm512_store_si512(a, b)
-    #define vec_add_16(a, b) _mm512_add_epi16(a, b)
-    #define vec_sub_16(a, b) _mm512_sub_epi16(a, b)
-    #define vec_mulhi_16(a, b) _mm512_mulhi_epi16(a, b)
-    #define vec_zero() _mm512_setzero_epi32()
-    #define vec_set_16(a) _mm512_set1_epi16(a)
-    #define vec_max_16(a, b) _mm512_max_epi16(a, b)
-    #define vec_min_16(a, b) _mm512_min_epi16(a, b)
-    #define vec_slli_16(a, b) _mm512_slli_epi16(a, b)
-    // Inverse permuted at load time
-    #define vec_packus_16(a, b) _mm512_packus_epi16(a, b)
-    #define vec_load_psqt(a) _mm256_load_si256(a)
-    #define vec_store_psqt(a, b) _mm256_store_si256(a, b)
-    #define vec_add_psqt_32(a, b) _mm256_add_epi32(a, b)
-    #define vec_sub_psqt_32(a, b) _mm256_sub_epi32(a, b)
-    #define vec_zero_psqt() _mm256_setzero_si256()
-    #define NumRegistersSIMD 16
-    #define MaxChunkSize 64
-
-#elif USE_AVX2
-using vec_t      = __m256i;
-using psqt_vec_t = __m256i;
-    #define vec_load(a) _mm256_load_si256(a)
-    #define vec_store(a, b) _mm256_store_si256(a, b)
-    #define vec_add_16(a, b) _mm256_add_epi16(a, b)
-    #define vec_sub_16(a, b) _mm256_sub_epi16(a, b)
-    #define vec_mulhi_16(a, b) _mm256_mulhi_epi16(a, b)
-    #define vec_zero() _mm256_setzero_si256()
-    #define vec_set_16(a) _mm256_set1_epi16(a)
-    #define vec_max_16(a, b) _mm256_max_epi16(a, b)
-    #define vec_min_16(a, b) _mm256_min_epi16(a, b)
-    #define vec_slli_16(a, b) _mm256_slli_epi16(a, b)
-    // Inverse permuted at load time
-    #define vec_packus_16(a, b) _mm256_packus_epi16(a, b)
-    #define vec_load_psqt(a) _mm256_load_si256(a)
-    #define vec_store_psqt(a, b) _mm256_store_si256(a, b)
-    #define vec_add_psqt_32(a, b) _mm256_add_epi32(a, b)
-    #define vec_sub_psqt_32(a, b) _mm256_sub_epi32(a, b)
-    #define vec_zero_psqt() _mm256_setzero_si256()
-    #define NumRegistersSIMD 16
-    #define MaxChunkSize 32
-
-#elif USE_SSE2
-using vec_t      = __m128i;
-using psqt_vec_t = __m128i;
-    #define vec_load(a) (*(a))
-    #define vec_store(a, b) *(a) = (b)
-    #define vec_add_16(a, b) _mm_add_epi16(a, b)
-    #define vec_sub_16(a, b) _mm_sub_epi16(a, b)
-    #define vec_mulhi_16(a, b) _mm_mulhi_epi16(a, b)
-    #define vec_zero() _mm_setzero_si128()
-    #define vec_set_16(a) _mm_set1_epi16(a)
-    #define vec_max_16(a, b) _mm_max_epi16(a, b)
-    #define vec_min_16(a, b) _mm_min_epi16(a, b)
-    #define vec_slli_16(a, b) _mm_slli_epi16(a, b)
-    #define vec_packus_16(a, b) _mm_packus_epi16(a, b)
-    #define vec_load_psqt(a) (*(a))
-    #define vec_store_psqt(a, b) *(a) = (b)
-    #define vec_add_psqt_32(a, b) _mm_add_epi32(a, b)
-    #define vec_sub_psqt_32(a, b) _mm_sub_epi32(a, b)
-    #define vec_zero_psqt() _mm_setzero_si128()
-    #define NumRegistersSIMD (Is64Bit ? 16 : 8)
-    #define MaxChunkSize 16
-
-#elif USE_NEON
-using vec_t      = int16x8_t;
-using psqt_vec_t = int32x4_t;
-    #define vec_load(a) (*(a))
-    #define vec_store(a, b) *(a) = (b)
-    #define vec_add_16(a, b) vaddq_s16(a, b)
-    #define vec_sub_16(a, b) vsubq_s16(a, b)
-    #define vec_mulhi_16(a, b) vqdmulhq_s16(a, b)
-    #define vec_zero() vec_t{0}
-    #define vec_set_16(a) vdupq_n_s16(a)
-    #define vec_max_16(a, b) vmaxq_s16(a, b)
-    #define vec_min_16(a, b) vminq_s16(a, b)
-    #define vec_slli_16(a, b) vshlq_s16(a, vec_set_16(b))
-    #define vec_packus_16(a, b) reinterpret_cast<vec_t>(vcombine_u8(vqmovun_s16(a), vqmovun_s16(b)))
-    #define vec_load_psqt(a) (*(a))
-    #define vec_store_psqt(a, b) *(a) = (b)
-    #define vec_add_psqt_32(a, b) vaddq_s32(a, b)
-    #define vec_sub_psqt_32(a, b) vsubq_s32(a, b)
-    #define vec_zero_psqt() psqt_vec_t{0}
-    #define NumRegistersSIMD 16
-    #define MaxChunkSize 16
-
-#else
-    #undef VECTOR
-
-#endif
-
-struct Vec16Wrapper {
-#ifdef VECTOR
-    using type = vec_t;
-    static type add(const type& lhs, const type& rhs) { return vec_add_16(lhs, rhs); }
-    static type sub(const type& lhs, const type& rhs) { return vec_sub_16(lhs, rhs); }
-#else
-    using type = BiasType;
-    static type add(const type& lhs, const type& rhs) { return lhs + rhs; }
-    static type sub(const type& lhs, const type& rhs) { return lhs - rhs; }
-#endif
-};
-
-struct Vec32Wrapper {
-#ifdef VECTOR
-    using type = psqt_vec_t;
-    static type add(const type& lhs, const type& rhs) { return vec_add_psqt_32(lhs, rhs); }
-    static type sub(const type& lhs, const type& rhs) { return vec_sub_psqt_32(lhs, rhs); }
-#else
-    using type = PSQTWeightType;
-    static type add(const type& lhs, const type& rhs) { return lhs + rhs; }
-    static type sub(const type& lhs, const type& rhs) { return lhs - rhs; }
-#endif
-};
-
-enum UpdateOperation {
-    Add,
-    Sub
-};
-
-template<typename VecWrapper,
-         UpdateOperation... ops,
-         std::enable_if_t<sizeof...(ops) == 0, bool> = true>
-typename VecWrapper::type fused(const typename VecWrapper::type& in) {
-    return in;
-}
-
-template<typename VecWrapper,
-         UpdateOperation update_op,
-         UpdateOperation... ops,
-         typename T,
-         typename... Ts,
-         std::enable_if_t<is_all_same_v<typename VecWrapper::type, T, Ts...>, bool> = true,
-         std::enable_if_t<sizeof...(ops) == sizeof...(Ts), bool> = true>
-typename VecWrapper::type
-fused(const typename VecWrapper::type& in, const T& operand, const Ts&... operands) {
-    switch (update_op)
-    {
-    case Add :
-        return fused<VecWrapper, ops...>(VecWrapper::add(in, operand), operands...);
-    case Sub :
-        return fused<VecWrapper, ops...>(VecWrapper::sub(in, operand), operands...);
-    default :
-        static_assert(update_op == Add || update_op == Sub,
-                      "Only Add and Sub are currently supported.");
-        return typename VecWrapper::type();
-    }
-}

// Returns the inverse of a permutation
template<std::size_t Len>
constexpr std::array<std::size_t, Len>
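For context (not part of this commit): the fused<...> helper removed here reappears unchanged in the new nnue/simd.h below. It folds a compile-time list of Add/Sub operations over one register, so an accumulator update for one added and one removed feature collapses into a single expression. A hypothetical caller — the names are invented; it assumes nnue/simd.h is included and a vector target such as AVX2, so Vec16Wrapper::type is vec_t:

using namespace Stockfish::Eval::NNUE::SIMD;

// Computes (acc + added) - removed, expanded at compile time into vec_add_16 / vec_sub_16.
Vec16Wrapper::type update_one_register(Vec16Wrapper::type acc,
                                       Vec16Wrapper::type added,
                                       Vec16Wrapper::type removed) {
    return fused<Vec16Wrapper, Add, Sub>(acc, added, removed);
}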
@@ -240,61 +76,6 @@ void permute(T (&data)[N], const std::array<std::size_t, OrderSize>& order) {
    }
}

-// Compute optimal SIMD register count for feature transformer accumulation.
-template<IndexType TransformedFeatureWidth, IndexType HalfDimensions>
-class SIMDTiling {
-#ifdef VECTOR
-    // We use __m* types as template arguments, which causes GCC to emit warnings
-    // about losing some attribute information. This is irrelevant to us as we
-    // only take their size, so the following pragma are harmless.
-    #if defined(__GNUC__)
-        #pragma GCC diagnostic push
-        #pragma GCC diagnostic ignored "-Wignored-attributes"
-    #endif
-
-    template<typename SIMDRegisterType, typename LaneType, int NumLanes, int MaxRegisters>
-    static constexpr int BestRegisterCount() {
-        constexpr std::size_t RegisterSize = sizeof(SIMDRegisterType);
-        constexpr std::size_t LaneSize     = sizeof(LaneType);
-
-        static_assert(RegisterSize >= LaneSize);
-        static_assert(MaxRegisters <= NumRegistersSIMD);
-        static_assert(MaxRegisters > 0);
-        static_assert(NumRegistersSIMD > 0);
-        static_assert(RegisterSize % LaneSize == 0);
-        static_assert((NumLanes * LaneSize) % RegisterSize == 0);
-
-        const int ideal = (NumLanes * LaneSize) / RegisterSize;
-        if (ideal <= MaxRegisters)
-            return ideal;
-
-        // Look for the largest divisor of the ideal register count that is smaller than MaxRegisters
-        for (int divisor = MaxRegisters; divisor > 1; --divisor)
-            if (ideal % divisor == 0)
-                return divisor;
-
-        return 1;
-    }
-
-    #if defined(__GNUC__)
-        #pragma GCC diagnostic pop
-    #endif
-
-   public:
-    static constexpr int NumRegs =
-      BestRegisterCount<vec_t, WeightType, TransformedFeatureWidth, NumRegistersSIMD>();
-    static constexpr int NumPsqtRegs =
-      BestRegisterCount<psqt_vec_t, PSQTWeightType, PSQTBuckets, NumRegistersSIMD>();
-
-    static constexpr IndexType TileHeight     = NumRegs * sizeof(vec_t) / 2;
-    static constexpr IndexType PsqtTileHeight = NumPsqtRegs * sizeof(psqt_vec_t) / 4;
-
-    static_assert(HalfDimensions % TileHeight == 0, "TileHeight must divide HalfDimensions");
-    static_assert(PSQTBuckets % PsqtTileHeight == 0, "PsqtTileHeight must divide PSQTBuckets");
-#endif
-};

// Input feature converter
template<IndexType TransformedFeatureDimensions>
class FeatureTransformer {
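As a worked example (not part of this commit), BestRegisterCount and the tile constants above resolve as follows under the AVX2 settings (32-byte vec_t, NumRegistersSIMD 16) for a hypothetical 1024-wide half dimension; the static_asserts merely restate the arithmetic:

// ideal = 1024 int16 lanes * 2 bytes / 32 bytes = 64 registers, more than the 16 allowed,
// so the largest divisor of 64 not exceeding 16 is picked: 16 registers.
// TileHeight = 16 * 32 / 2 = 256 int16 lanes per tile, and 1024 % 256 == 0 as required.
// For the 8 PSQT buckets (int32): ideal = 8 * 4 / 32 = 1 register, PsqtTileHeight = 1 * 32 / 4 = 8.
static_assert(1024 * 2 / 32 == 64);
static_assert(16 * 32 / 2 == 256 && 1024 % 256 == 0);
static_assert(8 * 4 / 32 == 1 && 1 * 32 / 4 == 8);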
@@ -397,6 +178,8 @@ class FeatureTransformer {
                       OutputType*         output,
                       int                 bucket) const {

+       using namespace SIMD;
+
        accumulatorStack.evaluate(pos, *this, *cache);
        const auto& accumulatorState = accumulatorStack.latest();

src/nnue/simd.h (new file, +418 lines)
@@ -0,0 +1,418 @@
/*
  Stockfish, a UCI chess playing engine derived from Glaurung 2.1
  Copyright (C) 2004-2025 The Stockfish developers (see AUTHORS file)

  Stockfish is free software: you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation, either version 3 of the License, or
  (at your option) any later version.

  Stockfish is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program. If not, see <http://www.gnu.org/licenses/>.
*/

#ifndef NNUE_SIMD_H_INCLUDED
#define NNUE_SIMD_H_INCLUDED

#if defined(USE_AVX2)
    #include <immintrin.h>

#elif defined(USE_SSE41)
    #include <smmintrin.h>

#elif defined(USE_SSSE3)
    #include <tmmintrin.h>

#elif defined(USE_SSE2)
    #include <emmintrin.h>

#elif defined(USE_NEON)
    #include <arm_neon.h>
#endif

#include "../types.h"
#include "nnue_common.h"

namespace Stockfish::Eval::NNUE::SIMD {

// If vector instructions are enabled, we update and refresh the
// accumulator tile by tile such that each tile fits in the CPU's
// vector registers.
#define VECTOR

#ifdef USE_AVX512
using vec_t      = __m512i;
using vec128_t   = __m128i;
using psqt_vec_t = __m256i;
using vec_uint_t = __m512i;
    #define vec_load(a) _mm512_load_si512(a)
    #define vec_store(a, b) _mm512_store_si512(a, b)
    #define vec_add_16(a, b) _mm512_add_epi16(a, b)
    #define vec_sub_16(a, b) _mm512_sub_epi16(a, b)
    #define vec_mulhi_16(a, b) _mm512_mulhi_epi16(a, b)
    #define vec_zero() _mm512_setzero_epi32()
    #define vec_set_16(a) _mm512_set1_epi16(a)
    #define vec_max_16(a, b) _mm512_max_epi16(a, b)
    #define vec_min_16(a, b) _mm512_min_epi16(a, b)
    #define vec_slli_16(a, b) _mm512_slli_epi16(a, b)
    // Inverse permuted at load time
    #define vec_packus_16(a, b) _mm512_packus_epi16(a, b)
    #define vec_load_psqt(a) _mm256_load_si256(a)
    #define vec_store_psqt(a, b) _mm256_store_si256(a, b)
    #define vec_add_psqt_32(a, b) _mm256_add_epi32(a, b)
    #define vec_sub_psqt_32(a, b) _mm256_sub_epi32(a, b)
    #define vec_zero_psqt() _mm256_setzero_si256()

    #ifdef USE_SSSE3
        #define vec_nnz(a) _mm512_cmpgt_epi32_mask(a, _mm512_setzero_si512())
    #endif

    #define vec128_zero _mm_setzero_si128()
    #define vec128_set_16(a) _mm_set1_epi16(a)
    #if (USE_SSE41)
        #define vec128_load(a) _mm_cvtepu8_epi16(_mm_loadl_epi64(a))
    #else
        #define vec128_load(a) _mm_load_si128(a)
    #endif
    #define vec128_storeu(a, b) _mm_storeu_si128(a, b)
    #define vec128_add(a, b) _mm_add_epi16(a, b)
    #define NumRegistersSIMD 16
    #define MaxChunkSize 64

#elif USE_AVX2
using vec_t      = __m256i;
using vec128_t   = __m128i;
using psqt_vec_t = __m256i;
using vec_uint_t = __m256i;
    #define vec_load(a) _mm256_load_si256(a)
    #define vec_store(a, b) _mm256_store_si256(a, b)
    #define vec_add_16(a, b) _mm256_add_epi16(a, b)
    #define vec_sub_16(a, b) _mm256_sub_epi16(a, b)
    #define vec_mulhi_16(a, b) _mm256_mulhi_epi16(a, b)
    #define vec_zero() _mm256_setzero_si256()
    #define vec_set_16(a) _mm256_set1_epi16(a)
    #define vec_max_16(a, b) _mm256_max_epi16(a, b)
    #define vec_min_16(a, b) _mm256_min_epi16(a, b)
    #define vec_slli_16(a, b) _mm256_slli_epi16(a, b)
    // Inverse permuted at load time
    #define vec_packus_16(a, b) _mm256_packus_epi16(a, b)
    #define vec_load_psqt(a) _mm256_load_si256(a)
    #define vec_store_psqt(a, b) _mm256_store_si256(a, b)
    #define vec_add_psqt_32(a, b) _mm256_add_epi32(a, b)
    #define vec_sub_psqt_32(a, b) _mm256_sub_epi32(a, b)
    #define vec_zero_psqt() _mm256_setzero_si256()

    #ifdef USE_SSSE3
        #if defined(USE_VNNI) && !defined(USE_AVXVNNI)
            #define vec_nnz(a) _mm256_cmpgt_epi32_mask(a, _mm256_setzero_si256())
        #else
            #define vec_nnz(a) \
                _mm256_movemask_ps( \
                  _mm256_castsi256_ps(_mm256_cmpgt_epi32(a, _mm256_setzero_si256())))
        #endif
    #endif

    #define vec128_zero _mm_setzero_si128()
    #define vec128_set_16(a) _mm_set1_epi16(a)
    #if (USE_SSE41)
        #define vec128_load(a) _mm_cvtepu8_epi16(_mm_loadl_epi64(a))
    #else
        #define vec128_load(a) _mm_load_si128(a)
    #endif
    #define vec128_storeu(a, b) _mm_storeu_si128(a, b)
    #define vec128_add(a, b) _mm_add_epi16(a, b)

    #define NumRegistersSIMD 16
    #define MaxChunkSize 32

#elif USE_SSE2
using vec_t      = __m128i;
using vec128_t   = __m128i;
using psqt_vec_t = __m128i;
using vec_uint_t = __m128i;
    #define vec_load(a) (*(a))
    #define vec_store(a, b) *(a) = (b)
    #define vec_add_16(a, b) _mm_add_epi16(a, b)
    #define vec_sub_16(a, b) _mm_sub_epi16(a, b)
    #define vec_mulhi_16(a, b) _mm_mulhi_epi16(a, b)
    #define vec_zero() _mm_setzero_si128()
    #define vec_set_16(a) _mm_set1_epi16(a)
    #define vec_max_16(a, b) _mm_max_epi16(a, b)
    #define vec_min_16(a, b) _mm_min_epi16(a, b)
    #define vec_slli_16(a, b) _mm_slli_epi16(a, b)
    #define vec_packus_16(a, b) _mm_packus_epi16(a, b)
    #define vec_load_psqt(a) (*(a))
    #define vec_store_psqt(a, b) *(a) = (b)
    #define vec_add_psqt_32(a, b) _mm_add_epi32(a, b)
    #define vec_sub_psqt_32(a, b) _mm_sub_epi32(a, b)
    #define vec_zero_psqt() _mm_setzero_si128()

    #ifdef USE_SSSE3
        #define vec_nnz(a) \
            _mm_movemask_ps(_mm_castsi128_ps(_mm_cmpgt_epi32(a, _mm_setzero_si128())))
    #endif

    #define vec128_zero _mm_setzero_si128()
    #define vec128_set_16(a) _mm_set1_epi16(a)
    #if (USE_SSE41)
        #define vec128_load(a) _mm_cvtepu8_epi16(_mm_loadl_epi64(a))
    #else
        #define vec128_load(a) _mm_load_si128(a)
    #endif
    #define vec128_storeu(a, b) _mm_storeu_si128(a, b)
    #define vec128_add(a, b) _mm_add_epi16(a, b)

    #define NumRegistersSIMD (Is64Bit ? 16 : 8)
    #define MaxChunkSize 16

#elif USE_NEON
using vec_t      = int16x8_t;
using psqt_vec_t = int32x4_t;
using vec128_t   = uint16x8_t;
using vec_uint_t = uint32x4_t;
    #define vec_load(a) (*(a))
    #define vec_store(a, b) *(a) = (b)
    #define vec_add_16(a, b) vaddq_s16(a, b)
    #define vec_sub_16(a, b) vsubq_s16(a, b)
    #define vec_mulhi_16(a, b) vqdmulhq_s16(a, b)
    #define vec_zero() vec_t{0}
    #define vec_set_16(a) vdupq_n_s16(a)
    #define vec_max_16(a, b) vmaxq_s16(a, b)
    #define vec_min_16(a, b) vminq_s16(a, b)
    #define vec_slli_16(a, b) vshlq_s16(a, vec_set_16(b))
    #define vec_packus_16(a, b) reinterpret_cast<vec_t>(vcombine_u8(vqmovun_s16(a), vqmovun_s16(b)))
    #define vec_load_psqt(a) (*(a))
    #define vec_store_psqt(a, b) *(a) = (b)
    #define vec_add_psqt_32(a, b) vaddq_s32(a, b)
    #define vec_sub_psqt_32(a, b) vsubq_s32(a, b)
    #define vec_zero_psqt() psqt_vec_t{0}

static constexpr std::uint32_t Mask[4] = {1, 2, 4, 8};
    #define vec_nnz(a) vaddvq_u32(vandq_u32(vtstq_u32(a, a), vld1q_u32(Mask)))
    #define vec128_zero vdupq_n_u16(0)
    #define vec128_set_16(a) vdupq_n_u16(a)
    #define vec128_load(a) vld1q_u16(reinterpret_cast<const std::uint16_t*>(a))
    #define vec128_storeu(a, b) vst1q_u16(reinterpret_cast<std::uint16_t*>(a), b)
    #define vec128_add(a, b) vaddq_u16(a, b)

    #define NumRegistersSIMD 16
    #define MaxChunkSize 16

#else
    #undef VECTOR

#endif

struct Vec16Wrapper {
#ifdef VECTOR
    using type = vec_t;
    static type add(const type& lhs, const type& rhs) { return vec_add_16(lhs, rhs); }
    static type sub(const type& lhs, const type& rhs) { return vec_sub_16(lhs, rhs); }
#else
    using type = BiasType;
    static type add(const type& lhs, const type& rhs) { return lhs + rhs; }
    static type sub(const type& lhs, const type& rhs) { return lhs - rhs; }
#endif
};

struct Vec32Wrapper {
#ifdef VECTOR
    using type = psqt_vec_t;
    static type add(const type& lhs, const type& rhs) { return vec_add_psqt_32(lhs, rhs); }
    static type sub(const type& lhs, const type& rhs) { return vec_sub_psqt_32(lhs, rhs); }
#else
    using type = PSQTWeightType;
    static type add(const type& lhs, const type& rhs) { return lhs + rhs; }
    static type sub(const type& lhs, const type& rhs) { return lhs - rhs; }
#endif
};

enum UpdateOperation {
    Add,
    Sub
};

template<typename VecWrapper,
         UpdateOperation... ops,
         std::enable_if_t<sizeof...(ops) == 0, bool> = true>
typename VecWrapper::type fused(const typename VecWrapper::type& in) {
    return in;
}

template<typename VecWrapper,
         UpdateOperation update_op,
         UpdateOperation... ops,
         typename T,
         typename... Ts,
         std::enable_if_t<is_all_same_v<typename VecWrapper::type, T, Ts...>, bool> = true,
         std::enable_if_t<sizeof...(ops) == sizeof...(Ts), bool> = true>
typename VecWrapper::type
fused(const typename VecWrapper::type& in, const T& operand, const Ts&... operands) {
    switch (update_op)
    {
    case Add :
        return fused<VecWrapper, ops...>(VecWrapper::add(in, operand), operands...);
    case Sub :
        return fused<VecWrapper, ops...>(VecWrapper::sub(in, operand), operands...);
    default :
        static_assert(update_op == Add || update_op == Sub,
                      "Only Add and Sub are currently supported.");
        return typename VecWrapper::type();
    }
}

#if defined(USE_AVX512)

[[maybe_unused]] static int m512_hadd(__m512i sum, int bias) {
    return _mm512_reduce_add_epi32(sum) + bias;
}

[[maybe_unused]] static void m512_add_dpbusd_epi32(__m512i& acc, __m512i a, __m512i b) {

    #if defined(USE_VNNI)
    acc = _mm512_dpbusd_epi32(acc, a, b);
    #else
    __m512i product0 = _mm512_maddubs_epi16(a, b);
    product0         = _mm512_madd_epi16(product0, _mm512_set1_epi16(1));
    acc              = _mm512_add_epi32(acc, product0);
    #endif
}

#endif

#if defined(USE_AVX2)

[[maybe_unused]] static int m256_hadd(__m256i sum, int bias) {
    __m128i sum128 = _mm_add_epi32(_mm256_castsi256_si128(sum), _mm256_extracti128_si256(sum, 1));
    sum128         = _mm_add_epi32(sum128, _mm_shuffle_epi32(sum128, _MM_PERM_BADC));
    sum128         = _mm_add_epi32(sum128, _mm_shuffle_epi32(sum128, _MM_PERM_CDAB));
    return _mm_cvtsi128_si32(sum128) + bias;
}

[[maybe_unused]] static void m256_add_dpbusd_epi32(__m256i& acc, __m256i a, __m256i b) {

    #if defined(USE_VNNI)
    acc = _mm256_dpbusd_epi32(acc, a, b);
    #else
    __m256i product0 = _mm256_maddubs_epi16(a, b);
    product0         = _mm256_madd_epi16(product0, _mm256_set1_epi16(1));
    acc              = _mm256_add_epi32(acc, product0);
    #endif
}

#endif

#if defined(USE_SSSE3)

[[maybe_unused]] static int m128_hadd(__m128i sum, int bias) {
    sum = _mm_add_epi32(sum, _mm_shuffle_epi32(sum, 0x4E));  //_MM_PERM_BADC
    sum = _mm_add_epi32(sum, _mm_shuffle_epi32(sum, 0xB1));  //_MM_PERM_CDAB
    return _mm_cvtsi128_si32(sum) + bias;
}

[[maybe_unused]] static void m128_add_dpbusd_epi32(__m128i& acc, __m128i a, __m128i b) {

    __m128i product0 = _mm_maddubs_epi16(a, b);
    product0         = _mm_madd_epi16(product0, _mm_set1_epi16(1));
    acc              = _mm_add_epi32(acc, product0);
}

#endif

#if defined(USE_NEON_DOTPROD)

[[maybe_unused]] static void
dotprod_m128_add_dpbusd_epi32(int32x4_t& acc, int8x16_t a, int8x16_t b) {

    acc = vdotq_s32(acc, a, b);
}
#endif

#if defined(USE_NEON)

[[maybe_unused]] static int neon_m128_reduce_add_epi32(int32x4_t s) {
    #if USE_NEON >= 8
    return vaddvq_s32(s);
    #else
    return s[0] + s[1] + s[2] + s[3];
    #endif
}

[[maybe_unused]] static int neon_m128_hadd(int32x4_t sum, int bias) {
    return neon_m128_reduce_add_epi32(sum) + bias;
}

#endif

#if USE_NEON >= 8
[[maybe_unused]] static void neon_m128_add_dpbusd_epi32(int32x4_t& acc, int8x16_t a, int8x16_t b) {

    int16x8_t product0 = vmull_s8(vget_low_s8(a), vget_low_s8(b));
    int16x8_t product1 = vmull_high_s8(a, b);
    int16x8_t sum      = vpaddq_s16(product0, product1);
    acc                = vpadalq_s16(acc, sum);
}
#endif

// Compute optimal SIMD register count for feature transformer accumulation.
template<IndexType TransformedFeatureWidth, IndexType HalfDimensions, IndexType PSQTBuckets>
class SIMDTiling {
#ifdef VECTOR
    // We use __m* types as template arguments, which causes GCC to emit warnings
    // about losing some attribute information. This is irrelevant to us as we
    // only take their size, so the following pragma are harmless.
    #if defined(__GNUC__)
        #pragma GCC diagnostic push
        #pragma GCC diagnostic ignored "-Wignored-attributes"
    #endif

    template<typename SIMDRegisterType, typename LaneType, int NumLanes, int MaxRegisters>
    static constexpr int BestRegisterCount() {
        constexpr std::size_t RegisterSize = sizeof(SIMDRegisterType);
        constexpr std::size_t LaneSize     = sizeof(LaneType);

        static_assert(RegisterSize >= LaneSize);
        static_assert(MaxRegisters <= NumRegistersSIMD);
        static_assert(MaxRegisters > 0);
        static_assert(NumRegistersSIMD > 0);
        static_assert(RegisterSize % LaneSize == 0);
        static_assert((NumLanes * LaneSize) % RegisterSize == 0);

        const int ideal = (NumLanes * LaneSize) / RegisterSize;
        if (ideal <= MaxRegisters)
            return ideal;

        // Look for the largest divisor of the ideal register count that is smaller than MaxRegisters
        for (int divisor = MaxRegisters; divisor > 1; --divisor)
            if (ideal % divisor == 0)
                return divisor;

        return 1;
    }

    #if defined(__GNUC__)
        #pragma GCC diagnostic pop
    #endif

   public:
    static constexpr int NumRegs =
      BestRegisterCount<vec_t, WeightType, TransformedFeatureWidth, NumRegistersSIMD>();
    static constexpr int NumPsqtRegs =
      BestRegisterCount<psqt_vec_t, PSQTWeightType, PSQTBuckets, NumRegistersSIMD>();

    static constexpr IndexType TileHeight     = NumRegs * sizeof(vec_t) / 2;
    static constexpr IndexType PsqtTileHeight = NumPsqtRegs * sizeof(psqt_vec_t) / 4;

    static_assert(HalfDimensions % TileHeight == 0, "TileHeight must divide HalfDimensions");
    static_assert(PSQTBuckets % PsqtTileHeight == 0, "PsqtTileHeight must divide PSQTBuckets");
#endif
};
}

#endif