Remove nnz lookup table load optimization

Passed Non-regression STC:
LLR: 2.95 (-2.94,2.94) <-1.75,0.25>
Total: 63296 W: 16491 L: 16311 D: 30494
Ptnml(0-2): 129, 6624, 17972, 6784, 139
https://tests.stockfishchess.org/tests/view/6833ce486ec7634154f9cb22

Passed 2nd Non-regression STC:
LLR: 2.97 (-2.94,2.94) <-1.75,0.25>
Total: 369568 W: 95314 L: 95451 D: 178803
Ptnml(0-2): 897, 40231, 102601, 40222, 833
https://tests.stockfishchess.org/tests/view/68355c956ec7634154f9ce07

closes https://github.com/official-stockfish/Stockfish/pull/6100

no functional change
This commit is contained in:
Shawn Xu
2025-05-25 19:12:43 -07:00
committed by Joost VandeVondele
parent 9fd40b9ea8
commit dc85c5a4c9
2 changed files with 3 additions and 19 deletions

View File

@@ -49,11 +49,7 @@ constexpr int constexpr_lsb(uint64_t bb) {
 alignas(CacheLineSize) static constexpr struct OffsetIndices {
-#if (USE_SSE41)
-    std::uint8_t offset_indices[256][8];
-#else
     std::uint16_t offset_indices[256][8];
-#endif
     constexpr OffsetIndices() :
         offset_indices() {

View File

@@ -74,11 +74,7 @@ using vec_uint_t = __m512i;
     #define vec128_zero _mm_setzero_si128()
     #define vec128_set_16(a) _mm_set1_epi16(a)
-    #if (USE_SSE41)
-    #define vec128_load(a) _mm_cvtepu8_epi16(_mm_loadl_epi64(a))
-    #else
     #define vec128_load(a) _mm_load_si128(a)
-    #endif
     #define vec128_storeu(a, b) _mm_storeu_si128(a, b)
     #define vec128_add(a, b) _mm_add_epi16(a, b)
     #define NumRegistersSIMD 16
@@ -119,11 +115,7 @@ using vec_uint_t = __m256i;
     #define vec128_zero _mm_setzero_si128()
     #define vec128_set_16(a) _mm_set1_epi16(a)
-    #if (USE_SSE41)
-    #define vec128_load(a) _mm_cvtepu8_epi16(_mm_loadl_epi64(a))
-    #else
     #define vec128_load(a) _mm_load_si128(a)
-    #endif
     #define vec128_storeu(a, b) _mm_storeu_si128(a, b)
     #define vec128_add(a, b) _mm_add_epi16(a, b)
@@ -159,11 +151,7 @@ using vec_uint_t = __m128i;
     #define vec128_zero _mm_setzero_si128()
     #define vec128_set_16(a) _mm_set1_epi16(a)
-    #if (USE_SSE41)
-    #define vec128_load(a) _mm_cvtepu8_epi16(_mm_loadl_epi64(a))
-    #else
     #define vec128_load(a) _mm_load_si128(a)
-    #endif
     #define vec128_storeu(a, b) _mm_storeu_si128(a, b)
     #define vec128_add(a, b) _mm_add_epi16(a, b)