diff --git a/src/amalgam/gen/avx512skx.c b/src/amalgam/gen/avx512skx.c index bbbc24478c1..9dd5b1810e9 100644 --- a/src/amalgam/gen/avx512skx.c +++ b/src/amalgam/gen/avx512skx.c @@ -1843,7 +1843,7 @@ void xnn_qd8_f32_qc8w_gemm_minmax_ukernel_1x16c8__avx512skx( } while (nc != 0); } -void xnn_qd8_f32_qc8w_gemm_minmax_ukernel_4x16c8__avx512skx( +void xnn_qd8_f32_qc8w_gemm_minmax_ukernel_7x16c8__avx512skx( size_t mr, size_t nc, size_t kc, @@ -1857,7 +1857,7 @@ void xnn_qd8_f32_qc8w_gemm_minmax_ukernel_4x16c8__avx512skx( const struct xnn_qd8_quantization_params quantization_params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(mr != 0); - assert(mr <= 4); + assert(mr <= 7); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(int8_t) == 0); @@ -1882,16 +1882,37 @@ void xnn_qd8_f32_qc8w_gemm_minmax_ukernel_4x16c8__avx512skx( } const int8_t* a3 = (const int8_t*) ((uintptr_t) a2 + a_stride); float* c3 = (float*) ((uintptr_t) c2 + cm_stride); - if XNN_UNPREDICTABLE(mr != 4) { + if XNN_UNPREDICTABLE(mr < 4) { a3 = a2; c3 = c2; } + const int8_t* a4 = (const int8_t*) ((uintptr_t) a3 + a_stride); + float* c4 = (float*) ((uintptr_t) c3 + cm_stride); + if XNN_UNPREDICTABLE(mr <= 4) { + a4 = a3; + c4 = c3; + } + const int8_t* a5 = (const int8_t*) ((uintptr_t) a4 + a_stride); + float* c5 = (float*) ((uintptr_t) c4 + cm_stride); + if XNN_UNPREDICTABLE(mr < 6) { + a5 = a4; + c5 = c4; + } + const int8_t* a6 = (const int8_t*) ((uintptr_t) a5 + a_stride); + float* c6 = (float*) ((uintptr_t) c5 + cm_stride); + if XNN_UNPREDICTABLE(mr <= 6) { + a6 = a5; + c6 = c5; + } const __mmask16 vbias_mask = _cvtu32_mask16(0x1111); const __m512i vinput_zero_point0 = _mm512_set1_epi32((int) quantization_params[0].zero_point); const __m512i vinput_zero_point1 = _mm512_set1_epi32((int) quantization_params[1].zero_point); const __m512i vinput_zero_point2 = _mm512_set1_epi32((int) quantization_params[2].zero_point); const __m512i vinput_zero_point3 = _mm512_set1_epi32((int) quantization_params[3].zero_point); + const __m512i vinput_zero_point4 = _mm512_set1_epi32((int) quantization_params[4].zero_point); + const __m512i vinput_zero_point5 = _mm512_set1_epi32((int) quantization_params[5].zero_point); + const __m512i vinput_zero_point6 = _mm512_set1_epi32((int) quantization_params[6].zero_point); const __m512 voutput_min = _mm512_set1_ps(params->scalar.min); const __m512 voutput_max = _mm512_set1_ps(params->scalar.max); do { @@ -1916,6 +1937,18 @@ void xnn_qd8_f32_qc8w_gemm_minmax_ukernel_4x16c8__avx512skx( __m512i vacc3x4567 = _mm512_mullo_epi32(vksum4567, vinput_zero_point3); __m512i vacc3x89AB = _mm512_mullo_epi32(vksum89AB, vinput_zero_point3); __m512i vacc3xCDEF = _mm512_mullo_epi32(vksumCDEF, vinput_zero_point3); + __m512i vacc4x0123 = _mm512_mullo_epi32(vksum0123, vinput_zero_point4); + __m512i vacc4x4567 = _mm512_mullo_epi32(vksum4567, vinput_zero_point4); + __m512i vacc4x89AB = _mm512_mullo_epi32(vksum89AB, vinput_zero_point4); + __m512i vacc4xCDEF = _mm512_mullo_epi32(vksumCDEF, vinput_zero_point4); + __m512i vacc5x0123 = _mm512_mullo_epi32(vksum0123, vinput_zero_point5); + __m512i vacc5x4567 = _mm512_mullo_epi32(vksum4567, vinput_zero_point5); + __m512i vacc5x89AB = _mm512_mullo_epi32(vksum89AB, vinput_zero_point5); + __m512i vacc5xCDEF = _mm512_mullo_epi32(vksumCDEF, vinput_zero_point5); + __m512i vacc6x0123 = _mm512_mullo_epi32(vksum0123, vinput_zero_point6); + __m512i vacc6x4567 = _mm512_mullo_epi32(vksum4567, vinput_zero_point6); + __m512i vacc6x89AB = _mm512_mullo_epi32(vksum89AB, vinput_zero_point6); + 
__m512i vacc6xCDEF = _mm512_mullo_epi32(vksumCDEF, vinput_zero_point6); w = (const int32_t*) w + 16; size_t k = kc; @@ -1929,6 +1962,12 @@ void xnn_qd8_f32_qc8w_gemm_minmax_ukernel_4x16c8__avx512skx( a2 += 8; const __m512i va3 = _mm512_broadcast_i32x4(_mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) a3))); a3 += 8; + const __m512i va4 = _mm512_broadcast_i32x4(_mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) a4))); + a4 += 8; + const __m512i va5 = _mm512_broadcast_i32x4(_mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) a5))); + a5 += 8; + const __m512i va6 = _mm512_broadcast_i32x4(_mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) a6))); + a6 += 8; const __m512i vb0123 = _mm512_cvtepi8_epi16(_mm256_load_si256((const __m256i*) w)); @@ -1936,24 +1975,36 @@ void xnn_qd8_f32_qc8w_gemm_minmax_ukernel_4x16c8__avx512skx( vacc1x0123 = _mm512_add_epi32(vacc1x0123, _mm512_madd_epi16(va1, vb0123)); vacc2x0123 = _mm512_add_epi32(vacc2x0123, _mm512_madd_epi16(va2, vb0123)); vacc3x0123 = _mm512_add_epi32(vacc3x0123, _mm512_madd_epi16(va3, vb0123)); + vacc4x0123 = _mm512_add_epi32(vacc4x0123, _mm512_madd_epi16(va4, vb0123)); + vacc5x0123 = _mm512_add_epi32(vacc5x0123, _mm512_madd_epi16(va5, vb0123)); + vacc6x0123 = _mm512_add_epi32(vacc6x0123, _mm512_madd_epi16(va6, vb0123)); const __m512i vb4567 = _mm512_cvtepi8_epi16(_mm256_load_si256((const __m256i*) ((const int8_t*) w + 32))); vacc0x4567 = _mm512_add_epi32(vacc0x4567, _mm512_madd_epi16(va0, vb4567)); vacc1x4567 = _mm512_add_epi32(vacc1x4567, _mm512_madd_epi16(va1, vb4567)); vacc2x4567 = _mm512_add_epi32(vacc2x4567, _mm512_madd_epi16(va2, vb4567)); vacc3x4567 = _mm512_add_epi32(vacc3x4567, _mm512_madd_epi16(va3, vb4567)); + vacc4x4567 = _mm512_add_epi32(vacc4x4567, _mm512_madd_epi16(va4, vb4567)); + vacc5x4567 = _mm512_add_epi32(vacc5x4567, _mm512_madd_epi16(va5, vb4567)); + vacc6x4567 = _mm512_add_epi32(vacc6x4567, _mm512_madd_epi16(va6, vb4567)); const __m512i vb89AB = _mm512_cvtepi8_epi16(_mm256_load_si256((const __m256i*) ((const int8_t*) w + 64))); vacc0x89AB = _mm512_add_epi32(vacc0x89AB, _mm512_madd_epi16(va0, vb89AB)); vacc1x89AB = _mm512_add_epi32(vacc1x89AB, _mm512_madd_epi16(va1, vb89AB)); vacc2x89AB = _mm512_add_epi32(vacc2x89AB, _mm512_madd_epi16(va2, vb89AB)); vacc3x89AB = _mm512_add_epi32(vacc3x89AB, _mm512_madd_epi16(va3, vb89AB)); + vacc4x89AB = _mm512_add_epi32(vacc4x89AB, _mm512_madd_epi16(va4, vb89AB)); + vacc5x89AB = _mm512_add_epi32(vacc5x89AB, _mm512_madd_epi16(va5, vb89AB)); + vacc6x89AB = _mm512_add_epi32(vacc6x89AB, _mm512_madd_epi16(va6, vb89AB)); const __m512i vbCDEF = _mm512_cvtepi8_epi16(_mm256_load_si256((const __m256i*) ((const int8_t*) w + 96))); vacc0xCDEF = _mm512_add_epi32(vacc0xCDEF, _mm512_madd_epi16(va0, vbCDEF)); vacc1xCDEF = _mm512_add_epi32(vacc1xCDEF, _mm512_madd_epi16(va1, vbCDEF)); vacc2xCDEF = _mm512_add_epi32(vacc2xCDEF, _mm512_madd_epi16(va2, vbCDEF)); vacc3xCDEF = _mm512_add_epi32(vacc3xCDEF, _mm512_madd_epi16(va3, vbCDEF)); + vacc4xCDEF = _mm512_add_epi32(vacc4xCDEF, _mm512_madd_epi16(va4, vbCDEF)); + vacc5xCDEF = _mm512_add_epi32(vacc5xCDEF, _mm512_madd_epi16(va5, vbCDEF)); + vacc6xCDEF = _mm512_add_epi32(vacc6xCDEF, _mm512_madd_epi16(va6, vbCDEF)); w = (const int8_t*) w + 128; k -= 8 * sizeof(int8_t); @@ -1967,26 +2018,44 @@ void xnn_qd8_f32_qc8w_gemm_minmax_ukernel_4x16c8__avx512skx( const __m512i vacc2x8C9DAEBF = _mm512_add_epi32(_mm512_unpacklo_epi32(vacc2x89AB, vacc2xCDEF), _mm512_unpackhi_epi32(vacc2x89AB, vacc2xCDEF)); const __m512i vacc3x04152637 = 
_mm512_add_epi32(_mm512_unpacklo_epi32(vacc3x0123, vacc3x4567), _mm512_unpackhi_epi32(vacc3x0123, vacc3x4567)); const __m512i vacc3x8C9DAEBF = _mm512_add_epi32(_mm512_unpacklo_epi32(vacc3x89AB, vacc3xCDEF), _mm512_unpackhi_epi32(vacc3x89AB, vacc3xCDEF)); + const __m512i vacc4x04152637 = _mm512_add_epi32(_mm512_unpacklo_epi32(vacc4x0123, vacc4x4567), _mm512_unpackhi_epi32(vacc4x0123, vacc4x4567)); + const __m512i vacc4x8C9DAEBF = _mm512_add_epi32(_mm512_unpacklo_epi32(vacc4x89AB, vacc4xCDEF), _mm512_unpackhi_epi32(vacc4x89AB, vacc4xCDEF)); + const __m512i vacc5x04152637 = _mm512_add_epi32(_mm512_unpacklo_epi32(vacc5x0123, vacc5x4567), _mm512_unpackhi_epi32(vacc5x0123, vacc5x4567)); + const __m512i vacc5x8C9DAEBF = _mm512_add_epi32(_mm512_unpacklo_epi32(vacc5x89AB, vacc5xCDEF), _mm512_unpackhi_epi32(vacc5x89AB, vacc5xCDEF)); + const __m512i vacc6x04152637 = _mm512_add_epi32(_mm512_unpacklo_epi32(vacc6x0123, vacc6x4567), _mm512_unpackhi_epi32(vacc6x0123, vacc6x4567)); + const __m512i vacc6x8C9DAEBF = _mm512_add_epi32(_mm512_unpacklo_epi32(vacc6x89AB, vacc6xCDEF), _mm512_unpackhi_epi32(vacc6x89AB, vacc6xCDEF)); __m512i vacc0x084C195D2A6E3B7F = _mm512_add_epi32(_mm512_unpacklo_epi32(vacc0x04152637, vacc0x8C9DAEBF), _mm512_unpackhi_epi32(vacc0x04152637, vacc0x8C9DAEBF)); __m512i vacc1x084C195D2A6E3B7F = _mm512_add_epi32(_mm512_unpacklo_epi32(vacc1x04152637, vacc1x8C9DAEBF), _mm512_unpackhi_epi32(vacc1x04152637, vacc1x8C9DAEBF)); __m512i vacc2x084C195D2A6E3B7F = _mm512_add_epi32(_mm512_unpacklo_epi32(vacc2x04152637, vacc2x8C9DAEBF), _mm512_unpackhi_epi32(vacc2x04152637, vacc2x8C9DAEBF)); __m512i vacc3x084C195D2A6E3B7F = _mm512_add_epi32(_mm512_unpacklo_epi32(vacc3x04152637, vacc3x8C9DAEBF), _mm512_unpackhi_epi32(vacc3x04152637, vacc3x8C9DAEBF)); + __m512i vacc4x084C195D2A6E3B7F = _mm512_add_epi32(_mm512_unpacklo_epi32(vacc4x04152637, vacc4x8C9DAEBF), _mm512_unpackhi_epi32(vacc4x04152637, vacc4x8C9DAEBF)); + __m512i vacc5x084C195D2A6E3B7F = _mm512_add_epi32(_mm512_unpacklo_epi32(vacc5x04152637, vacc5x8C9DAEBF), _mm512_unpackhi_epi32(vacc5x04152637, vacc5x8C9DAEBF)); + __m512i vacc6x084C195D2A6E3B7F = _mm512_add_epi32(_mm512_unpacklo_epi32(vacc6x04152637, vacc6x8C9DAEBF), _mm512_unpackhi_epi32(vacc6x04152637, vacc6x8C9DAEBF)); __m512 vscaled0x084C195D2A6E3B7F = _mm512_cvtepi32_ps(vacc0x084C195D2A6E3B7F); __m512 vscaled1x084C195D2A6E3B7F = _mm512_cvtepi32_ps(vacc1x084C195D2A6E3B7F); __m512 vscaled2x084C195D2A6E3B7F = _mm512_cvtepi32_ps(vacc2x084C195D2A6E3B7F); __m512 vscaled3x084C195D2A6E3B7F = _mm512_cvtepi32_ps(vacc3x084C195D2A6E3B7F); + __m512 vscaled4x084C195D2A6E3B7F = _mm512_cvtepi32_ps(vacc4x084C195D2A6E3B7F); + __m512 vscaled5x084C195D2A6E3B7F = _mm512_cvtepi32_ps(vacc5x084C195D2A6E3B7F); + __m512 vscaled6x084C195D2A6E3B7F = _mm512_cvtepi32_ps(vacc6x084C195D2A6E3B7F); __m512 vout0x0123456789ABCDEF = _mm512_permutexvar_ps(_mm512_set_epi32(15, 11, 7, 3, 13, 9, 5, 1, 14, 10, 6, 2, 12, 8, 4, 0), vscaled0x084C195D2A6E3B7F); __m512 vout1x0123456789ABCDEF = _mm512_permutexvar_ps(_mm512_set_epi32(15, 11, 7, 3, 13, 9, 5, 1, 14, 10, 6, 2, 12, 8, 4, 0), vscaled1x084C195D2A6E3B7F); __m512 vout2x0123456789ABCDEF = _mm512_permutexvar_ps(_mm512_set_epi32(15, 11, 7, 3, 13, 9, 5, 1, 14, 10, 6, 2, 12, 8, 4, 0), vscaled2x084C195D2A6E3B7F); __m512 vout3x0123456789ABCDEF = _mm512_permutexvar_ps(_mm512_set_epi32(15, 11, 7, 3, 13, 9, 5, 1, 14, 10, 6, 2, 12, 8, 4, 0), vscaled3x084C195D2A6E3B7F); + __m512 vout4x0123456789ABCDEF = _mm512_permutexvar_ps(_mm512_set_epi32(15, 11, 7, 3, 13, 9, 5, 1, 14, 10, 6, 2, 12, 8, 
4, 0), vscaled4x084C195D2A6E3B7F); + __m512 vout5x0123456789ABCDEF = _mm512_permutexvar_ps(_mm512_set_epi32(15, 11, 7, 3, 13, 9, 5, 1, 14, 10, 6, 2, 12, 8, 4, 0), vscaled5x084C195D2A6E3B7F); + __m512 vout6x0123456789ABCDEF = _mm512_permutexvar_ps(_mm512_set_epi32(15, 11, 7, 3, 13, 9, 5, 1, 14, 10, 6, 2, 12, 8, 4, 0), vscaled6x084C195D2A6E3B7F); vout0x0123456789ABCDEF = _mm512_mul_ps(vout0x0123456789ABCDEF, _mm512_set1_ps(quantization_params[0].inv_scale)); vout1x0123456789ABCDEF = _mm512_mul_ps(vout1x0123456789ABCDEF, _mm512_set1_ps(quantization_params[1].inv_scale)); vout2x0123456789ABCDEF = _mm512_mul_ps(vout2x0123456789ABCDEF, _mm512_set1_ps(quantization_params[2].inv_scale)); vout3x0123456789ABCDEF = _mm512_mul_ps(vout3x0123456789ABCDEF, _mm512_set1_ps(quantization_params[3].inv_scale)); + vout4x0123456789ABCDEF = _mm512_mul_ps(vout4x0123456789ABCDEF, _mm512_set1_ps(quantization_params[4].inv_scale)); + vout5x0123456789ABCDEF = _mm512_mul_ps(vout5x0123456789ABCDEF, _mm512_set1_ps(quantization_params[5].inv_scale)); + vout6x0123456789ABCDEF = _mm512_mul_ps(vout6x0123456789ABCDEF, _mm512_set1_ps(quantization_params[6].inv_scale)); const __m512 vfilter_output_scale0123456789ABCDEF = _mm512_load_ps((const float*) w); const __m512 vbias0123456789ABCDEF = _mm512_load_ps((const float*) w + 16); @@ -1995,18 +2064,30 @@ void xnn_qd8_f32_qc8w_gemm_minmax_ukernel_4x16c8__avx512skx( vout1x0123456789ABCDEF = _mm512_fmadd_ps(vout1x0123456789ABCDEF, vfilter_output_scale0123456789ABCDEF, vbias0123456789ABCDEF); vout2x0123456789ABCDEF = _mm512_fmadd_ps(vout2x0123456789ABCDEF, vfilter_output_scale0123456789ABCDEF, vbias0123456789ABCDEF); vout3x0123456789ABCDEF = _mm512_fmadd_ps(vout3x0123456789ABCDEF, vfilter_output_scale0123456789ABCDEF, vbias0123456789ABCDEF); + vout4x0123456789ABCDEF = _mm512_fmadd_ps(vout4x0123456789ABCDEF, vfilter_output_scale0123456789ABCDEF, vbias0123456789ABCDEF); + vout5x0123456789ABCDEF = _mm512_fmadd_ps(vout5x0123456789ABCDEF, vfilter_output_scale0123456789ABCDEF, vbias0123456789ABCDEF); + vout6x0123456789ABCDEF = _mm512_fmadd_ps(vout6x0123456789ABCDEF, vfilter_output_scale0123456789ABCDEF, vbias0123456789ABCDEF); vout0x0123456789ABCDEF = _mm512_max_ps(vout0x0123456789ABCDEF, voutput_min); vout1x0123456789ABCDEF = _mm512_max_ps(vout1x0123456789ABCDEF, voutput_min); vout2x0123456789ABCDEF = _mm512_max_ps(vout2x0123456789ABCDEF, voutput_min); vout3x0123456789ABCDEF = _mm512_max_ps(vout3x0123456789ABCDEF, voutput_min); + vout4x0123456789ABCDEF = _mm512_max_ps(vout4x0123456789ABCDEF, voutput_min); + vout5x0123456789ABCDEF = _mm512_max_ps(vout5x0123456789ABCDEF, voutput_min); + vout6x0123456789ABCDEF = _mm512_max_ps(vout6x0123456789ABCDEF, voutput_min); vout0x0123456789ABCDEF = _mm512_min_ps(vout0x0123456789ABCDEF, voutput_max); vout1x0123456789ABCDEF = _mm512_min_ps(vout1x0123456789ABCDEF, voutput_max); vout2x0123456789ABCDEF = _mm512_min_ps(vout2x0123456789ABCDEF, voutput_max); vout3x0123456789ABCDEF = _mm512_min_ps(vout3x0123456789ABCDEF, voutput_max); + vout4x0123456789ABCDEF = _mm512_min_ps(vout4x0123456789ABCDEF, voutput_max); + vout5x0123456789ABCDEF = _mm512_min_ps(vout5x0123456789ABCDEF, voutput_max); + vout6x0123456789ABCDEF = _mm512_min_ps(vout6x0123456789ABCDEF, voutput_max); if (nc >= 16) { + _mm512_storeu_ps(c6, vout6x0123456789ABCDEF); + _mm512_storeu_ps(c5, vout5x0123456789ABCDEF); + _mm512_storeu_ps(c4, vout4x0123456789ABCDEF); _mm512_storeu_ps(c3, vout3x0123456789ABCDEF); _mm512_storeu_ps(c2, vout2x0123456789ABCDEF); _mm512_storeu_ps(c1, vout1x0123456789ABCDEF); 
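[Note] The hunks above widen the SKX GEMM from 4 to 7 rows. Two details are worth calling out. First, the guard for row 3 changes from `mr != 4` to `mr < 4`: with a maximum of 4 rows, "not 4" and "less than 4" were equivalent, but with MR = 7 the row-3 pointers must stay valid for mr = 5..7, so the test has to become a strict less-than. Second, every added row follows the same clamping pattern: row r's pointers alias row r-1 whenever r >= mr (the alternating `mr <= 4` / `mr < 6` / `mr <= 6` tests all encode exactly that predicate), so out-of-range rows redundantly recompute row mr-1 and store to the same location instead of touching invalid memory. A standalone sketch of the pattern, with illustrative names (clamp_row_pointers, MAX_MR) that are not part of XNNPACK:

#include <stddef.h>
#include <stdint.h>

#define MAX_MR 7  /* illustrative; matches the new kernels' assert(mr <= 7) */

/* Sketch only: derive per-row input/output pointers for a MAX_MR-row GEMM
   microkernel, aliasing any row beyond `mr` to the previous row. */
static void clamp_row_pointers(size_t mr, size_t a_stride, size_t cm_stride,
                               const int8_t* a[MAX_MR], float* c[MAX_MR]) {
  for (size_t r = 1; r < MAX_MR; r++) {
    a[r] = (const int8_t*) ((uintptr_t) a[r - 1] + a_stride);
    c[r] = (float*) ((uintptr_t) c[r - 1] + cm_stride);
    if (r >= mr) {  /* the predicate the unrolled mr tests above encode */
      a[r] = a[r - 1];  /* the extra row reads valid data ...            */
      c[r] = c[r - 1];  /* ... and rewrites row mr-1 with equal values   */
    }
  }
}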
@@ -2016,16 +2097,25 @@ void xnn_qd8_f32_qc8w_gemm_minmax_ukernel_4x16c8__avx512skx( a1 = (const int8_t*) ((uintptr_t) a1 - kc); a2 = (const int8_t*) ((uintptr_t) a2 - kc); a3 = (const int8_t*) ((uintptr_t) a3 - kc); + a4 = (const int8_t*) ((uintptr_t) a4 - kc); + a5 = (const int8_t*) ((uintptr_t) a5 - kc); + a6 = (const int8_t*) ((uintptr_t) a6 - kc); c0 = (float*) ((uintptr_t) c0 + cn_stride); c1 = (float*) ((uintptr_t) c1 + cn_stride); c2 = (float*) ((uintptr_t) c2 + cn_stride); c3 = (float*) ((uintptr_t) c3 + cn_stride); + c4 = (float*) ((uintptr_t) c4 + cn_stride); + c5 = (float*) ((uintptr_t) c5 + cn_stride); + c6 = (float*) ((uintptr_t) c6 + cn_stride); nc -= 16; } else { // Prepare mask for valid 32-bit elements (depends on nc). const __mmask16 vmask = _cvtu32_mask16((UINT32_C(1) << nc) - 1); + _mm512_mask_storeu_ps(c6, vmask, vout6x0123456789ABCDEF); + _mm512_mask_storeu_ps(c5, vmask, vout5x0123456789ABCDEF); + _mm512_mask_storeu_ps(c4, vmask, vout4x0123456789ABCDEF); _mm512_mask_storeu_ps(c3, vmask, vout3x0123456789ABCDEF); _mm512_mask_storeu_ps(c2, vmask, vout2x0123456789ABCDEF); _mm512_mask_storeu_ps(c1, vmask, vout1x0123456789ABCDEF); @@ -2149,7 +2239,7 @@ void xnn_qd8_f32_qc8w_igemm_minmax_ukernel_1x16c8__avx512skx( } while (nc != 0); } -void xnn_qd8_f32_qc8w_igemm_minmax_ukernel_4x16c8__avx512skx( +void xnn_qd8_f32_qc8w_igemm_minmax_ukernel_7x16c8__avx512skx( size_t mr, size_t nc, size_t kc, @@ -2165,7 +2255,7 @@ void xnn_qd8_f32_qc8w_igemm_minmax_ukernel_4x16c8__avx512skx( const struct xnn_qd8_quantization_params quantization_params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(mr != 0); - assert(mr <= 4); + assert(mr <= 7); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(int8_t) == 0); @@ -2184,9 +2274,21 @@ void xnn_qd8_f32_qc8w_igemm_minmax_ukernel_4x16c8__avx512skx( c2 = c1; } float* c3 = (float*) ((uintptr_t) c2 + cm_stride); - if XNN_UNPREDICTABLE(mr != 4) { + if XNN_UNPREDICTABLE(mr < 4) { c3 = c2; } + float* c4 = (float*) ((uintptr_t) c3 + cm_stride); + if XNN_UNPREDICTABLE(mr <= 4) { + c4 = c3; + } + float* c5 = (float*) ((uintptr_t) c4 + cm_stride); + if XNN_UNPREDICTABLE(mr < 6) { + c5 = c4; + } + float* c6 = (float*) ((uintptr_t) c5 + cm_stride); + if XNN_UNPREDICTABLE(mr <= 6) { + c6 = c5; + } const __mmask16 vbias_mask = _cvtu32_mask16(0x1111); const __m512i vinput_zero_point = _mm512_set1_epi32((int) quantization_params->zero_point); @@ -2214,6 +2316,18 @@ void xnn_qd8_f32_qc8w_igemm_minmax_ukernel_4x16c8__avx512skx( __m512i vacc3x4567 = vacc0x4567; __m512i vacc3x89AB = vacc0x89AB; __m512i vacc3xCDEF = vacc0xCDEF; + __m512i vacc4x0123 = vacc0x0123; + __m512i vacc4x4567 = vacc0x4567; + __m512i vacc4x89AB = vacc0x89AB; + __m512i vacc4xCDEF = vacc0xCDEF; + __m512i vacc5x0123 = vacc0x0123; + __m512i vacc5x4567 = vacc0x4567; + __m512i vacc5x89AB = vacc0x89AB; + __m512i vacc5xCDEF = vacc0xCDEF; + __m512i vacc6x0123 = vacc0x0123; + __m512i vacc6x4567 = vacc0x4567; + __m512i vacc6x89AB = vacc0x89AB; + __m512i vacc6xCDEF = vacc0xCDEF; w = (const void*) ((const int32_t*) w + 16); size_t p = ks; @@ -2234,7 +2348,19 @@ void xnn_qd8_f32_qc8w_igemm_minmax_ukernel_4x16c8__avx512skx( if XNN_UNPREDICTABLE(a3 != zero) { a3 = (const int8_t*) ((uintptr_t) a3 + a_offset); } - a += 4; + const int8_t* restrict a4 = a[4]; + if XNN_UNPREDICTABLE(a4 != zero) { + a4 = (const int8_t*) ((uintptr_t) a4 + a_offset); + } + const int8_t* restrict a5 = a[5]; + if XNN_UNPREDICTABLE(a5 != zero) { + a5 = (const int8_t*) ((uintptr_t) a5 + a_offset); + } + const int8_t* restrict a6 = 
a[6]; + if XNN_UNPREDICTABLE(a6 != zero) { + a6 = (const int8_t*) ((uintptr_t) a6 + a_offset); + } + a += 7; size_t k = 0; while (k < kc) { @@ -2246,6 +2372,12 @@ void xnn_qd8_f32_qc8w_igemm_minmax_ukernel_4x16c8__avx512skx( a2 += 8; const __m512i va3 = _mm512_broadcast_i32x4(_mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) a3))); a3 += 8; + const __m512i va4 = _mm512_broadcast_i32x4(_mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) a4))); + a4 += 8; + const __m512i va5 = _mm512_broadcast_i32x4(_mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) a5))); + a5 += 8; + const __m512i va6 = _mm512_broadcast_i32x4(_mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) a6))); + a6 += 8; const __m512i vb0123 = _mm512_cvtepi8_epi16(_mm256_load_si256((const __m256i*) w)); @@ -2253,29 +2385,41 @@ void xnn_qd8_f32_qc8w_igemm_minmax_ukernel_4x16c8__avx512skx( vacc1x0123 = _mm512_add_epi32(vacc1x0123, _mm512_madd_epi16(va1, vb0123)); vacc2x0123 = _mm512_add_epi32(vacc2x0123, _mm512_madd_epi16(va2, vb0123)); vacc3x0123 = _mm512_add_epi32(vacc3x0123, _mm512_madd_epi16(va3, vb0123)); + vacc4x0123 = _mm512_add_epi32(vacc4x0123, _mm512_madd_epi16(va4, vb0123)); + vacc5x0123 = _mm512_add_epi32(vacc5x0123, _mm512_madd_epi16(va5, vb0123)); + vacc6x0123 = _mm512_add_epi32(vacc6x0123, _mm512_madd_epi16(va6, vb0123)); const __m512i vb4567 = _mm512_cvtepi8_epi16(_mm256_load_si256((const __m256i*) ((const int8_t*) w + 32))); vacc0x4567 = _mm512_add_epi32(vacc0x4567, _mm512_madd_epi16(va0, vb4567)); vacc1x4567 = _mm512_add_epi32(vacc1x4567, _mm512_madd_epi16(va1, vb4567)); vacc2x4567 = _mm512_add_epi32(vacc2x4567, _mm512_madd_epi16(va2, vb4567)); vacc3x4567 = _mm512_add_epi32(vacc3x4567, _mm512_madd_epi16(va3, vb4567)); + vacc4x4567 = _mm512_add_epi32(vacc4x4567, _mm512_madd_epi16(va4, vb4567)); + vacc5x4567 = _mm512_add_epi32(vacc5x4567, _mm512_madd_epi16(va5, vb4567)); + vacc6x4567 = _mm512_add_epi32(vacc6x4567, _mm512_madd_epi16(va6, vb4567)); const __m512i vb89AB = _mm512_cvtepi8_epi16(_mm256_load_si256((const __m256i*) ((const int8_t*) w + 64))); vacc0x89AB = _mm512_add_epi32(vacc0x89AB, _mm512_madd_epi16(va0, vb89AB)); vacc1x89AB = _mm512_add_epi32(vacc1x89AB, _mm512_madd_epi16(va1, vb89AB)); vacc2x89AB = _mm512_add_epi32(vacc2x89AB, _mm512_madd_epi16(va2, vb89AB)); vacc3x89AB = _mm512_add_epi32(vacc3x89AB, _mm512_madd_epi16(va3, vb89AB)); + vacc4x89AB = _mm512_add_epi32(vacc4x89AB, _mm512_madd_epi16(va4, vb89AB)); + vacc5x89AB = _mm512_add_epi32(vacc5x89AB, _mm512_madd_epi16(va5, vb89AB)); + vacc6x89AB = _mm512_add_epi32(vacc6x89AB, _mm512_madd_epi16(va6, vb89AB)); const __m512i vbCDEF = _mm512_cvtepi8_epi16(_mm256_load_si256((const __m256i*) ((const int8_t*) w + 96))); vacc0xCDEF = _mm512_add_epi32(vacc0xCDEF, _mm512_madd_epi16(va0, vbCDEF)); vacc1xCDEF = _mm512_add_epi32(vacc1xCDEF, _mm512_madd_epi16(va1, vbCDEF)); vacc2xCDEF = _mm512_add_epi32(vacc2xCDEF, _mm512_madd_epi16(va2, vbCDEF)); vacc3xCDEF = _mm512_add_epi32(vacc3xCDEF, _mm512_madd_epi16(va3, vbCDEF)); + vacc4xCDEF = _mm512_add_epi32(vacc4xCDEF, _mm512_madd_epi16(va4, vbCDEF)); + vacc5xCDEF = _mm512_add_epi32(vacc5xCDEF, _mm512_madd_epi16(va5, vbCDEF)); + vacc6xCDEF = _mm512_add_epi32(vacc6xCDEF, _mm512_madd_epi16(va6, vbCDEF)); w = (const void*) ((const int8_t*) w + 128); k += 8 * sizeof(int8_t); } - p -= 4 * sizeof(void*); + p -= 7 * sizeof(void*); } while (p != 0); const __m512i vacc0x04152637 = _mm512_add_epi32(_mm512_unpacklo_epi32(vacc0x0123, vacc0x4567), _mm512_unpackhi_epi32(vacc0x0123, vacc0x4567)); @@ -2286,27 +2430,45 @@ void 
xnn_qd8_f32_qc8w_igemm_minmax_ukernel_4x16c8__avx512skx( const __m512i vacc2x8C9DAEBF = _mm512_add_epi32(_mm512_unpacklo_epi32(vacc2x89AB, vacc2xCDEF), _mm512_unpackhi_epi32(vacc2x89AB, vacc2xCDEF)); const __m512i vacc3x04152637 = _mm512_add_epi32(_mm512_unpacklo_epi32(vacc3x0123, vacc3x4567), _mm512_unpackhi_epi32(vacc3x0123, vacc3x4567)); const __m512i vacc3x8C9DAEBF = _mm512_add_epi32(_mm512_unpacklo_epi32(vacc3x89AB, vacc3xCDEF), _mm512_unpackhi_epi32(vacc3x89AB, vacc3xCDEF)); + const __m512i vacc4x04152637 = _mm512_add_epi32(_mm512_unpacklo_epi32(vacc4x0123, vacc4x4567), _mm512_unpackhi_epi32(vacc4x0123, vacc4x4567)); + const __m512i vacc4x8C9DAEBF = _mm512_add_epi32(_mm512_unpacklo_epi32(vacc4x89AB, vacc4xCDEF), _mm512_unpackhi_epi32(vacc4x89AB, vacc4xCDEF)); + const __m512i vacc5x04152637 = _mm512_add_epi32(_mm512_unpacklo_epi32(vacc5x0123, vacc5x4567), _mm512_unpackhi_epi32(vacc5x0123, vacc5x4567)); + const __m512i vacc5x8C9DAEBF = _mm512_add_epi32(_mm512_unpacklo_epi32(vacc5x89AB, vacc5xCDEF), _mm512_unpackhi_epi32(vacc5x89AB, vacc5xCDEF)); + const __m512i vacc6x04152637 = _mm512_add_epi32(_mm512_unpacklo_epi32(vacc6x0123, vacc6x4567), _mm512_unpackhi_epi32(vacc6x0123, vacc6x4567)); + const __m512i vacc6x8C9DAEBF = _mm512_add_epi32(_mm512_unpacklo_epi32(vacc6x89AB, vacc6xCDEF), _mm512_unpackhi_epi32(vacc6x89AB, vacc6xCDEF)); __m512i vacc0x084C195D2A6E3B7F = _mm512_add_epi32(_mm512_unpacklo_epi32(vacc0x04152637, vacc0x8C9DAEBF), _mm512_unpackhi_epi32(vacc0x04152637, vacc0x8C9DAEBF)); __m512i vacc1x084C195D2A6E3B7F = _mm512_add_epi32(_mm512_unpacklo_epi32(vacc1x04152637, vacc1x8C9DAEBF), _mm512_unpackhi_epi32(vacc1x04152637, vacc1x8C9DAEBF)); __m512i vacc2x084C195D2A6E3B7F = _mm512_add_epi32(_mm512_unpacklo_epi32(vacc2x04152637, vacc2x8C9DAEBF), _mm512_unpackhi_epi32(vacc2x04152637, vacc2x8C9DAEBF)); __m512i vacc3x084C195D2A6E3B7F = _mm512_add_epi32(_mm512_unpacklo_epi32(vacc3x04152637, vacc3x8C9DAEBF), _mm512_unpackhi_epi32(vacc3x04152637, vacc3x8C9DAEBF)); + __m512i vacc4x084C195D2A6E3B7F = _mm512_add_epi32(_mm512_unpacklo_epi32(vacc4x04152637, vacc4x8C9DAEBF), _mm512_unpackhi_epi32(vacc4x04152637, vacc4x8C9DAEBF)); + __m512i vacc5x084C195D2A6E3B7F = _mm512_add_epi32(_mm512_unpacklo_epi32(vacc5x04152637, vacc5x8C9DAEBF), _mm512_unpackhi_epi32(vacc5x04152637, vacc5x8C9DAEBF)); + __m512i vacc6x084C195D2A6E3B7F = _mm512_add_epi32(_mm512_unpacklo_epi32(vacc6x04152637, vacc6x8C9DAEBF), _mm512_unpackhi_epi32(vacc6x04152637, vacc6x8C9DAEBF)); __m512 vscaled0x084C195D2A6E3B7F = _mm512_cvtepi32_ps(vacc0x084C195D2A6E3B7F); __m512 vscaled1x084C195D2A6E3B7F = _mm512_cvtepi32_ps(vacc1x084C195D2A6E3B7F); __m512 vscaled2x084C195D2A6E3B7F = _mm512_cvtepi32_ps(vacc2x084C195D2A6E3B7F); __m512 vscaled3x084C195D2A6E3B7F = _mm512_cvtepi32_ps(vacc3x084C195D2A6E3B7F); + __m512 vscaled4x084C195D2A6E3B7F = _mm512_cvtepi32_ps(vacc4x084C195D2A6E3B7F); + __m512 vscaled5x084C195D2A6E3B7F = _mm512_cvtepi32_ps(vacc5x084C195D2A6E3B7F); + __m512 vscaled6x084C195D2A6E3B7F = _mm512_cvtepi32_ps(vacc6x084C195D2A6E3B7F); __m512 vout0x0123456789ABCDEF = _mm512_permutexvar_ps(_mm512_set_epi32(15, 11, 7, 3, 13, 9, 5, 1, 14, 10, 6, 2, 12, 8, 4, 0), vscaled0x084C195D2A6E3B7F); __m512 vout1x0123456789ABCDEF = _mm512_permutexvar_ps(_mm512_set_epi32(15, 11, 7, 3, 13, 9, 5, 1, 14, 10, 6, 2, 12, 8, 4, 0), vscaled1x084C195D2A6E3B7F); __m512 vout2x0123456789ABCDEF = _mm512_permutexvar_ps(_mm512_set_epi32(15, 11, 7, 3, 13, 9, 5, 1, 14, 10, 6, 2, 12, 8, 4, 0), vscaled2x084C195D2A6E3B7F); __m512 vout3x0123456789ABCDEF = 
_mm512_permutexvar_ps(_mm512_set_epi32(15, 11, 7, 3, 13, 9, 5, 1, 14, 10, 6, 2, 12, 8, 4, 0), vscaled3x084C195D2A6E3B7F); + __m512 vout4x0123456789ABCDEF = _mm512_permutexvar_ps(_mm512_set_epi32(15, 11, 7, 3, 13, 9, 5, 1, 14, 10, 6, 2, 12, 8, 4, 0), vscaled4x084C195D2A6E3B7F); + __m512 vout5x0123456789ABCDEF = _mm512_permutexvar_ps(_mm512_set_epi32(15, 11, 7, 3, 13, 9, 5, 1, 14, 10, 6, 2, 12, 8, 4, 0), vscaled5x084C195D2A6E3B7F); + __m512 vout6x0123456789ABCDEF = _mm512_permutexvar_ps(_mm512_set_epi32(15, 11, 7, 3, 13, 9, 5, 1, 14, 10, 6, 2, 12, 8, 4, 0), vscaled6x084C195D2A6E3B7F); __m512 vinput_inv_scale = _mm512_set1_ps(quantization_params->inv_scale); vout0x0123456789ABCDEF = _mm512_mul_ps(vout0x0123456789ABCDEF, vinput_inv_scale); vout1x0123456789ABCDEF = _mm512_mul_ps(vout1x0123456789ABCDEF, vinput_inv_scale); vout2x0123456789ABCDEF = _mm512_mul_ps(vout2x0123456789ABCDEF, vinput_inv_scale); vout3x0123456789ABCDEF = _mm512_mul_ps(vout3x0123456789ABCDEF, vinput_inv_scale); + vout4x0123456789ABCDEF = _mm512_mul_ps(vout4x0123456789ABCDEF, vinput_inv_scale); + vout5x0123456789ABCDEF = _mm512_mul_ps(vout5x0123456789ABCDEF, vinput_inv_scale); + vout6x0123456789ABCDEF = _mm512_mul_ps(vout6x0123456789ABCDEF, vinput_inv_scale); const __m512 vfilter_output_scale0123456789ABCDEF = _mm512_load_ps((const float*) w); const __m512 vbias0123456789ABCDEF = _mm512_load_ps((const float*) w + 16); @@ -2315,18 +2477,30 @@ void xnn_qd8_f32_qc8w_igemm_minmax_ukernel_4x16c8__avx512skx( vout1x0123456789ABCDEF = _mm512_fmadd_ps(vout1x0123456789ABCDEF, vfilter_output_scale0123456789ABCDEF, vbias0123456789ABCDEF); vout2x0123456789ABCDEF = _mm512_fmadd_ps(vout2x0123456789ABCDEF, vfilter_output_scale0123456789ABCDEF, vbias0123456789ABCDEF); vout3x0123456789ABCDEF = _mm512_fmadd_ps(vout3x0123456789ABCDEF, vfilter_output_scale0123456789ABCDEF, vbias0123456789ABCDEF); + vout4x0123456789ABCDEF = _mm512_fmadd_ps(vout4x0123456789ABCDEF, vfilter_output_scale0123456789ABCDEF, vbias0123456789ABCDEF); + vout5x0123456789ABCDEF = _mm512_fmadd_ps(vout5x0123456789ABCDEF, vfilter_output_scale0123456789ABCDEF, vbias0123456789ABCDEF); + vout6x0123456789ABCDEF = _mm512_fmadd_ps(vout6x0123456789ABCDEF, vfilter_output_scale0123456789ABCDEF, vbias0123456789ABCDEF); vout0x0123456789ABCDEF = _mm512_max_ps(vout0x0123456789ABCDEF, voutput_min); vout1x0123456789ABCDEF = _mm512_max_ps(vout1x0123456789ABCDEF, voutput_min); vout2x0123456789ABCDEF = _mm512_max_ps(vout2x0123456789ABCDEF, voutput_min); vout3x0123456789ABCDEF = _mm512_max_ps(vout3x0123456789ABCDEF, voutput_min); + vout4x0123456789ABCDEF = _mm512_max_ps(vout4x0123456789ABCDEF, voutput_min); + vout5x0123456789ABCDEF = _mm512_max_ps(vout5x0123456789ABCDEF, voutput_min); + vout6x0123456789ABCDEF = _mm512_max_ps(vout6x0123456789ABCDEF, voutput_min); vout0x0123456789ABCDEF = _mm512_min_ps(vout0x0123456789ABCDEF, voutput_max); vout1x0123456789ABCDEF = _mm512_min_ps(vout1x0123456789ABCDEF, voutput_max); vout2x0123456789ABCDEF = _mm512_min_ps(vout2x0123456789ABCDEF, voutput_max); vout3x0123456789ABCDEF = _mm512_min_ps(vout3x0123456789ABCDEF, voutput_max); + vout4x0123456789ABCDEF = _mm512_min_ps(vout4x0123456789ABCDEF, voutput_max); + vout5x0123456789ABCDEF = _mm512_min_ps(vout5x0123456789ABCDEF, voutput_max); + vout6x0123456789ABCDEF = _mm512_min_ps(vout6x0123456789ABCDEF, voutput_max); if (nc >= 16) { + _mm512_storeu_ps(c6, vout6x0123456789ABCDEF); + _mm512_storeu_ps(c5, vout5x0123456789ABCDEF); + _mm512_storeu_ps(c4, vout4x0123456789ABCDEF); _mm512_storeu_ps(c3, 
vout3x0123456789ABCDEF); _mm512_storeu_ps(c2, vout2x0123456789ABCDEF); _mm512_storeu_ps(c1, vout1x0123456789ABCDEF); @@ -2336,12 +2510,18 @@ void xnn_qd8_f32_qc8w_igemm_minmax_ukernel_4x16c8__avx512skx( c1 = (float*) ((uintptr_t) c1 + cn_stride); c2 = (float*) ((uintptr_t) c2 + cn_stride); c3 = (float*) ((uintptr_t) c3 + cn_stride); + c4 = (float*) ((uintptr_t) c4 + cn_stride); + c5 = (float*) ((uintptr_t) c5 + cn_stride); + c6 = (float*) ((uintptr_t) c6 + cn_stride); a = (const int8_t**restrict) ((uintptr_t) a - ks); nc -= 16; } else { // Prepare mask for valid 32-bit elements (depends on nc). const __mmask16 vmask = _cvtu32_mask16((UINT32_C(1) << nc) - 1); + _mm512_mask_storeu_ps(c6, vmask, vout6x0123456789ABCDEF); + _mm512_mask_storeu_ps(c5, vmask, vout5x0123456789ABCDEF); + _mm512_mask_storeu_ps(c4, vmask, vout4x0123456789ABCDEF); _mm512_mask_storeu_ps(c3, vmask, vout3x0123456789ABCDEF); _mm512_mask_storeu_ps(c2, vmask, vout2x0123456789ABCDEF); _mm512_mask_storeu_ps(c1, vmask, vout1x0123456789ABCDEF); diff --git a/src/amalgam/gen/avx512vnni.c b/src/amalgam/gen/avx512vnni.c index 1423aa71f79..1eeba7ac126 100644 --- a/src/amalgam/gen/avx512vnni.c +++ b/src/amalgam/gen/avx512vnni.c @@ -49,9 +49,8 @@ void xnn_qd8_f32_qc8w_gemm_minmax_ukernel_1x16c4__avx512vnni( w = (const int32_t*) w + 16; size_t k = kc; - do { + while (k >= 4 * sizeof(int8_t)) { __m512i va0x0123 = _mm512_set1_epi32((int) unaligned_load_u32(a0)); - a0 += 4; va0x0123 = _mm512_xor_epi32(va0x0123, vsign_mask); @@ -62,7 +61,7 @@ void xnn_qd8_f32_qc8w_gemm_minmax_ukernel_1x16c4__avx512vnni( w = (const int8_t*) w + 64; k -= 4 * sizeof(int8_t); - } while (k != 0); + } __m512 vscaled0x0123456789ABCDEF = _mm512_cvtepi32_ps(vacc0x0123456789ABCDEF); @@ -95,7 +94,7 @@ void xnn_qd8_f32_qc8w_gemm_minmax_ukernel_1x16c4__avx512vnni( } while (nc != 0); } -void xnn_qd8_f32_qc8w_gemm_minmax_ukernel_4x16c4__avx512vnni( +void xnn_qd8_f32_qc8w_gemm_minmax_ukernel_7x16c4__avx512vnni( size_t mr, size_t nc, size_t kc, @@ -109,7 +108,7 @@ void xnn_qd8_f32_qc8w_gemm_minmax_ukernel_4x16c4__avx512vnni( const struct xnn_qd8_quantization_params quantization_params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(mr != 0); - assert(mr <= 4); + assert(mr <= 7); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(int8_t) == 0); @@ -134,15 +133,36 @@ void xnn_qd8_f32_qc8w_gemm_minmax_ukernel_4x16c4__avx512vnni( } const int8_t* a3 = (const int8_t*) ((uintptr_t) a2 + a_stride); float* c3 = (float*) ((uintptr_t) c2 + cm_stride); - if XNN_UNPREDICTABLE(mr != 4) { + if XNN_UNPREDICTABLE(mr < 4) { a3 = a2; c3 = c2; } + const int8_t* a4 = (const int8_t*) ((uintptr_t) a3 + a_stride); + float* c4 = (float*) ((uintptr_t) c3 + cm_stride); + if XNN_UNPREDICTABLE(mr <= 4) { + a4 = a3; + c4 = c3; + } + const int8_t* a5 = (const int8_t*) ((uintptr_t) a4 + a_stride); + float* c5 = (float*) ((uintptr_t) c4 + cm_stride); + if XNN_UNPREDICTABLE(mr < 6) { + a5 = a4; + c5 = c4; + } + const int8_t* a6 = (const int8_t*) ((uintptr_t) a5 + a_stride); + float* c6 = (float*) ((uintptr_t) c5 + cm_stride); + if XNN_UNPREDICTABLE(mr <= 6) { + a6 = a5; + c6 = c5; + } const __m512i vinput_zero_point0 = _mm512_set1_epi32((int) quantization_params[0].zero_point + 128); const __m512i vinput_zero_point1 = _mm512_set1_epi32((int) quantization_params[1].zero_point + 128); const __m512i vinput_zero_point2 = _mm512_set1_epi32((int) quantization_params[2].zero_point + 128); const __m512i vinput_zero_point3 = _mm512_set1_epi32((int) quantization_params[3].zero_point + 128); + const __m512i 
vinput_zero_point4 = _mm512_set1_epi32((int) quantization_params[4].zero_point + 128); + const __m512i vinput_zero_point5 = _mm512_set1_epi32((int) quantization_params[5].zero_point + 128); + const __m512i vinput_zero_point6 = _mm512_set1_epi32((int) quantization_params[6].zero_point + 128); const __m512 voutput_min = _mm512_set1_ps(params->avx512vnni.min); const __m512 voutput_max = _mm512_set1_ps(params->avx512vnni.max); const __m512i vsign_mask = _mm512_load_si512(params->avx512vnni.sign_mask); @@ -152,24 +172,35 @@ void xnn_qd8_f32_qc8w_gemm_minmax_ukernel_4x16c4__avx512vnni( __m512i vacc1x0123456789ABCDEF = _mm512_mullo_epi32(vksum0123456789ABCDEF, vinput_zero_point1); __m512i vacc2x0123456789ABCDEF = _mm512_mullo_epi32(vksum0123456789ABCDEF, vinput_zero_point2); __m512i vacc3x0123456789ABCDEF = _mm512_mullo_epi32(vksum0123456789ABCDEF, vinput_zero_point3); + __m512i vacc4x0123456789ABCDEF = _mm512_mullo_epi32(vksum0123456789ABCDEF, vinput_zero_point4); + __m512i vacc5x0123456789ABCDEF = _mm512_mullo_epi32(vksum0123456789ABCDEF, vinput_zero_point5); + __m512i vacc6x0123456789ABCDEF = _mm512_mullo_epi32(vksum0123456789ABCDEF, vinput_zero_point6); w = (const int32_t*) w + 16; size_t k = kc; - do { + while (k >= 4 * sizeof(int8_t)) { __m512i va0x0123 = _mm512_set1_epi32((int) unaligned_load_u32(a0)); - __m512i va1x0123 = _mm512_set1_epi32((int) unaligned_load_u32(a1)); - __m512i va2x0123 = _mm512_set1_epi32((int) unaligned_load_u32(a2)); - __m512i va3x0123 = _mm512_set1_epi32((int) unaligned_load_u32(a3)); - a0 += 4; + __m512i va1x0123 = _mm512_set1_epi32((int) unaligned_load_u32(a1)); a1 += 4; + __m512i va2x0123 = _mm512_set1_epi32((int) unaligned_load_u32(a2)); a2 += 4; + __m512i va3x0123 = _mm512_set1_epi32((int) unaligned_load_u32(a3)); a3 += 4; + __m512i va4x0123 = _mm512_set1_epi32((int) unaligned_load_u32(a4)); + a4 += 4; + __m512i va5x0123 = _mm512_set1_epi32((int) unaligned_load_u32(a5)); + a5 += 4; + __m512i va6x0123 = _mm512_set1_epi32((int) unaligned_load_u32(a6)); + a6 += 4; va0x0123 = _mm512_xor_epi32(va0x0123, vsign_mask); va1x0123 = _mm512_xor_epi32(va1x0123, vsign_mask); va2x0123 = _mm512_xor_epi32(va2x0123, vsign_mask); va3x0123 = _mm512_xor_epi32(va3x0123, vsign_mask); + va4x0123 = _mm512_xor_epi32(va4x0123, vsign_mask); + va5x0123 = _mm512_xor_epi32(va5x0123, vsign_mask); + va6x0123 = _mm512_xor_epi32(va6x0123, vsign_mask); const __m512i vb0123456789ABCDEF = _mm512_load_si512(w); @@ -177,20 +208,29 @@ void xnn_qd8_f32_qc8w_gemm_minmax_ukernel_4x16c4__avx512vnni( vacc1x0123456789ABCDEF = _mm512_dpbusd_epi32(vacc1x0123456789ABCDEF, va1x0123, vb0123456789ABCDEF); vacc2x0123456789ABCDEF = _mm512_dpbusd_epi32(vacc2x0123456789ABCDEF, va2x0123, vb0123456789ABCDEF); vacc3x0123456789ABCDEF = _mm512_dpbusd_epi32(vacc3x0123456789ABCDEF, va3x0123, vb0123456789ABCDEF); + vacc4x0123456789ABCDEF = _mm512_dpbusd_epi32(vacc4x0123456789ABCDEF, va4x0123, vb0123456789ABCDEF); + vacc5x0123456789ABCDEF = _mm512_dpbusd_epi32(vacc5x0123456789ABCDEF, va5x0123, vb0123456789ABCDEF); + vacc6x0123456789ABCDEF = _mm512_dpbusd_epi32(vacc6x0123456789ABCDEF, va6x0123, vb0123456789ABCDEF); w = (const int8_t*) w + 64; k -= 4 * sizeof(int8_t); - } while (k != 0); + } __m512 vscaled0x0123456789ABCDEF = _mm512_cvtepi32_ps(vacc0x0123456789ABCDEF); __m512 vscaled1x0123456789ABCDEF = _mm512_cvtepi32_ps(vacc1x0123456789ABCDEF); __m512 vscaled2x0123456789ABCDEF = _mm512_cvtepi32_ps(vacc2x0123456789ABCDEF); __m512 vscaled3x0123456789ABCDEF = _mm512_cvtepi32_ps(vacc3x0123456789ABCDEF); + __m512 
vscaled4x0123456789ABCDEF = _mm512_cvtepi32_ps(vacc4x0123456789ABCDEF); + __m512 vscaled5x0123456789ABCDEF = _mm512_cvtepi32_ps(vacc5x0123456789ABCDEF); + __m512 vscaled6x0123456789ABCDEF = _mm512_cvtepi32_ps(vacc6x0123456789ABCDEF); vscaled0x0123456789ABCDEF = _mm512_mul_ps(vscaled0x0123456789ABCDEF, _mm512_set1_ps(quantization_params[0].inv_scale)); vscaled1x0123456789ABCDEF = _mm512_mul_ps(vscaled1x0123456789ABCDEF, _mm512_set1_ps(quantization_params[1].inv_scale)); vscaled2x0123456789ABCDEF = _mm512_mul_ps(vscaled2x0123456789ABCDEF, _mm512_set1_ps(quantization_params[2].inv_scale)); vscaled3x0123456789ABCDEF = _mm512_mul_ps(vscaled3x0123456789ABCDEF, _mm512_set1_ps(quantization_params[3].inv_scale)); + vscaled4x0123456789ABCDEF = _mm512_mul_ps(vscaled4x0123456789ABCDEF, _mm512_set1_ps(quantization_params[4].inv_scale)); + vscaled5x0123456789ABCDEF = _mm512_mul_ps(vscaled5x0123456789ABCDEF, _mm512_set1_ps(quantization_params[5].inv_scale)); + vscaled6x0123456789ABCDEF = _mm512_mul_ps(vscaled6x0123456789ABCDEF, _mm512_set1_ps(quantization_params[6].inv_scale)); const __m512 vfilter_output_scale0123456789ABCDEF = _mm512_load_ps((const float*) w); const __m512 vbias0123456789ABCDEF = _mm512_load_ps((const float*) w + 16); @@ -200,18 +240,30 @@ void xnn_qd8_f32_qc8w_gemm_minmax_ukernel_4x16c4__avx512vnni( vscaled1x0123456789ABCDEF = _mm512_fmadd_ps(vscaled1x0123456789ABCDEF, vfilter_output_scale0123456789ABCDEF, vbias0123456789ABCDEF); vscaled2x0123456789ABCDEF = _mm512_fmadd_ps(vscaled2x0123456789ABCDEF, vfilter_output_scale0123456789ABCDEF, vbias0123456789ABCDEF); vscaled3x0123456789ABCDEF = _mm512_fmadd_ps(vscaled3x0123456789ABCDEF, vfilter_output_scale0123456789ABCDEF, vbias0123456789ABCDEF); + vscaled4x0123456789ABCDEF = _mm512_fmadd_ps(vscaled4x0123456789ABCDEF, vfilter_output_scale0123456789ABCDEF, vbias0123456789ABCDEF); + vscaled5x0123456789ABCDEF = _mm512_fmadd_ps(vscaled5x0123456789ABCDEF, vfilter_output_scale0123456789ABCDEF, vbias0123456789ABCDEF); + vscaled6x0123456789ABCDEF = _mm512_fmadd_ps(vscaled6x0123456789ABCDEF, vfilter_output_scale0123456789ABCDEF, vbias0123456789ABCDEF); vscaled0x0123456789ABCDEF = _mm512_max_ps(vscaled0x0123456789ABCDEF, voutput_min); vscaled1x0123456789ABCDEF = _mm512_max_ps(vscaled1x0123456789ABCDEF, voutput_min); vscaled2x0123456789ABCDEF = _mm512_max_ps(vscaled2x0123456789ABCDEF, voutput_min); vscaled3x0123456789ABCDEF = _mm512_max_ps(vscaled3x0123456789ABCDEF, voutput_min); + vscaled4x0123456789ABCDEF = _mm512_max_ps(vscaled4x0123456789ABCDEF, voutput_min); + vscaled5x0123456789ABCDEF = _mm512_max_ps(vscaled5x0123456789ABCDEF, voutput_min); + vscaled6x0123456789ABCDEF = _mm512_max_ps(vscaled6x0123456789ABCDEF, voutput_min); vscaled0x0123456789ABCDEF = _mm512_min_ps(vscaled0x0123456789ABCDEF, voutput_max); vscaled1x0123456789ABCDEF = _mm512_min_ps(vscaled1x0123456789ABCDEF, voutput_max); vscaled2x0123456789ABCDEF = _mm512_min_ps(vscaled2x0123456789ABCDEF, voutput_max); vscaled3x0123456789ABCDEF = _mm512_min_ps(vscaled3x0123456789ABCDEF, voutput_max); + vscaled4x0123456789ABCDEF = _mm512_min_ps(vscaled4x0123456789ABCDEF, voutput_max); + vscaled5x0123456789ABCDEF = _mm512_min_ps(vscaled5x0123456789ABCDEF, voutput_max); + vscaled6x0123456789ABCDEF = _mm512_min_ps(vscaled6x0123456789ABCDEF, voutput_max); if(nc >= 16) { + _mm512_storeu_ps(c6, vscaled6x0123456789ABCDEF); + _mm512_storeu_ps(c5, vscaled5x0123456789ABCDEF); + _mm512_storeu_ps(c4, vscaled4x0123456789ABCDEF); _mm512_storeu_ps(c3, vscaled3x0123456789ABCDEF); _mm512_storeu_ps(c2, 
vscaled2x0123456789ABCDEF); _mm512_storeu_ps(c1, vscaled1x0123456789ABCDEF); @@ -221,16 +273,25 @@ void xnn_qd8_f32_qc8w_gemm_minmax_ukernel_4x16c4__avx512vnni( a1 = (const int8_t*) ((uintptr_t) a1 - kc); a2 = (const int8_t*) ((uintptr_t) a2 - kc); a3 = (const int8_t*) ((uintptr_t) a3 - kc); + a4 = (const int8_t*) ((uintptr_t) a4 - kc); + a5 = (const int8_t*) ((uintptr_t) a5 - kc); + a6 = (const int8_t*) ((uintptr_t) a6 - kc); c0 = (float*) ((uintptr_t) c0 + cn_stride); c1 = (float*) ((uintptr_t) c1 + cn_stride); c2 = (float*) ((uintptr_t) c2 + cn_stride); c3 = (float*) ((uintptr_t) c3 + cn_stride); + c4 = (float*) ((uintptr_t) c4 + cn_stride); + c5 = (float*) ((uintptr_t) c5 + cn_stride); + c6 = (float*) ((uintptr_t) c6 + cn_stride); nc -= 16; } else { // Prepare mask for valid 32-bit elements (depends on nc). const __mmask16 vmask = _cvtu32_mask16((UINT32_C(1) << nc) - 1); + _mm512_mask_storeu_ps(c6, vmask, vscaled6x0123456789ABCDEF); + _mm512_mask_storeu_ps(c5, vmask, vscaled5x0123456789ABCDEF); + _mm512_mask_storeu_ps(c4, vmask, vscaled4x0123456789ABCDEF); _mm512_mask_storeu_ps(c3, vmask, vscaled3x0123456789ABCDEF); _mm512_mask_storeu_ps(c2, vmask, vscaled2x0123456789ABCDEF); _mm512_mask_storeu_ps(c1, vmask, vscaled1x0123456789ABCDEF); @@ -349,7 +410,7 @@ void xnn_qd8_f32_qc8w_igemm_minmax_ukernel_1x16c4__avx512vnni( } -void xnn_qd8_f32_qc8w_igemm_minmax_ukernel_4x16c4__avx512vnni( +void xnn_qd8_f32_qc8w_igemm_minmax_ukernel_7x16c4__avx512vnni( size_t mr, size_t nc, size_t kc, @@ -365,7 +426,7 @@ void xnn_qd8_f32_qc8w_igemm_minmax_ukernel_4x16c4__avx512vnni( const struct xnn_qd8_quantization_params quantization_params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(mr != 0); - assert(mr <= 4); + assert(mr <= 7); assert(nc != 0); assert(kc != 0); assert(kc % sizeof(int8_t) == 0); @@ -384,9 +445,21 @@ void xnn_qd8_f32_qc8w_igemm_minmax_ukernel_4x16c4__avx512vnni( c2 = c1; } float* c3 = (float*) ((uintptr_t) c2 + cm_stride); - if XNN_UNPREDICTABLE(mr != 4) { + if XNN_UNPREDICTABLE(mr < 4) { c3 = c2; } + float* c4 = (float*) ((uintptr_t) c3 + cm_stride); + if XNN_UNPREDICTABLE(mr <= 4) { + c4 = c3; + } + float* c5 = (float*) ((uintptr_t) c4 + cm_stride); + if XNN_UNPREDICTABLE(mr < 6) { + c5 = c4; + } + float* c6 = (float*) ((uintptr_t) c5 + cm_stride); + if XNN_UNPREDICTABLE(mr <= 6) { + c6 = c5; + } const __m512i vinput_zero_point = _mm512_set1_epi32((int) quantization_params->zero_point + 128); const __m512 vinput_scale = _mm512_set1_ps(quantization_params->inv_scale); @@ -399,6 +472,9 @@ void xnn_qd8_f32_qc8w_igemm_minmax_ukernel_4x16c4__avx512vnni( __m512i vacc1x0123456789ABCDEF = _mm512_mullo_epi32(vksum0123456789ABCDEF, vinput_zero_point); __m512i vacc2x0123456789ABCDEF = _mm512_mullo_epi32(vksum0123456789ABCDEF, vinput_zero_point); __m512i vacc3x0123456789ABCDEF = _mm512_mullo_epi32(vksum0123456789ABCDEF, vinput_zero_point); + __m512i vacc4x0123456789ABCDEF = _mm512_mullo_epi32(vksum0123456789ABCDEF, vinput_zero_point); + __m512i vacc5x0123456789ABCDEF = _mm512_mullo_epi32(vksum0123456789ABCDEF, vinput_zero_point); + __m512i vacc6x0123456789ABCDEF = _mm512_mullo_epi32(vksum0123456789ABCDEF, vinput_zero_point); w = (const int32_t*) w + 16; size_t p = ks; @@ -419,7 +495,19 @@ void xnn_qd8_f32_qc8w_igemm_minmax_ukernel_4x16c4__avx512vnni( if XNN_UNPREDICTABLE(a3 != zero) { a3 = (const int8_t*) ((uintptr_t) a3 + a_offset); } - a += 4; + const int8_t* restrict a4 = a[4]; + if XNN_UNPREDICTABLE(a4 != zero) { + a4 = (const int8_t*) ((uintptr_t) a4 + a_offset); + } + const int8_t* 
restrict a5 = a[5]; + if XNN_UNPREDICTABLE(a5 != zero) { + a5 = (const int8_t*) ((uintptr_t) a5 + a_offset); + } + const int8_t* restrict a6 = a[6]; + if XNN_UNPREDICTABLE(a6 != zero) { + a6 = (const int8_t*) ((uintptr_t) a6 + a_offset); + } + a += 7; size_t k = kc; while (k >= 8 * sizeof(int8_t)) { @@ -427,11 +515,17 @@ void xnn_qd8_f32_qc8w_igemm_minmax_ukernel_4x16c4__avx512vnni( __m512i va1x0123 = _mm512_set1_epi32((int) unaligned_load_u32(a1)); a1 += 4; __m512i va2x0123 = _mm512_set1_epi32((int) unaligned_load_u32(a2)); a2 += 4; __m512i va3x0123 = _mm512_set1_epi32((int) unaligned_load_u32(a3)); a3 += 4; + __m512i va4x0123 = _mm512_set1_epi32((int) unaligned_load_u32(a4)); a4 += 4; + __m512i va5x0123 = _mm512_set1_epi32((int) unaligned_load_u32(a5)); a5 += 4; + __m512i va6x0123 = _mm512_set1_epi32((int) unaligned_load_u32(a6)); a6 += 4; __m512i va0x4567 = _mm512_set1_epi32((int) unaligned_load_u32(a0)); a0 += 4; __m512i va1x4567 = _mm512_set1_epi32((int) unaligned_load_u32(a1)); a1 += 4; __m512i va2x4567 = _mm512_set1_epi32((int) unaligned_load_u32(a2)); a2 += 4; __m512i va3x4567 = _mm512_set1_epi32((int) unaligned_load_u32(a3)); a3 += 4; + __m512i va4x4567 = _mm512_set1_epi32((int) unaligned_load_u32(a4)); a4 += 4; + __m512i va5x4567 = _mm512_set1_epi32((int) unaligned_load_u32(a5)); a5 += 4; + __m512i va6x4567 = _mm512_set1_epi32((int) unaligned_load_u32(a6)); a6 += 4; va0x0123 = _mm512_xor_epi32(va0x0123, vsign_mask); va0x4567 = _mm512_xor_epi32(va0x4567, vsign_mask); @@ -441,6 +535,12 @@ void xnn_qd8_f32_qc8w_igemm_minmax_ukernel_4x16c4__avx512vnni( va2x4567 = _mm512_xor_epi32(va2x4567, vsign_mask); va3x0123 = _mm512_xor_epi32(va3x0123, vsign_mask); va3x4567 = _mm512_xor_epi32(va3x4567, vsign_mask); + va4x0123 = _mm512_xor_epi32(va4x0123, vsign_mask); + va4x4567 = _mm512_xor_epi32(va4x4567, vsign_mask); + va5x0123 = _mm512_xor_epi32(va5x0123, vsign_mask); + va5x4567 = _mm512_xor_epi32(va5x4567, vsign_mask); + va6x0123 = _mm512_xor_epi32(va6x0123, vsign_mask); + va6x4567 = _mm512_xor_epi32(va6x4567, vsign_mask); const __m512i vb0123456789ABCDEFx0123 = _mm512_load_si512(w); w = (const int8_t*) w + 64; @@ -451,10 +551,16 @@ void xnn_qd8_f32_qc8w_igemm_minmax_ukernel_4x16c4__avx512vnni( vacc1x0123456789ABCDEF = _mm512_dpbusd_epi32(vacc1x0123456789ABCDEF, va1x0123, vb0123456789ABCDEFx0123); vacc2x0123456789ABCDEF = _mm512_dpbusd_epi32(vacc2x0123456789ABCDEF, va2x0123, vb0123456789ABCDEFx0123); vacc3x0123456789ABCDEF = _mm512_dpbusd_epi32(vacc3x0123456789ABCDEF, va3x0123, vb0123456789ABCDEFx0123); + vacc4x0123456789ABCDEF = _mm512_dpbusd_epi32(vacc4x0123456789ABCDEF, va4x0123, vb0123456789ABCDEFx0123); + vacc5x0123456789ABCDEF = _mm512_dpbusd_epi32(vacc5x0123456789ABCDEF, va5x0123, vb0123456789ABCDEFx0123); + vacc6x0123456789ABCDEF = _mm512_dpbusd_epi32(vacc6x0123456789ABCDEF, va6x0123, vb0123456789ABCDEFx0123); vacc0x0123456789ABCDEF = _mm512_dpbusd_epi32(vacc0x0123456789ABCDEF, va0x4567, vb0123456789ABCDEFx4567); vacc1x0123456789ABCDEF = _mm512_dpbusd_epi32(vacc1x0123456789ABCDEF, va1x4567, vb0123456789ABCDEFx4567); vacc2x0123456789ABCDEF = _mm512_dpbusd_epi32(vacc2x0123456789ABCDEF, va2x4567, vb0123456789ABCDEFx4567); vacc3x0123456789ABCDEF = _mm512_dpbusd_epi32(vacc3x0123456789ABCDEF, va3x4567, vb0123456789ABCDEFx4567); + vacc4x0123456789ABCDEF = _mm512_dpbusd_epi32(vacc4x0123456789ABCDEF, va4x4567, vb0123456789ABCDEFx4567); + vacc5x0123456789ABCDEF = _mm512_dpbusd_epi32(vacc5x0123456789ABCDEF, va5x4567, vb0123456789ABCDEFx4567); + vacc6x0123456789ABCDEF = 
_mm512_dpbusd_epi32(vacc6x0123456789ABCDEF, va6x4567, vb0123456789ABCDEFx4567); k -= 8 * sizeof(int8_t); } @@ -463,11 +569,17 @@ void xnn_qd8_f32_qc8w_igemm_minmax_ukernel_4x16c4__avx512vnni( __m512i va1x0123 = _mm512_set1_epi32((int) unaligned_load_u32(a1)); a1 += 4; __m512i va2x0123 = _mm512_set1_epi32((int) unaligned_load_u32(a2)); a2 += 4; __m512i va3x0123 = _mm512_set1_epi32((int) unaligned_load_u32(a3)); a3 += 4; + __m512i va4x0123 = _mm512_set1_epi32((int) unaligned_load_u32(a4)); a4 += 4; + __m512i va5x0123 = _mm512_set1_epi32((int) unaligned_load_u32(a5)); a5 += 4; + __m512i va6x0123 = _mm512_set1_epi32((int) unaligned_load_u32(a6)); a6 += 4; va0x0123 = _mm512_xor_epi32(va0x0123, vsign_mask); va1x0123 = _mm512_xor_epi32(va1x0123, vsign_mask); va2x0123 = _mm512_xor_epi32(va2x0123, vsign_mask); va3x0123 = _mm512_xor_epi32(va3x0123, vsign_mask); + va4x0123 = _mm512_xor_epi32(va4x0123, vsign_mask); + va5x0123 = _mm512_xor_epi32(va5x0123, vsign_mask); + va6x0123 = _mm512_xor_epi32(va6x0123, vsign_mask); const __m512i vb0123456789ABCDEFx0123 = _mm512_load_si512(w); w = (const int8_t*) w + 64; @@ -476,19 +588,28 @@ void xnn_qd8_f32_qc8w_igemm_minmax_ukernel_4x16c4__avx512vnni( vacc1x0123456789ABCDEF = _mm512_dpbusd_epi32(vacc1x0123456789ABCDEF, va1x0123, vb0123456789ABCDEFx0123); vacc2x0123456789ABCDEF = _mm512_dpbusd_epi32(vacc2x0123456789ABCDEF, va2x0123, vb0123456789ABCDEFx0123); vacc3x0123456789ABCDEF = _mm512_dpbusd_epi32(vacc3x0123456789ABCDEF, va3x0123, vb0123456789ABCDEFx0123); + vacc4x0123456789ABCDEF = _mm512_dpbusd_epi32(vacc4x0123456789ABCDEF, va4x0123, vb0123456789ABCDEFx0123); + vacc5x0123456789ABCDEF = _mm512_dpbusd_epi32(vacc5x0123456789ABCDEF, va5x0123, vb0123456789ABCDEFx0123); + vacc6x0123456789ABCDEF = _mm512_dpbusd_epi32(vacc6x0123456789ABCDEF, va6x0123, vb0123456789ABCDEFx0123); } - p -= 4 * sizeof(void*); + p -= 7 * sizeof(void*); } while (p != 0); __m512 vscaled0x0123456789ABCDEF = _mm512_cvtepi32_ps(vacc0x0123456789ABCDEF); __m512 vscaled1x0123456789ABCDEF = _mm512_cvtepi32_ps(vacc1x0123456789ABCDEF); __m512 vscaled2x0123456789ABCDEF = _mm512_cvtepi32_ps(vacc2x0123456789ABCDEF); __m512 vscaled3x0123456789ABCDEF = _mm512_cvtepi32_ps(vacc3x0123456789ABCDEF); + __m512 vscaled4x0123456789ABCDEF = _mm512_cvtepi32_ps(vacc4x0123456789ABCDEF); + __m512 vscaled5x0123456789ABCDEF = _mm512_cvtepi32_ps(vacc5x0123456789ABCDEF); + __m512 vscaled6x0123456789ABCDEF = _mm512_cvtepi32_ps(vacc6x0123456789ABCDEF); vscaled0x0123456789ABCDEF = _mm512_mul_ps(vscaled0x0123456789ABCDEF, vinput_scale); vscaled1x0123456789ABCDEF = _mm512_mul_ps(vscaled1x0123456789ABCDEF, vinput_scale); vscaled2x0123456789ABCDEF = _mm512_mul_ps(vscaled2x0123456789ABCDEF, vinput_scale); vscaled3x0123456789ABCDEF = _mm512_mul_ps(vscaled3x0123456789ABCDEF, vinput_scale); + vscaled4x0123456789ABCDEF = _mm512_mul_ps(vscaled4x0123456789ABCDEF, vinput_scale); + vscaled5x0123456789ABCDEF = _mm512_mul_ps(vscaled5x0123456789ABCDEF, vinput_scale); + vscaled6x0123456789ABCDEF = _mm512_mul_ps(vscaled6x0123456789ABCDEF, vinput_scale); const __m512 vfilter_output_scale0123456789ABCDEF = _mm512_load_ps((const float*) w); const __m512 vbias0123456789ABCDEF = _mm512_load_ps((const float*) w + 16); @@ -498,18 +619,30 @@ void xnn_qd8_f32_qc8w_igemm_minmax_ukernel_4x16c4__avx512vnni( vscaled1x0123456789ABCDEF = _mm512_fmadd_ps(vscaled1x0123456789ABCDEF, vfilter_output_scale0123456789ABCDEF, vbias0123456789ABCDEF); vscaled2x0123456789ABCDEF = _mm512_fmadd_ps(vscaled2x0123456789ABCDEF, vfilter_output_scale0123456789ABCDEF, 
vbias0123456789ABCDEF); vscaled3x0123456789ABCDEF = _mm512_fmadd_ps(vscaled3x0123456789ABCDEF, vfilter_output_scale0123456789ABCDEF, vbias0123456789ABCDEF); + vscaled4x0123456789ABCDEF = _mm512_fmadd_ps(vscaled4x0123456789ABCDEF, vfilter_output_scale0123456789ABCDEF, vbias0123456789ABCDEF); + vscaled5x0123456789ABCDEF = _mm512_fmadd_ps(vscaled5x0123456789ABCDEF, vfilter_output_scale0123456789ABCDEF, vbias0123456789ABCDEF); + vscaled6x0123456789ABCDEF = _mm512_fmadd_ps(vscaled6x0123456789ABCDEF, vfilter_output_scale0123456789ABCDEF, vbias0123456789ABCDEF); vscaled0x0123456789ABCDEF = _mm512_max_ps(vscaled0x0123456789ABCDEF, voutput_min); vscaled1x0123456789ABCDEF = _mm512_max_ps(vscaled1x0123456789ABCDEF, voutput_min); vscaled2x0123456789ABCDEF = _mm512_max_ps(vscaled2x0123456789ABCDEF, voutput_min); vscaled3x0123456789ABCDEF = _mm512_max_ps(vscaled3x0123456789ABCDEF, voutput_min); + vscaled4x0123456789ABCDEF = _mm512_max_ps(vscaled4x0123456789ABCDEF, voutput_min); + vscaled5x0123456789ABCDEF = _mm512_max_ps(vscaled5x0123456789ABCDEF, voutput_min); + vscaled6x0123456789ABCDEF = _mm512_max_ps(vscaled6x0123456789ABCDEF, voutput_min); vscaled0x0123456789ABCDEF = _mm512_min_ps(vscaled0x0123456789ABCDEF, voutput_max); vscaled1x0123456789ABCDEF = _mm512_min_ps(vscaled1x0123456789ABCDEF, voutput_max); vscaled2x0123456789ABCDEF = _mm512_min_ps(vscaled2x0123456789ABCDEF, voutput_max); vscaled3x0123456789ABCDEF = _mm512_min_ps(vscaled3x0123456789ABCDEF, voutput_max); + vscaled4x0123456789ABCDEF = _mm512_min_ps(vscaled4x0123456789ABCDEF, voutput_max); + vscaled5x0123456789ABCDEF = _mm512_min_ps(vscaled5x0123456789ABCDEF, voutput_max); + vscaled6x0123456789ABCDEF = _mm512_min_ps(vscaled6x0123456789ABCDEF, voutput_max); if(nc >= 16) { + _mm512_storeu_ps(c6, vscaled6x0123456789ABCDEF); + _mm512_storeu_ps(c5, vscaled5x0123456789ABCDEF); + _mm512_storeu_ps(c4, vscaled4x0123456789ABCDEF); _mm512_storeu_ps(c3, vscaled3x0123456789ABCDEF); _mm512_storeu_ps(c2, vscaled2x0123456789ABCDEF); _mm512_storeu_ps(c1, vscaled1x0123456789ABCDEF); @@ -521,11 +654,17 @@ void xnn_qd8_f32_qc8w_igemm_minmax_ukernel_4x16c4__avx512vnni( c1 = (float*) ((uintptr_t) c1 + cn_stride); c2 = (float*) ((uintptr_t) c2 + cn_stride); c3 = (float*) ((uintptr_t) c3 + cn_stride); + c4 = (float*) ((uintptr_t) c4 + cn_stride); + c5 = (float*) ((uintptr_t) c5 + cn_stride); + c6 = (float*) ((uintptr_t) c6 + cn_stride); nc -= 16; } else { // Prepare mask for valid 32-bit elements (depends on nc). 
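[Note] Stepping back from the tail-store path for a moment: all of the VNNI hunks above lean on `_mm512_dpbusd_epi32`, which multiplies unsigned bytes from its second operand by signed bytes from its third. The activations are signed int8, so each loop iteration XORs them with `vsign_mask` (0x80 in every byte, i.e. adds 128), and the accumulators are seeded with `(zero_point + 128) * ksum` rather than the SKX path's `zero_point * ksum`. Assuming, as that seeding implies, that the packed `ksum` is the negated column sum of the weights, the +128 bias cancels exactly. A scalar sketch of the identity, with illustrative names that are not XNNPACK API:

#include <stdint.h>

/* Sketch: one output element of the qd8 dot product, computed the way the
   VNNI kernels do.  Assumes ksum == -sum(b[i]) as packed by the weight
   packer. */
static int32_t dot_vnni_style(const int8_t* a, const int8_t* b, int n,
                              int32_t zero_point, int32_t ksum) {
  int32_t acc = (zero_point + 128) * ksum;       /* accumulator seed */
  for (int i = 0; i < n; i++) {
    const uint8_t au = (uint8_t) (a[i] ^ 0x80);  /* == a[i] + 128; the
                                                    unsigned byte vpdpbusd
                                                    actually sees */
    acc += (int32_t) au * (int32_t) b[i];
  }
  /* acc == sum(a[i]*b[i]) + 128*sum(b[i]) - (zp + 128)*sum(b[i])
         == sum((a[i] - zp) * b[i])  -- the 128 terms cancel. */
  return acc;
}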
const __mmask16 vmask = _cvtu32_mask16((UINT32_C(1) << nc) - 1); + _mm512_mask_storeu_ps(c6, vmask, vscaled6x0123456789ABCDEF); + _mm512_mask_storeu_ps(c5, vmask, vscaled5x0123456789ABCDEF); + _mm512_mask_storeu_ps(c4, vmask, vscaled4x0123456789ABCDEF); _mm512_mask_storeu_ps(c3, vmask, vscaled3x0123456789ABCDEF); _mm512_mask_storeu_ps(c2, vmask, vscaled2x0123456789ABCDEF); _mm512_mask_storeu_ps(c1, vmask, vscaled1x0123456789ABCDEF); diff --git a/src/configs/gemm-config.c b/src/configs/gemm-config.c index c139ef00115..75bd17c4a31 100644 --- a/src/configs/gemm-config.c +++ b/src/configs/gemm-config.c @@ -1969,25 +1969,25 @@ static void init_qd8_f32_qc8w_gemm_config(void) { const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config(); assert(hardware_config != NULL); if (!XNN_PLATFORM_MOBILE && hardware_config->use_x86_avx512vnni) { - qd8_f32_qc8w_gemm_config.minmax.dqgemm[XNN_MR_TO_INDEX(4)] = xnn_init_hmp_dqgemm_ukernel((xnn_dqgemm_ukernel_fn) xnn_qd8_f32_qc8w_gemm_minmax_ukernel_4x16c4__avx512vnni); + qd8_f32_qc8w_gemm_config.minmax.dqgemm[XNN_MR_TO_INDEX(7)] = xnn_init_hmp_dqgemm_ukernel((xnn_dqgemm_ukernel_fn) xnn_qd8_f32_qc8w_gemm_minmax_ukernel_7x16c4__avx512vnni); qd8_f32_qc8w_gemm_config.minmax.dqgemm[XNN_MR_TO_INDEX(1)] = xnn_init_hmp_dqgemm_ukernel((xnn_dqgemm_ukernel_fn) xnn_qd8_f32_qc8w_gemm_minmax_ukernel_1x16c4__avx512vnni); - qd8_f32_qc8w_gemm_config.minmax.dqigemm[XNN_MR_TO_INDEX(4)] = xnn_init_hmp_dqigemm_ukernel((xnn_dqigemm_ukernel_fn) xnn_qd8_f32_qc8w_igemm_minmax_ukernel_4x16c4__avx512vnni); + qd8_f32_qc8w_gemm_config.minmax.dqigemm[XNN_MR_TO_INDEX(7)] = xnn_init_hmp_dqigemm_ukernel((xnn_dqigemm_ukernel_fn) xnn_qd8_f32_qc8w_igemm_minmax_ukernel_7x16c4__avx512vnni); qd8_f32_qc8w_gemm_config.minmax.dqigemm[XNN_MR_TO_INDEX(1)] = xnn_init_hmp_dqigemm_ukernel((xnn_dqigemm_ukernel_fn) xnn_qd8_f32_qc8w_igemm_minmax_ukernel_1x16c4__avx512vnni); qd8_f32_qc8w_gemm_config.init.f32 = xnn_init_f32_minmax_scalar_params; qd8_f32_qc8w_gemm_config.pack_gemm_gio = (xnn_packw_gemm_gio_ukernel_fn) xnn_pack_qs8_gemm_gio_w; qd8_f32_qc8w_gemm_config.pack_gemm_goi = (xnn_packw_gemm_goi_ukernel_fn) xnn_pack_qs8_gemm_goi_w; - qd8_f32_qc8w_gemm_config.mr = 4; + qd8_f32_qc8w_gemm_config.mr = 7; qd8_f32_qc8w_gemm_config.nr = 16; qd8_f32_qc8w_gemm_config.log2_kr = 2; } else if (!XNN_PLATFORM_MOBILE && hardware_config->use_x86_avx512skx) { - qd8_f32_qc8w_gemm_config.minmax.dqgemm[XNN_MR_TO_INDEX(4)] = xnn_init_hmp_dqgemm_ukernel((xnn_dqgemm_ukernel_fn) xnn_qd8_f32_qc8w_gemm_minmax_ukernel_4x16c8__avx512skx); + qd8_f32_qc8w_gemm_config.minmax.dqgemm[XNN_MR_TO_INDEX(7)] = xnn_init_hmp_dqgemm_ukernel((xnn_dqgemm_ukernel_fn) xnn_qd8_f32_qc8w_gemm_minmax_ukernel_7x16c8__avx512skx); qd8_f32_qc8w_gemm_config.minmax.dqgemm[XNN_MR_TO_INDEX(1)] = xnn_init_hmp_dqgemm_ukernel((xnn_dqgemm_ukernel_fn) xnn_qd8_f32_qc8w_gemm_minmax_ukernel_1x16c8__avx512skx); - qd8_f32_qc8w_gemm_config.minmax.dqigemm[XNN_MR_TO_INDEX(4)] = xnn_init_hmp_dqigemm_ukernel((xnn_dqigemm_ukernel_fn) xnn_qd8_f32_qc8w_igemm_minmax_ukernel_4x16c8__avx512skx); + qd8_f32_qc8w_gemm_config.minmax.dqigemm[XNN_MR_TO_INDEX(7)] = xnn_init_hmp_dqigemm_ukernel((xnn_dqigemm_ukernel_fn) xnn_qd8_f32_qc8w_igemm_minmax_ukernel_7x16c8__avx512skx); qd8_f32_qc8w_gemm_config.minmax.dqigemm[XNN_MR_TO_INDEX(1)] = xnn_init_hmp_dqigemm_ukernel((xnn_dqigemm_ukernel_fn) xnn_qd8_f32_qc8w_igemm_minmax_ukernel_1x16c8__avx512skx); qd8_f32_qc8w_gemm_config.init.f32 = xnn_init_f32_minmax_scalar_params; qd8_f32_qc8w_gemm_config.pack_gemm_gio = 
(xnn_packw_gemm_gio_ukernel_fn) xnn_pack_qs8_gemm_gio_w; qd8_f32_qc8w_gemm_config.pack_gemm_goi = (xnn_packw_gemm_goi_ukernel_fn) xnn_pack_qs8_gemm_goi_w; - qd8_f32_qc8w_gemm_config.mr = 4; + qd8_f32_qc8w_gemm_config.mr = 7; qd8_f32_qc8w_gemm_config.nr = 16; qd8_f32_qc8w_gemm_config.log2_kr = 3; } else if (hardware_config->use_x86_xop) {
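[Note] The gemm-config.c hunks register the widened kernels under XNN_MR_TO_INDEX(7) and bump `mr` from 4 to 7, so the operator layer now hands each microkernel call up to 7 rows of output; the 1x16 variants stay registered at index 1 for single-row cases. The likely motivation (not stated in the diff itself) is arithmetic intensity: each weight vector loaded in the inner loop is now reused across 7 accumulators instead of 4, and with 32 zmm registers a 7x16 tile still fits. A minimal sketch of how a caller consumes the configured `mr`, with a hypothetical gemm_fn signature:

#include <stddef.h>

typedef void (*gemm_fn)(size_t mr, size_t nc, size_t kc, /* ... */ void* ctx);

/* Sketch: tile the M dimension by the configured mr (now 7).  The final
   call may pass fewer rows, which the kernel absorbs via the row-pointer
   clamping shown earlier. */
static void run_gemm(gemm_fn kernel, size_t M, size_t N, size_t K,
                     size_t mr, void* ctx) {
  for (size_t m = 0; m < M; m += mr) {
    const size_t rows = (M - m < mr) ? (M - m) : mr;
    kernel(rows, N, K, ctx);
  }
}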