@@ -3058,8 +3058,8 @@ static void ggml_vec_dot_q4_2_q8_0(const int n, float * restrict s, const void *
     float sumf = 0.0;

 #if defined(__ARM_NEON)
-    float sum0 = 0.0f;
-    float sum1 = 0.0f;
+    float32x4_t sumv0 = vdupq_n_f32(0.0f);
+    float32x4_t sumv1 = vdupq_n_f32(0.0f);

     for (int i = 0; i < nb; i += 2) {
         const block_q4_2 * restrict x0_0 = &x[2*(i + 0) + 0];
@@ -3100,10 +3100,21 @@ static void ggml_vec_dot_q4_2_q8_0(const int n, float * restrict s, const void *
         const int8x16_t v1_1h = vld1q_s8(y1->qs + 16);

 #if defined(__ARM_FEATURE_DOTPROD)
-        sum0 += (GGML_FP16_TO_FP32(x0_0->d)*y0->d)*vaddvq_s32(vdotq_s32(vdupq_n_s32(0), v0_0lz, v1_0l));
-        sum0 += (GGML_FP16_TO_FP32(x0_1->d)*y0->d)*vaddvq_s32(vdotq_s32(vdupq_n_s32(0), v0_0hz, v1_0h));
-        sum1 += (GGML_FP16_TO_FP32(x1_0->d)*y1->d)*vaddvq_s32(vdotq_s32(vdupq_n_s32(0), v0_1lz, v1_1l));
-        sum1 += (GGML_FP16_TO_FP32(x1_1->d)*y1->d)*vaddvq_s32(vdotq_s32(vdupq_n_s32(0), v0_1hz, v1_1h));
+        const float32x4_t x0_0d = vdupq_n_f32(GGML_FP16_TO_FP32(x0_0->d));
+        const float32x4_t x0_1d = vdupq_n_f32(GGML_FP16_TO_FP32(x0_1->d));
+        const float32x4_t x1_0d = vdupq_n_f32(GGML_FP16_TO_FP32(x1_0->d));
+        const float32x4_t x1_1d = vdupq_n_f32(GGML_FP16_TO_FP32(x1_1->d));
+
+        const float32x4_t y0d = vdupq_n_f32(y0->d);
+        const float32x4_t y1d = vdupq_n_f32(y1->d);
+
+        sumv0 = vaddq_f32(sumv0, vmulq_f32(y0d, vaddq_f32(
+                vmulq_f32(x0_0d, vcvtq_f32_s32(vdotq_s32(vdupq_n_s32(0), v0_0lz, v1_0l))),
+                vmulq_f32(x0_1d, vcvtq_f32_s32(vdotq_s32(vdupq_n_s32(0), v0_0hz, v1_0h))))));
+
+        sumv1 = vaddq_f32(sumv1, vmulq_f32(y1d, vaddq_f32(
+                vmulq_f32(x1_0d, vcvtq_f32_s32(vdotq_s32(vdupq_n_s32(0), v0_1lz, v1_1l))),
+                vmulq_f32(x1_1d, vcvtq_f32_s32(vdotq_s32(vdupq_n_s32(0), v0_1hz, v1_1h))))));
 #else
         const int16x8_t pl0l = vmull_s8(vget_low_s8 (v0_0lz), vget_low_s8 (v1_0l));
         const int16x8_t pl0h = vmull_s8(vget_high_s8(v0_0lz), vget_high_s8(v1_0l));
@@ -3120,14 +3131,16 @@ static void ggml_vec_dot_q4_2_q8_0(const int n, float * restrict s, const void *
         const int32x4_t pl1 = vaddq_s32(vpaddlq_s16(pl1l), vpaddlq_s16(pl1h));
         const int32x4_t ph1 = vaddq_s32(vpaddlq_s16(ph1l), vpaddlq_s16(ph1h));

-        sum0 += (GGML_FP16_TO_FP32(x0_0->d)*y0->d)*vaddvq_s32(pl0);
-        sum0 += (GGML_FP16_TO_FP32(x0_1->d)*y0->d)*vaddvq_s32(ph0);
-        sum1 += (GGML_FP16_TO_FP32(x1_0->d)*y1->d)*vaddvq_s32(pl1);
-        sum1 += (GGML_FP16_TO_FP32(x1_1->d)*y1->d)*vaddvq_s32(ph1);
+        sumv0 = vaddq_f32(sumv0, vmulq_f32(vdupq_n_f32(y0->d), vaddq_f32(
+                vmulq_f32(vdupq_n_f32(GGML_FP16_TO_FP32(x0_0->d)), vcvtq_f32_s32(pl0)),
+                vmulq_f32(vdupq_n_f32(GGML_FP16_TO_FP32(x0_1->d)), vcvtq_f32_s32(ph0)))));
+        sumv1 = vaddq_f32(sumv1, vmulq_f32(vdupq_n_f32(y1->d), vaddq_f32(
+                vmulq_f32(vdupq_n_f32(GGML_FP16_TO_FP32(x1_0->d)), vcvtq_f32_s32(pl1)),
+                vmulq_f32(vdupq_n_f32(GGML_FP16_TO_FP32(x1_1->d)), vcvtq_f32_s32(ph1)))));
 #endif
     }

-    sumf = sum0 + sum1;
+    sumf = vaddvq_f32(sumv0) + vaddvq_f32(sumv1);
 #else
     // scalar
     for (int i = 0; i < nb; i++) {
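Reading the diff: the change swaps the scalar accumulators `sum0`/`sum1` for `float32x4_t` vector accumulators `sumv0`/`sumv1`, so each block's integer dot product is converted with `vcvtq_f32_s32` and scaled while still in NEON registers; the single horizontal reduction (`vaddvq_f32`) is deferred to after the loop instead of paying a per-block `vaddvq_s32` lane reduction. Below is a minimal standalone sketch of that accumulation pattern, not the committed function: the flat `a`/`b`/`d` layout and the helper name `dot_q8_blocks` are illustrative stand-ins for ggml's block structs, and it assumes an AArch64 target with `<arm_neon.h>`.

// Sketch only: simplified stand-in for the vector-accumulator pattern above.
#include <arm_neon.h>
#include <stdint.h>

// Dot product of n int8 values (n a multiple of 16), with each 16-element
// block scaled by d[i]. Partial sums stay in a float32x4_t register; the
// horizontal reduction happens exactly once, after the loop.
static float dot_q8_blocks(int n, const int8_t * a, const int8_t * b, const float * d) {
    float32x4_t sumv = vdupq_n_f32(0.0f);
    for (int i = 0; i < n/16; ++i) {
        const int8x16_t va = vld1q_s8(a + 16*i);
        const int8x16_t vb = vld1q_s8(b + 16*i);
        // widen to 16-bit products, then pairwise-add up to 32-bit lanes
        // (the non-DOTPROD path of the diff does the same vmull/vpaddl dance)
        const int16x8_t pl = vmull_s8(vget_low_s8 (va), vget_low_s8 (vb));
        const int16x8_t ph = vmull_s8(vget_high_s8(va), vget_high_s8(vb));
        const int32x4_t p  = vaddq_s32(vpaddlq_s16(pl), vpaddlq_s16(ph));
        // scale in vector registers instead of reducing to scalar per block
        sumv = vmlaq_f32(sumv, vdupq_n_f32(d[i]), vcvtq_f32_s32(p));
    }
    return vaddvq_f32(sumv); // one horizontal add at the very end
}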