Fix FlashAttention debug test, FP32 assert (llama/7684)
JohannesGaessler authored and ggerganov committed Jun 16, 2024
1 parent a16137d commit 9b3d784
Showing 1 changed file with 0 additions and 4 deletions.
ggml-cuda/fattn-vec-f32.cuh
@@ -278,14 +278,10 @@ void ggml_cuda_flash_attn_ext_vec_f32_case_impl(ggml_backend_cuda_context & ctx,
 
 template <int D, ggml_type type_K, ggml_type type_V>
 void ggml_cuda_flash_attn_ext_vec_f32_case(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
-    ggml_tensor * KQV = dst;
     ggml_tensor * Q = dst->src[0];
     ggml_tensor * K = dst->src[1];
     ggml_tensor * V = dst->src[2];
 
-    const int32_t precision = KQV->op_params[2];
-    GGML_ASSERT(precision == GGML_PREC_DEFAULT);
-
     GGML_ASSERT(K->type == type_K);
     GGML_ASSERT(V->type == type_V);
 
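
For context on the removed assert: the flash-attention op stores the requested accumulation precision in op_params[2] (exactly what the deleted lines read), and the FP32 vector kernels are presumably also selected when a higher-than-default precision is requested, so asserting GGML_PREC_DEFAULT inside this launcher was too strict. A minimal, hedged sketch of what such caller-side dispatch could look like; the launch_fattn_vec_* helper names and the common.cuh include are assumptions for illustration, not code from this commit:

    // Hedged illustration only: shows why the FP32 vector launcher must accept
    // precisions other than GGML_PREC_DEFAULT. The helpers below are
    // hypothetical stand-ins, not the actual ggml-cuda entry points.
    #include "common.cuh"  // assumption: ggml CUDA backend header providing ggml_backend_cuda_context

    void launch_fattn_vec_f16(ggml_backend_cuda_context & ctx, ggml_tensor * dst);  // hypothetical
    void launch_fattn_vec_f32(ggml_backend_cuda_context & ctx, ggml_tensor * dst);  // hypothetical

    static void flash_attn_dispatch_sketch(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
        // op_params[2] carries the requested accumulation precision,
        // as read by the assert this commit removes.
        const int32_t precision = dst->op_params[2];

        if (precision == GGML_PREC_DEFAULT) {
            launch_fattn_vec_f16(ctx, dst);  // default: FP16 accumulation path
        } else {
            // Higher-precision requests fall through to the FP32 kernels, so the
            // FP32 launcher cannot assert precision == GGML_PREC_DEFAULT.
            launch_fattn_vec_f32(ctx, dst);
        }
    }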
