Skip to content

Commit

Permalink
Remove stale TODO
Browse files · Browse the repository at this point in the history
  • Loading branch information
vgokhale authored May 20, 2024
1 parent 4533a48 commit 77a0e27
Showing 1 changed file with 0 additions and 1 deletion.
1 change: 0 additions & 1 deletion python/perf-kernels/flash-attention.py
Original file line number Diff line number Diff line change
Expand Up @@ -1095,7 +1095,6 @@ def varlen_input_helper(Z, HQ, HK, N_CTX_Q, N_CTX_K, D_HEAD, dtype, equal_seqlen
@pytest.mark.parametrize('layout', ['bshd', 'bhsd'])
def test_op_fwd(Z, HQ, HK, N_CTX_Q, N_CTX_K, D_HEAD, causal, use_alibi, layout, dtype=torch.float16):
torch.manual_seed(20)
# TODO: Adapt test for bshd
q, k, v, input_metadata = input_helper(Z, HQ, HK, N_CTX_Q, N_CTX_K, D_HEAD, dtype, layout)
if causal:
input_metadata.need_causal()
Expand Down

0 comments on commit 77a0e27

Please sign in to comment.