# Revert Workaround of Disabling QWEN2_VL in Convergence Tests (#466)
## Summary

After fix #464, we can revert the changes from:

- #463
- #459

which were workarounds for #461. A sketch of the before/after skip conditions is shown below.
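For context, here is a minimal sketch of the skip condition before and after this revert. The `QWEN2_VL_AVAILABLE` probe is reconstructed from the import-guard pattern in the test files; the exact symbols it imports are an assumption, not copied verbatim.

```python
import pytest
import transformers
from packaging import version

# Availability probe (sketch): the test files define QWEN2_VL_AVAILABLE via a
# try/except import guard roughly like this.
try:
    from transformers.models.qwen2_vl.modeling_qwen2_vl import (  # noqa: F401
        Qwen2VLForConditionalGeneration,
    )

    QWEN2_VL_AVAILABLE = True
except ImportError:
    QWEN2_VL_AVAILABLE = False

# Workaround being reverted: also skipped on transformers >= 4.47.0, where the
# Qwen2-VL convergence tests were broken before fix #464.
skip_workaround = pytest.mark.skipif(
    not QWEN2_VL_AVAILABLE
    or version.parse(transformers.__version__) >= version.parse("4.47.0"),
    reason="Qwen2-VL not available in this version of transformers "
    "or transformers version >= 4.47.0",
)

# After the revert: skip only when Qwen2-VL itself is unavailable.
skip_restored = pytest.mark.skipif(
    not QWEN2_VL_AVAILABLE,
    reason="Qwen2-VL not available in this version of transformers",
)
```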


## Testing Done

- Hardware Type: <BLANK>
- [ ] run `make test` to ensure correctness
- [X] run `make checkstyle` to ensure code style
- [X] run `make test-convergence` to ensure convergence

---------

Signed-off-by: Austin Liu <[email protected]>
austin362667 authored Dec 11, 2024
1 parent 78e8a85 commit 96859d8
Showing 2 changed files with 8 additions and 18 deletions.
### test/convergence/test_mini_models_multimodal.py

13 changes: 4 additions & 9 deletions

```diff
@@ -16,9 +16,7 @@
 import pytest
 import torch
-import transformers
 from datasets import load_dataset
-from packaging import version
 from torch.utils.data import DataLoader
 from transformers import PreTrainedTokenizerFast
@@ -380,9 +378,8 @@ def run_mini_model_multimodal(
             5e-3,
             1e-5,
             marks=pytest.mark.skipif(
-                not QWEN2_VL_AVAILABLE
-                or version.parse(transformers.__version__) >= version.parse("4.47.0"),
-                reason="Qwen2-VL not available in this version of transformers or transformers version >= 4.47.0",
+                not QWEN2_VL_AVAILABLE,
+                reason="Qwen2-VL not available in this version of transformers",
             ),
         ),
         pytest.param(
@@ -401,10 +398,8 @@ def run_mini_model_multimodal(
                 not supports_bfloat16(), reason="bfloat16 not supported on this GPU"
             ),
             pytest.mark.skipif(
-                not QWEN2_VL_AVAILABLE
-                or version.parse(transformers.__version__)
-                >= version.parse("4.47.0"),
-                reason="Qwen2-VL not available in this version of transformers or transformers version >= 4.47.0",
+                not QWEN2_VL_AVAILABLE,
+                reason="Qwen2-VL not available in this version of transformers",
             ),
         ],
     ),
```
### test/convergence/test_mini_models_with_logits.py

13 changes: 4 additions & 9 deletions

```diff
@@ -18,9 +18,7 @@
 import pytest
 import torch
-import transformers
 from datasets import load_from_disk
-from packaging import version
 from torch.utils.data import DataLoader
 from transformers.models.gemma import GemmaConfig, GemmaForCausalLM
 from transformers.models.gemma2 import Gemma2Config, Gemma2ForCausalLM
@@ -540,9 +538,8 @@ def run_mini_model(
             5e-3,
             1e-5,
             marks=pytest.mark.skipif(
-                not QWEN2_VL_AVAILABLE
-                or version.parse(transformers.__version__) >= version.parse("4.47.0"),
-                reason="Qwen2-VL not available in this version of transformers or transformers version >= 4.47.0",
+                not QWEN2_VL_AVAILABLE,
+                reason="Qwen2-VL not available in this version of transformers",
             ),
         ),
         pytest.param(
@@ -561,10 +558,8 @@ def run_mini_model(
                 not supports_bfloat16(), reason="bfloat16 not supported on this GPU"
             ),
             pytest.mark.skipif(
-                not QWEN2_VL_AVAILABLE
-                or version.parse(transformers.__version__)
-                >= version.parse("4.47.0"),
-                reason="Qwen2-VL not available in this version of transformers or transformers version >= 4.47.0",
+                not QWEN2_VL_AVAILABLE,
+                reason="Qwen2-VL not available in this version of transformers",
             ),
         ],
     ),
```
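Both files attach the simplified mark through `pytest.param`. A runnable sketch of that pattern follows; the parameter names, tolerance value, and test body are illustrative stand-ins, not the actual test parameters.

```python
import pytest

QWEN2_VL_AVAILABLE = True  # stand-in for the import probe sketched above


@pytest.mark.parametrize(
    "model_name, rtol",
    [
        pytest.param(
            "mini_qwen2_vl",
            5e-3,
            marks=pytest.mark.skipif(
                not QWEN2_VL_AVAILABLE,
                reason="Qwen2-VL not available in this version of transformers",
            ),
        ),
    ],
)
def test_convergence_sketch(model_name, rtol):
    # Placeholder body; the real tests drive run_mini_model_multimodal /
    # run_mini_model and compare losses within the given tolerances.
    assert rtol > 0
```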
