diff --git a/eval.py b/eval.py
index bd4a075410..8ddbbe9a4e 100644
--- a/eval.py
+++ b/eval.py
@@ -29,25 +29,11 @@
 torch._inductor.config.triton.cudagraphs = True
 torch._dynamo.config.cache_size_limit = 100000
 
-try:
-    import lm_eval
+import lm_eval
 
-    lm_eval_available = True
-except:
-    lm_eval_available = False
-
-
-if lm_eval_available:
-    try:  # lm_eval version 0.4
-        from lm_eval.evaluator import evaluate
-        from lm_eval.models.huggingface import HFLM as eval_wrapper
-        from lm_eval.tasks import get_task_dict
-    except:  # lm_eval version 0.3
-        from lm_eval import base, evaluator, tasks
-
-        eval_wrapper = base.BaseLM
-        get_task_dict = tasks.get_task_dict
-        evaluate = evaluator.evaluate
+from lm_eval.evaluator import evaluate
+from lm_eval.models.huggingface import HFLM as eval_wrapper
+from lm_eval.tasks import get_task_dict
 
 
 def setup_cache_padded_seq_input_pos_max_seq_length_for_prefill(
diff --git a/requirements.txt b/requirements.txt
index ddc939519b..7b80203737 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -19,7 +19,7 @@ snakeviz
 sentencepiece
 numpy
 gguf
-lm-eval
+lm-eval==0.4
 blobfile
 
 # Build tools