diff --git a/pyproject.toml b/pyproject.toml
new file mode 100644
index 0000000000..fa13c8d25b
--- /dev/null
+++ b/pyproject.toml
@@ -0,0 +1,86 @@
+[build-system]
+requires = ["setuptools>=40.8.0", "wheel"]
+build-backend = "setuptools.build_meta"
+
+[project]
+name = "lm_eval"
+version = "1.0.0"
+authors = [
+    {name="EleutherAI", email="contact@eleuther.ai"}
+]
+description = "A framework for evaluating language models"
+readme = "README.md"
+classifiers = [
+    "Development Status :: 3 - Alpha",
+    "Programming Language :: Python :: 3",
+    "License :: OSI Approved :: MIT License",
+    "Operating System :: OS Independent",
+]
+requires-python = ">=3.9"
+license = { "text" = "MIT" }
+dependencies = [
+    "accelerate>=0.21.0",
+    "evaluate",
+    "datasets>=2.0.0",
+    "evaluate>=0.4.0",
+    "jsonlines",
+    "numexpr",
+    "omegaconf>=2.2",
+    "peft>=0.2.0",
+    "pybind11>=2.6.2",
+    "pycountry",
+    "pytablewriter",
+    "rouge-score>=0.0.4",
+    "sacrebleu>=1.5.0",
+    "scikit-learn>=0.24.1",
+    "sqlitedict",
+    "torch>=1.8",
+    "tqdm-multiprocess",
+    "transformers>=4.1",
+    "zstandard",
+]
+
+[tool.setuptools]
+packages = ["lm_eval"]
+
+# required to include yaml files in pip installation
+[tool.setuptools.package-data]
+lm_eval = ["**/*.yaml", "tasks/**/*"]
+examples = ["**/*.yaml"]
+
+[project.scripts]
+lm-eval = "main:main"
+lm_eval = "main:main"
+
+[project.urls]
+Homepage = "https://github.com/EleutherAI/lm-evaluation-harness"
+Repository = "https://github.com/EleutherAI/lm-evaluation-harness"
+
+[project.optional-dependencies]
+dev = ["black", "flake8", "pre-commit", "pytest", "pytest-cov"]
+linting = [
+    "flake8",
+    "pylint",
+    "mypy",
+    "pre-commit",
+]
+testing = ["pytest", "pytest-cov", "pytest-xdist"]
+multilingual = ["nagisa>=0.2.7", "jieba>=0.42.1"]
+sentencepiece = ["sentencepiece>=0.1.98", "protobuf>=4.22.1", "pycountry"]
+promptsource = [
+    "promptsource @ git+https://github.com/bigscience-workshop/promptsource.git#egg=promptsource"
+]
+gptq = ["auto-gptq[triton] @ git+https://github.com/PanQiWei/AutoGPTQ"]
+anthropic = ["anthropic"]
+openai = ["openai", "tiktoken"]
+all = [
+    "lm_eval[dev]",
+    "lm_eval[testing]",
+    "lm_eval[linting]",
+    "lm_eval[multilingual]",
+    "lm_eval[sentencepiece]",
+    "lm_eval[promptsource]",
+    "lm_eval[gptq]",
+    "lm_eval[anthropic]",
+    "lm_eval[openai]"
+]
diff --git a/setup.py b/setup.py
index cf8843ad68..dbe4675d06 100644
--- a/setup.py
+++ b/setup.py
@@ -1,77 +1,4 @@
 import setuptools
-import itertools
 
-with open("README.md", "r", encoding="utf-8") as fh:
-    long_description = fh.read()
-
-
-extras_require = {
-    "dev": ["black", "flake8", "pre-commit", "pytest", "pytest-cov"],
-    "linting": [
-        "flake8",
-        "pylint",
-        "mypy",
-        "pre-commit",
-    ],
-    "testing": ["pytest", "pytest-cov", "pytest-xdist"],
-    "multilingual": ["nagisa>=0.2.7", "jieba>=0.42.1"],
-    "sentencepiece": ["sentencepiece>=0.1.98", "protobuf>=4.22.1"],
-    "promptsource": [
-        "promptsource @ git+https://github.com/bigscience-workshop/promptsource.git#egg=promptsource"
-    ],
-    "gptq": ["auto-gptq[triton] @ git+https://github.com/PanQiWei/AutoGPTQ"],
-    "anthropic": ["anthropic"],
-    "openai": ["openai", "tiktoken"],
-}
-extras_require["all"] = list(itertools.chain.from_iterable(extras_require.values()))
-
-
-setuptools.setup(
-    name="lm_eval",
-    version="1.0.0",
-    author="EleutherAI",
-    author_email="contact@eleuther.ai",
-    description="A framework for evaluating language models",
-    long_description=long_description,
-    long_description_content_type="text/markdown",
-    url="https://github.com/EleutherAI/lm-evaluation-harness",
-    packages=setuptools.find_packages(),
-    # required to include yaml files in pip installation
-    package_data={
-        "lm_eval": ["**/*.yaml", "tasks/**/*"],
-        "examples": ["**/*.yaml"],
-    },
-    entry_points={
-        "console_scripts": ["lm-eval = main:main", "lm_eval = main:main"],
-    },
-    include_package_data=True,
-    classifiers=[
-        "Development Status :: 3 - Alpha",
-        "Programming Language :: Python :: 3",
-        "License :: OSI Approved :: MIT License",
-        "Operating System :: OS Independent",
-    ],
-    python_requires=">=3.9",
-    install_requires=[
-        "accelerate>=0.18.0",
-        "evaluate",
-        "datasets>=2.0.0",
-        "evaluate>=0.4.0",
-        "jsonlines",
-        "numexpr",
-        "omegaconf>=2.2",
-        "peft>=0.2.0",
-        "pybind11>=2.6.2",
-        "pycountry",
-        "pytablewriter",
-        "rouge-score>=0.0.4",
-        "sacrebleu==1.5.0",
-        "scikit-learn>=0.24.1",
-        "sqlitedict",
-        "torch>=1.8",
-        "tqdm-multiprocess",
-        "transformers>=4.1",
-        "zstandard",
-    ],
-    extras_require=extras_require,
-)
+# This is to make sure that the package supports editable installs
+setuptools.setup()