From e996b896372055ddb647ad34ea38c1140b11f9c0 Mon Sep 17 00:00:00 2001 From: baptiste Date: Thu, 8 Aug 2024 12:15:18 +0000 Subject: [PATCH] install from source with unpinned torch --- .gitignore | 2 ++ Makefile | 7 ++++--- setup.py | 12 ++++++++---- 3 files changed, 14 insertions(+), 7 deletions(-) diff --git a/.gitignore b/.gitignore index b30407e0..3315d216 100644 --- a/.gitignore +++ b/.gitignore @@ -172,3 +172,5 @@ work-in-progress/ experiments/ amdsmi/ amd-* + +external_repos/ \ No newline at end of file diff --git a/Makefile b/Makefile index e30c1062..06062867 100644 --- a/Makefile +++ b/Makefile @@ -24,7 +24,7 @@ build_cpu_image: docker build --build-arg IMAGE=optimum-benchmark:latest-cpu --build-arg USER_ID=$(USER_ID) --build-arg GROUP_ID=$(GROUP_ID) -t optimum-benchmark:latest-cpu docker/unroot build_cuda_image: - docker build --build-arg TORCH_VERSION=$(TORCH_VERSION) -t optimum-benchmark:latest-cuda docker/cuda + docker build -t optimum-benchmark:latest-cuda docker/cuda docker build --build-arg IMAGE=optimum-benchmark:latest-cuda --build-arg USER_ID=$(USER_ID) --build-arg GROUP_ID=$(GROUP_ID) -t optimum-benchmark:latest-cuda docker/unroot build_cuda_ort_image: @@ -109,7 +109,8 @@ install_cli_cpu_neural_compressor: pip install -e .[testing,peft,timm,diffusers,neural-compressor] install_cli_cuda_pytorch: - pip install -e .[testing,timm,diffusers,peft,autoawq,auto-gptq,bitsandbytes,deepspeed] + python scripts/install_autoawq.py + pip install -e .[testing,timm,diffusers,peft,auto-gptq,bitsandbytes,deepspeed] install_cli_rocm_pytorch: pip install -e .[testing,timm,diffusers,peft,autoawq,auto-gptq,deepspeed] @@ -159,7 +160,7 @@ test_cli_cuda_pytorch_multi_gpu: pytest -s -k "cli and cuda and pytorch and (dp or ddp or device_map or deepspeed) and not awq" test_cli_cuda_pytorch_single_gpu: - pytest -s -k "cli and cuda and pytorch and not (dp or ddp or device_map or deepspeed) and not awq" + pytest -s -k "cli and cuda and pytorch and not (dp or ddp or device_map or deepspeed) and not awq" --ignore=external_repos test_cli_cuda_torch_ort_multi_gpu: pytest -s -k "cli and cuda and torch-ort and (dp or ddp or device_map or deepspeed) and not peft" diff --git a/setup.py b/setup.py index ffc15cc5..8782ba7d 100644 --- a/setup.py +++ b/setup.py @@ -58,13 +58,14 @@ "Please install amdsmi from https://github.com/ROCm/amdsmi to enable this feature." ) + + if USE_ROCM: AUTOAWQ = "autoawq@https://github.com/casper-hansen/AutoAWQ/releases/download/v0.2.1/autoawq-0.2.1+rocm571-cp310-cp310-linux_x86_64.whl" AUTOGPTQ = "auto-gptq@https://huggingface.github.io/autogptq-index/whl/rocm573/auto-gptq/auto_gptq-0.7.1%2Brocm5.7.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl" -else: - AUTOAWQ = "autoawq@git+https://github.com/casper-hansen/AutoAWQ.git" - AUTOAWQ_KERNELS = "autoawq-kernels@git+https://github.com/casper-hansen/AutoAWQ_kernels.git" +else: + # AUTOAWQ will be installed from source via scripts/install_autoawq.py script AUTOGPTQ = "auto-gptq@git+https://github.com/PanQiWei/AutoGPTQ.git" EXTRAS_REQUIRE = { @@ -81,7 +82,7 @@ "py-txi": ["py-txi"], "vllm": ["vllm"], # optional dependencies - "autoawq": [AUTOAWQ_KERNELS, AUTOAWQ], + "autoawq": [], "auto-gptq": ["optimum", AUTOGPTQ], "sentence-transformers": ["sentence-transformers"], "bitsandbytes": ["bitsandbytes"], @@ -93,6 +94,9 @@ "peft": ["peft"], } +if USE_ROCM: + EXTRAS_REQUIRE["autoawq"] = [AUTOAWQ] + setup( packages=find_packages(),