# wheels_build.yml (forked from facebookresearch/xformers)
name: wheels_build

on:
  workflow_call:
    inputs:
      os:
        required: true
        type: string
      python:
        required: true
        type: string
      torch_version:
        required: true
        type: string
        description: "Example: 1.13.1"
      toolkit_type:
        required: true
        type: string
        description: "'cuda' for CUDA builds, 'rocm' for ROCm builds"
      toolkit_short_version:
        required: true
        type: string
        description: "Example: 117 for 11.7"
      artifact_tag:
        default: "facebookresearch"
        type: string
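# A minimal usage sketch (not part of this workflow): how a caller workflow might
# invoke this reusable workflow. The file path and the concrete input values below
# are illustrative assumptions, not taken from this repository.
#
#   jobs:
#     build_wheel:
#       uses: ./.github/workflows/wheels_build.yml
#       with:
#         os: ubuntu-20.04
#         python: "3.10"
#         torch_version: "2.4.0"
#         toolkit_type: "cuda"
#         toolkit_short_version: "121"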
# This YAML file could be cleaned up with YAML anchors, but they are not supported in GitHub Actions yet:
# https://github.com/actions/runner/issues/1182
env:
  # you need a GPU with CUDA compute capability of at least 5.0 for some of the code compiled here
  TORCH_CUDA_ARCH_LIST: ${{ contains(inputs.toolkit_type, 'cuda') && join('6.0+PTX 7.0 7.5 8.0+PTX', fromJSON(inputs.toolkit_short_version) >= 118 && ' 9.0a' || '') || '' }}
  HIP_ARCHITECTURES: ${{ contains(inputs.toolkit_type, 'rocm') && 'gfx90a gfx942' || '' }}
  MAX_JOBS: 4
  DISTUTILS_USE_SDK: 1 # otherwise distutils complains on Windows about multiple versions of MSVC
  XFORMERS_BUILD_TYPE: "Release"
  TWINE_USERNAME: __token__
  XFORMERS_PACKAGE_FROM: "wheel-${{ github.ref_name }}"
  # https://github.blog/changelog/2024-03-07-github-actions-all-actions-will-run-on-node20-instead-of-node16-by-default/
  ACTIONS_ALLOW_USE_UNSECURE_NODE_VERSION: "true"
  PYTORCH_INDEX_URL: "https://download.pytorch.org/whl/${{ contains(inputs.toolkit_type, 'cuda') && 'cu' || 'rocm' }}${{ inputs.toolkit_short_version }}"
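# Example of how PYTORCH_INDEX_URL resolves; the input values are illustrative assumptions:
#   toolkit_type=cuda, toolkit_short_version=121 -> https://download.pytorch.org/whl/cu121
#   toolkit_type=rocm, toolkit_short_version=6.1 -> https://download.pytorch.org/whl/rocm6.1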
jobs:
  build:
    name: ${{ contains(inputs.os, 'ubuntu') && 'ubuntu' || 'win' }}-py${{ inputs.python }}-pt${{ inputs.torch_version }}+${{ contains(inputs.toolkit_type, 'cuda') && 'cu' || 'rocm' }}${{ inputs.toolkit_short_version }}
    runs-on: ${{ inputs.os }}
    env:
      # Alias for the current Python binary.
      # Windows does not have a per-version binary; it is just 'python3'.
      PY: python${{ contains(inputs.os, 'ubuntu') && inputs.python || '3' }}
    container: ${{ contains(inputs.os, 'ubuntu') && 'quay.io/pypa/manylinux2014_x86_64' || null }}
    timeout-minutes: 360
    defaults:
      run:
        shell: bash
    steps:
      - if: runner.os == 'Windows'
        name: Support longpaths
        run: git config --system core.longpaths true
      - id: cuda_info
        shell: python
        run: |
          import os
          import sys
          print(sys.version)
          cushort = "${{ inputs.toolkit_short_version }}"
          TORCH_CUDA_DEFAULT = "121"  # pytorch 2.4.0
          # Map the short toolkit version to the full version and the installer download URL.
          # https://github.com/Jimver/cuda-toolkit/blob/master/src/links/linux-links.ts
          full_version, install_script = {
            "124": ("12.4.1", "https://developer.download.nvidia.com/compute/cuda/12.4.1/local_installers/cuda_12.4.1_550.54.15_linux.run"),
            "121": ("12.1.0", "https://developer.download.nvidia.com/compute/cuda/12.1.0/local_installers/cuda_12.1.0_530.30.02_linux.run"),
            "118": ("11.8.0", "https://developer.download.nvidia.com/compute/cuda/11.8.0/local_installers/cuda_11.8.0_520.61.05_linux.run"),
            "6.0": ("6.0.2", "https://repo.radeon.com/amdgpu-install/6.0.2/rhel/7.9/amdgpu-install-6.0.60002-1.el7.noarch.rpm"),
            "6.1": ("6.1.2", "https://repo.radeon.com/amdgpu-install/6.1.2/el/7/amdgpu-install-6.1.60102-1.el7.noarch.rpm"),
          }[cushort]
          # Expose the resolved values as step outputs for later steps.
          with open(os.environ['GITHUB_OUTPUT'], "r+") as fp:
            fp.write("CUDA_VERSION=" + full_version + "\n")
            if cushort == TORCH_CUDA_DEFAULT:
              fp.write("CUDA_VERSION_SUFFIX=\n")
            else:
              fp.write("CUDA_VERSION_SUFFIX=+" + ("cu" if "cuda" == "${{ inputs.toolkit_type }}" else "rocm") + cushort + "\n")
            fp.write("CUDA_INSTALL_SCRIPT=" + install_script + "\n")
- run: echo "CUDA_VERSION_SUFFIX=${{ steps.cuda_info.outputs.CUDA_VERSION_SUFFIX }}"
- name: Recursive checkout
uses: actions/checkout@v3
with:
submodules: recursive
path: "."
fetch-depth: 0 # for tags
- if: runner.os != 'Windows'
name: (Linux) Setup venv for linux
run: |
$PY -m venv venv
. ./venv/bin/activate
which pip
echo "PY=$(which python)" >> ${GITHUB_ENV}
echo "PATH=$PATH" >> ${GITHUB_ENV}
echo "MAX_JOBS=3" >> ${GITHUB_ENV}
      - name: Define version
        id: xformers_version
        env:
          VERSION_SOURCE: ${{ github.ref_type == 'tag' && 'tag' || 'dev' }}
        run: |
          set -Eeuo pipefail
          git config --global --add safe.directory "*"
          pip install packaging ninja
          version=`python packaging/compute_wheel_version.py --source $VERSION_SOURCE`
          echo $version > version.txt
          echo "BUILD_VERSION=$version${{ steps.cuda_info.outputs.CUDA_VERSION_SUFFIX }}" >> ${GITHUB_ENV}
          echo "BUILD_VERSION=$version${{ steps.cuda_info.outputs.CUDA_VERSION_SUFFIX }}" >> ${GITHUB_OUTPUT}
          which ninja
          cat ${GITHUB_ENV}
      - run: echo "xformers-${BUILD_VERSION}"
      - run: echo "release version (will upload to PyTorch)"
        if: ${{ !contains(steps.xformers_version.outputs.BUILD_VERSION, '.dev') }}
      - name: Setup proper pytorch dependency in "requirements.txt"
        run: |
          sed -i '/torch/d' ./requirements.txt
          echo "torch == ${{ inputs.torch_version }}" >> ./requirements.txt
          cat ./requirements.txt
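      # After this step requirements.txt pins torch to the requested version,
      # e.g. "torch == 2.4.0" when torch_version is 2.4.0 (illustrative value).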
      - if: runner.os == 'Windows'
        name: (Windows) Setup Runner
        uses: ./.github/actions/setup-windows-runner
        with:
          cuda: ${{ steps.cuda_info.outputs.CUDA_VERSION }}
          python: ${{ inputs.python }}
      - if: runner.os == 'Linux'
        name: (Linux) List installed packages
        run: |
          yum list installed
      - if: runner.os == 'Linux' && contains(inputs.toolkit_type, 'cuda')
        name: (Linux) Install CUDA
        run: >
          yum install wget git prename -y &&
          wget -q "${{ steps.cuda_info.outputs.CUDA_INSTALL_SCRIPT }}" -O cuda.run &&
          sh ./cuda.run --silent --toolkit &&
          rm ./cuda.run
      - if: runner.os == 'Linux' && contains(inputs.toolkit_type, 'rocm')
        name: (Linux) Install ROCm
        run: |
          yum install -y libzstd
          yum install -y ${{ steps.cuda_info.outputs.CUDA_INSTALL_SCRIPT }}
          amdgpu-install -y --usecase=rocm --no-dkms
          echo "ROCM_PATH=/opt/rocm" >> ${GITHUB_ENV}
          echo "PATH=$PATH:/opt/rocm/bin" >> ${GITHUB_ENV}
          echo "MAX_JOBS=15" >> ${GITHUB_ENV}
      - name: Install dependencies
        run: $PY -m pip install wheel setuptools twine -r requirements.txt --extra-index-url $PYTORCH_INDEX_URL
      - name: Build wheel
        run: |
          $PY setup.py bdist_wheel -d dist/ -k $PLAT_ARG
        env:
          PLAT_ARG: ${{ contains(inputs.os, 'ubuntu') && '--plat-name manylinux2014_x86_64' || '' }}
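      # On Linux the PLAT_ARG flag tags the wheel as manylinux2014_x86_64 to match the build
      # container; the file produced in dist/ would look roughly like
      # xformers-<BUILD_VERSION>-cp310-cp310-manylinux2014_x86_64.whl (illustrative name, assuming Python 3.10).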
      - run: du -h dist/*
      - uses: actions/upload-artifact@v3
        with:
          name: ${{ inputs.os }}-py${{ inputs.python }}-torch${{ inputs.torch_version }}+${{ contains(inputs.toolkit_type, 'cuda') && 'cu' || 'rocm' }}${{ inputs.toolkit_short_version }}_${{ inputs.artifact_tag }}
          path: dist/*.whl
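      # Example artifact name for illustrative inputs (os=ubuntu-20.04, python=3.10, torch_version=2.4.0,
      # toolkit_type=cuda, toolkit_short_version=121, default artifact_tag):
      #   ubuntu-20.04-py3.10-torch2.4.0+cu121_facebookresearch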
      # Note: it might be helpful to have additional steps that test if the built wheels actually work