Makefile

.PHONY: test checkstyle test-convergence all run-benchmarks
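# Default target: run style checks, correctness tests, and convergence tests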
all: checkstyle test test-convergence

# Command to run pytest for correctness tests
test:
	python -m pytest --disable-warnings test/ --ignore=test/convergence

# Command to run flake8 (code style check), isort (import ordering), and black (code formatting)
# Subsequent commands still run if an earlier one fails, but the target exits with failure at the end
checkstyle:
	flake8 .; flake8_status=$$?; \
	isort .; isort_status=$$?; \
	black .; black_status=$$?; \
	if [ $$flake8_status -ne 0 ] || [ $$isort_status -ne 0 ] || [ $$black_status -ne 0 ]; then \
		exit 1; \
	fi

# Command to run pytest for convergence tests
# We have to explicitly set HF_DATASETS_OFFLINE=1, or the datasets library will silently try to send metrics and time out (80s): https://github.com/huggingface/datasets/blob/37a603679f451826cfafd8aae00738b01dcb9d58/src/datasets/load.py#L286
test-convergence:
	HF_DATASETS_OFFLINE=1 python -m pytest --disable-warnings test/convergence/test_mini_models.py
	HF_DATASETS_OFFLINE=1 python -m pytest --disable-warnings test/convergence/test_mini_models_multimodal.py
	HF_DATASETS_OFFLINE=1 python -m pytest --disable-warnings test/convergence/test_mini_models_with_logits.py

# Command to run all benchmark scripts and update the benchmarking data file
# By default this doesn't overwrite existing data for the same benchmark experiment
# Run with `make run-benchmarks OVERWRITE=1` to overwrite existing benchmark data
BENCHMARK_DIR = benchmark/scripts
BENCHMARK_SCRIPTS = $(wildcard $(BENCHMARK_DIR)/benchmark_*.py)
OVERWRITE ?= 0

run-benchmarks:
	@for script in $(BENCHMARK_SCRIPTS); do \
		echo "Running benchmark: $$script"; \
		if [ $(OVERWRITE) -eq 1 ]; then \
			python $$script --overwrite; \
		else \
			python $$script; \
		fi; \
	done
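
# Example invocations (a usage sketch, assuming pytest, flake8, isort, and black are installed in the environment):
#   make checkstyle                    # style and formatting checks only
#   make test                          # correctness tests only
#   make test-convergence              # convergence tests only
#   make run-benchmarks OVERWRITE=1    # rerun all benchmark scripts, overwriting existing data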