-
Notifications
You must be signed in to change notification settings - Fork 9
/
Copy pathMakefile
104 lines (96 loc) · 3.49 KB
/
Makefile
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
# Command targets (not files) — declared phony so a same-named file can never
# shadow them. NOTE: `download_models` was missing from the original list.
.PHONY: gradient_job dataset sequence_data download_models

#################################################################################
# GLOBALS                                                                       #
#################################################################################

# Settings file forwarded to src/sequences.py (quotes are part of the value
# but harmless when passed through as a CLI argument).
SETTINGS_FILE := "settings_local.json"
# Paperspace Gradient submission parameters.
GRADIENT_JOB := "attention_numbers__round-number-easy"
GRADIENT_PROJECT := pr1hc80pw
GRADIENT_MACHINE := GPU+
GRADIENT_TEAM := "tensor-league"
# Extract the API key for $(GRADIENT_TEAM) from the Paperspace credentials
# file: the line after the team header, everything right of the first `=`.
# `:=` runs the shell pipeline exactly once, at parse time.
GRADIENT_API_KEY := $(shell grep -A1 $(GRADIENT_TEAM) ~/.paperspace/credentials | tail -1 | cut -d= -f2-)

#################################################################################
# COMMANDS                                                                      #
#################################################################################
## Submit Gradient job
# Submits the training job to Paperspace Gradient, then removes the upload
# archive. The original chained `; rm deep-math.zip`, which made the recipe's
# exit status that of `rm` — a failed submission still looked like success.
# Here the submit status is captured, cleanup runs unconditionally, and the
# captured status is re-raised so `make` fails when the submission fails.
gradient_job:
	gradient jobs create \
		--name $(GRADIENT_JOB) \
		--projectId $(GRADIENT_PROJECT) \
		--container tensorflow/tensorflow:2.0.0a0-gpu-py3-jupyter \
		--machineType $(GRADIENT_MACHINE) \
		--command "/paperspace/run_script.sh" \
		--ignoreFiles "raw,processed,env" \
		--apiKey $(GRADIENT_API_KEY); \
	status=$$?; rm -f deep-math.zip; exit $$status
## Downloads the synthetic dataset and extracts to data/raw
# One shell, commands chained with `&&`: the original ran three independent
# shells (each re-`cd`ing) joined by `;`, so a failed download still attempted
# the extract and then deleted a nonexistent archive. Now any failure aborts
# the recipe immediately and the archive is only removed after a clean extract.
dataset:
	mkdir -p data/raw/
	cd data/raw/ \
		&& gsutil cp gs://mathematics-dataset/mathematics_dataset-v1.0.tar.gz . \
		&& tar -xvzf mathematics_dataset-v1.0.tar.gz \
		&& rm mathematics_dataset-v1.0.tar.gz
# Converts the extracted question/answer text files into model-ready sequence
# data; all paths and options come from $(SETTINGS_FILE) — see src/sequences.py.
# (This comment sits ABOVE the `## ` line on purpose: the help target's sed
# script treats the line following a `## ` doc comment as the target name.)
## Processes raw question-answer pairs into form needed for training models
sequence_data:
	python src/sequences.py --settings $(SETTINGS_FILE)
## Downloads pre-trained models from GitHub releases
# One shell, `&&`-chained: the original used three independent `cd src/; …`
# shells, so a failed `wget` or `unzip` still executed `rm models.zip`.
# Failures now stop the recipe, and the archive is deleted only after a
# successful extraction.
download_models:
	mkdir -p src/models/
	cd src/ \
		&& wget https://github.com/lvwerra/deep-math/releases/download/v0.1/models.zip \
		&& unzip models.zip \
		&& rm models.zip
#################################################################################
# Self Documenting Commands                                                     #
#################################################################################

# Running bare `make` prints the help listing instead of building anything.
.DEFAULT_GOAL := help

# Inspired by <http://marmelab.com/blog/2016/02/29/auto-documented-makefile.html>
# sed script explained:
# /^##/:
#	* save line in hold space
#	* purge line
#	* Loop:
#		* append newline + line to hold space
#		* go to next line
#		* if line starts with doc comment, strip comment character off and loop
#	* remove target prerequisites
#	* append hold space (+ newline) to line
#	* replace newline plus comments by `---`
#	* print line
# Separate expressions are necessary because labels cannot be delimited by
# semicolon; see <http://stackoverflow.com/a/11799865/1968>
.PHONY: help
help:
	@echo "$$(tput bold)Available rules:$$(tput sgr0)"
	@echo
	@sed -n -e "/^## / { \
		h; \
		s/.*//; \
		:doc" \
		-e "H; \
		n; \
		s/^## //; \
		t doc" \
		-e "s/:.*//; \
		G; \
		s/\\n## /---/; \
		s/\\n/ /g; \
		p; \
	}" ${MAKEFILE_LIST} \
	| LC_ALL='C' sort --ignore-case \
	| awk -F '---' \
		-v ncol=$$(tput cols) \
		-v indent=19 \
		-v col_on="$$(tput setaf 6)" \
		-v col_off="$$(tput sgr0)" \
	'{ \
		printf "%s%*s%s ", col_on, -indent, $$1, col_off; \
		n = split($$2, words, " "); \
		line_length = ncol - indent; \
		for (i = 1; i <= n; i++) { \
			line_length -= length(words[i]) + 1; \
			if (line_length <= 0) { \
				line_length = ncol - indent - length(words[i]) - 1; \
				printf "\n%*s ", -indent, " "; \
			} \
			printf "%s ", words[i]; \
		} \
		printf "\n"; \
	}' \
	| more $(shell test $(shell uname) = Darwin && echo '--no-init --raw-control-chars')