# config.py
from os.path import join
import pickle as pkl
import cv2
import os
import time
# Define paths to each dataset
data_path = "data"
BADJA_PATH = "data/BADJA"
STANFORD_EXTRA_PATH = "data/StanfordExtra"
REPLICANT_PATH = "data/replicAnt_trials/SMIL_COCO"
OUTPUT_DIR = "checkpoints/{0}".format(time.strftime("%Y%m%d-%H%M%S"))
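# e.g. a run started on 2024-01-01 at 12:00:00 writes to "checkpoints/20240101-120000"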
CROP_SIZE = 512 # image resolution for output
VIS_FREQUENCY = 50 # number of iterations between generated model plots
GPU_IDS = "0" # GPU number to run on (ignored when running on the CPU)
# Run settings (I wouldn't recommend changing these unless you have good reason)
FORCE_SMAL_PRIOR = False # If False, allow the more recent Unity-based shape prior for dogs; set True to force the original SMAL prior.
ALLOW_LIMB_SCALING = True # Allow scaling parameters, see Who Left the Dogs Out?
# The following line was moved up here to keep it in one place with the other prior data
UNITY_SHAPE_PRIOR = join(data_path, 'priors', 'unity_betas.npz')
# Sequence/Image Settings
SHAPE_FAMILY = -1 # Choose from Cat (e.g. House Cat/Tiger/Lion), Canine (e.g. Dog/Wolf), Equine (e.g. Horse/Zebra), Bovine (e.g. Cow), Hippo
# SEQUENCE_OR_IMAGE_NAME = "badja:rs_dog"
# SEQUENCE_OR_IMAGE_NAME = "stanfordextra:n02099601-golden_retriever/n02099601_176.jpg"
SEQUENCE_OR_IMAGE_NAME = "replicAnt:SMIL_09_synth.jpg"
IMAGE_RANGE = range(0, 1) # frames to process from the sequence; ignored for StanfordExtra
WINDOW_SIZE = 10 # number of frames processed in one go
# Generate video settings
EPOCH_NAME = "st10_ep0" # convention used for the final output file. Don't change this without good reason.
# SMAL
SMAL_MODEL_PATH = join(data_path, 'SMALST', 'smpl_models')
#SMAL_FILE = join(SMAL_MODEL_PATH, 'my_smpl_00781_4_all.pkl')
# custom elements added:
# SMAL_FILE = join("3D_model_prep", 'smpl_ATTA.pkl')
SMAL_FILE = join("3D_model_prep", 'SMPL_fit.pkl')
ignore_sym = True # ignore the provided symmetry file when using custom models
ignore_hardcoded_body = True # ignore the hard-coded model joints in this config file and use what is contained in the SMPL file
PLOT_RESULTS = True # only applies to 3D fitting (fitter_3d/optimise.py)
DEBUG = False # use to get A LOT of "useful" messages
# DATALOADER
IMG_RES = 512
# RENDERER
MESH_COLOR = [0, 172, 223]
# OPTIMIZER - You may need to adjust these depending on the sequence.
OPT_WEIGHTS = [
    [25.0, 10.0, 7.5, 5.0],        # Joint
    [0.0, 500.0, 1000.0, 1000.0],  # Sil Reproj
    [0.0, 1.0, 1.0, 1.0],          # Betas
    [0.0, 1.0, 1.0, 1.0],          # Pose
    [0.0, 100.0, 100.0, 100.0],    # Joint limits, suppressed in the original SMALify code!
    [0.0, 0.1, 0.1, 0.1],          # Splay
    [500.0, 100.0, 100.0, 100.0],  # Temporal
    [600, 400, 600, 600],          # Num iterations
    [9e-2, 5e-3, 5e-4, 2e-4]]      # Learning Rate
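# Layout sketch: each row above is one loss term / setting and, presumably, each column one
# optimisation stage. Hypothetical example (not code from the fitter itself) reading stage 1:
#   w_joint, w_sil, w_betas, w_pose, w_limit, w_splay, w_temp, n_iter, lr = \
#       [row[1] for row in OPT_WEIGHTS]
#   # -> 10.0, 500.0, 1.0, 1.0, 100.0, 0.1, 100.0, 400, 5e-3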
if ignore_hardcoded_body:
    # This is NOT a great place for reading this in, but unless the hard-coded dog configuration is
    # hidden elsewhere, it is the only low-effort way to get the info dynamically without making
    # bigger changes in smal_torch.py and co.
    # Read in the SMPL file (encoding='latin1' is required to load pickles written under Python 2)
    with open(SMAL_FILE, 'rb') as f:
        u = pkl._Unpickler(f)
        u.encoding = 'latin1'
        dd = u.load()
    # get J_names | names of joints
    joint_names = dd["J_names"]
    if DEBUG:
        print(joint_names)
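    # For reference: SMPL-family .pkl model files typically contain entries such as 'v_template',
    # 'shapedirs', 'J_regressor', 'weights', 'f' and 'kintree_table'; the custom model used here
    # additionally stores the joint names under 'J_names'.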
    # IDs of the joints used for the initial alignment (a subset of body and leg joints);
    # the mandibles are included as well to provide a sense of left vs right
    TORSO_JOINTS = [i for i, elem in enumerate(joint_names) if elem in ["b_a_1", "l_1_co_r", "l_1_co_l",
                                                                        "b_h", "ma_l", "ma_r"]]
    # wing joints (names starting with "w"), so the wings can be excluded
    WING_JOINTS = [i for i, elem in enumerate(joint_names) if elem.split("_")[0] == "w"]
    # all joints
    CANONICAL_MODEL_JOINTS = list(range(len(joint_names)))
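    # Illustration (hypothetical joint names, not the actual model file): with
    #   joint_names = ["b_h", "ma_l", "ma_r", "w_1_l", "w_1_r", "b_a_1"]
    # the selections above would give TORSO_JOINTS = [0, 1, 2, 5] and WING_JOINTS = [3, 4].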
    # joints to ignore (in case annotation conventions differ)
    IGNORE_JOINTS = ["b_t", "b_a_4", "b_a_5"] # temporary
    # -> replicAnt data places these in different locations than SMIL, see also smal_fitter/priors/joint_limits.py
    # use the same marker type for all joints
    MARKER_TYPE = [cv2.MARKER_STAR for i in range(len(CANONICAL_MODEL_JOINTS))]
    # make it a fun rainbow, no specifics
    MARKER_COLORS = [[int(255 - i * 255 / len(CANONICAL_MODEL_JOINTS)),
                      int(i * 255 / len(CANONICAL_MODEL_JOINTS)),
                      100]
                     for i in range(len(CANONICAL_MODEL_JOINTS))]
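    # (This yields a simple red-to-green gradient: roughly [255, 0, 100] for the first joint,
    # approaching [0, 255, 100] for the last.)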
    N_POSE = len(joint_names) - 1 # not including global rotation
    # WARNING -> Now overwritten in trainer.py line 78
    try:
        N_BETAS = dd["shapedirs"].shape[2] # number of SMAL shape parameters to optimize over
        if DEBUG:
            print("INFO: Found custom betas:", N_BETAS)
    except (KeyError, IndexError):
        # if no learned betas / shapedirs are provided, default to 20
        N_BETAS = 20
        if DEBUG:
            print("INFO: Using default num betas of", N_BETAS)
else: # use joint and plotting configuration of the default dog model
    if os.name == 'nt':
        ## If WINDOWS
        SMAL_DATA_FILE = join(SMAL_MODEL_PATH, 'my_smpl_data_00781_4_all_WIN.pkl')
        SMAL_UV_FILE = join(SMAL_MODEL_PATH, 'my_smpl_00781_4_all_template_w_tex_uv_001_WIN.pkl')
        SMAL_SYM_FILE = join(SMAL_MODEL_PATH, 'symIdx_WIN.pkl')
        if not os.path.exists(SMAL_FILE):
            print("Unable to find Windows file. Please run ./utilities/convert_smal_windows.ps1")
        # PRIORS
        WALKING_PRIOR_FILE = join(data_path, 'priors', 'walking_toy_symmetric_pose_prior_with_cov_35parts_WIN.pkl')
    else:
        SMAL_DATA_FILE = join(SMAL_MODEL_PATH, 'my_smpl_data_00781_4_all.pkl')
        SMAL_UV_FILE = join(SMAL_MODEL_PATH, 'my_smpl_00781_4_all_template_w_tex_uv_001.pkl')
        SMAL_SYM_FILE = join(SMAL_MODEL_PATH, 'symIdx.pkl')
        # PRIORS
        WALKING_PRIOR_FILE = join(data_path, 'priors', 'walking_toy_symmetric_pose_prior_with_cov_35parts.pkl')
    # JOINT DEFINITIONS
    TORSO_JOINTS = [2, 5, 8, 11, 12, 23]
    CANONICAL_MODEL_JOINTS = [
        10, 9, 8,   # upper_left [paw, middle, top]
        20, 19, 18, # lower_left [paw, middle, top]
        14, 13, 12, # upper_right [paw, middle, top]
        24, 23, 22, # lower_right [paw, middle, top]
        25, 31,     # tail [start, end]
        33, 34,     # ear base [left, right]
        35, 36,     # nose, chin
        38, 37,     # ear tip [left, right]
        39, 40,     # eyes [left, right]
        15, 15,     # withers, throat (TODO: withers is labelled the same as throat for now)
        28]         # tail middle
    # indicate invalid joints (i.e. not labelled) by -1
    BADJA_ANNOTATED_CLASSES = [
        14, 13, 12, # upper_left [paw, middle, top]
        24, 23, 22, # lower_left [paw, middle, top]
        10, 9, 8,   # upper_right [paw, middle, top]
        20, 19, 18, # lower_right [paw, middle, top]
        25, 31,     # tail [start, end] (note: missing the tail middle point)
        -1, -1,     # ear base [left, right]
        33, -1,     # nose, chin (note: missing the 'jaw base' point)
        36, 35,     # ear tip [left, right]
        -1, -1,     # eyes [left, right]
        -1, 15,     # withers, throat
        28]         # tail middle
    # Visualization
    MARKER_TYPE = [
        cv2.MARKER_TRIANGLE_DOWN, cv2.MARKER_STAR, cv2.MARKER_CROSS, # upper_left
        cv2.MARKER_TRIANGLE_DOWN, cv2.MARKER_STAR, cv2.MARKER_CROSS, # lower_left
        cv2.MARKER_TRIANGLE_DOWN, cv2.MARKER_STAR, cv2.MARKER_CROSS, # upper_right
        cv2.MARKER_TRIANGLE_DOWN, cv2.MARKER_STAR, cv2.MARKER_CROSS, # lower_right
        cv2.MARKER_CROSS, cv2.MARKER_TRIANGLE_DOWN,                  # tail
        cv2.MARKER_CROSS, cv2.MARKER_CROSS,                          # right_ear, left_ear
        cv2.MARKER_CROSS, cv2.MARKER_STAR,                           # nose, chin
        cv2.MARKER_TRIANGLE_DOWN, cv2.MARKER_TRIANGLE_DOWN,          # right_tip, left_tip
        cv2.MARKER_CROSS, cv2.MARKER_CROSS,                          # right_eye, left_eye
        cv2.MARKER_CROSS, cv2.MARKER_CROSS,                          # withers, throat
        cv2.MARKER_STAR]                                             # tail middle
    MARKER_COLORS = [
        [230, 25, 75], [230, 25, 75], [230, 25, 75],    # upper_left, red
        [255, 255, 25], [255, 255, 25], [255, 255, 25], # lower_left, yellow
        [60, 180, 75], [60, 180, 75], [60, 180, 75],    # upper_right, green
        [0, 130, 200], [0, 130, 200], [0, 130, 200],    # lower_right, blue
        [240, 50, 230], [240, 50, 230],                 # tail, magenta
        [255, 153, 204], [29, 98, 115],                 # left_ear, pink & right_ear, turquoise
        [245, 130, 48], [245, 130, 48],                 # nose, chin
        [255, 153, 204], [29, 98, 115],                 # left_tip, pink & right_tip, turquoise
        [0, 0, 0], [0, 0, 0],                           # right eye, left eye: black
        [128, 0, 0], [128, 0, 0],                       # withers, throat, maroon
        [240, 50, 230]]                                 # tail middle
    # TODO - remove N_POSE variable, as the number of joints should be taken from the input SMPL file instead of hard-coded
    N_POSE = 34 # not including global rotation
    # WARNING -> Now overwritten in trainer.py line 78
    N_BETAS = 20 # number of SMAL shape parameters to optimize over
    joint_names = [f"joint_{i}" for i in range(N_POSE)]
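# Minimal usage sketch (assumption: the fitting scripts, e.g. under smal_fitter/ and fitter_3d/,
# simply import this module and read the constants defined above):
#   import config
#   print(config.SMAL_FILE, config.N_BETAS, len(config.CANONICAL_MODEL_JOINTS))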