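"""Render novel views of hands with a trained model.

Loads a checkpoint, renders each batch of the novel-views test set
(optionally sharded across GPUs), saves the predicted images, and
compiles them into a GIF on rank 0.
"""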
from lib.config import cfg  # isort: split

import os

import torch
from torch.utils.data import DataLoader
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm

from lib.datasets.interhands_dataset import Dataset as IHDataset
from lib.datasets.novel_views_dataset import Dataset as NVDataset
from lib.networks import make_network
from lib.networks.renderer import make_renderer
from lib.utils.base_utils import init_dist, synchronize, to_cuda
from lib.utils.img_utils import fill_img
from lib.utils.net_utils import load_network_adaptive
from lib.utils.vis_utils import generate_gif, save_img


def run_evaluate():
    """Render novel views for every test batch and save the results to disk."""
    cfg.perturb = 0  # disable stochastic sampling perturbation for deterministic rendering
    # cfg.test_dataset.ratio = cfg.ratio
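
    # Build the network and load the checkpoint; adaptive loading is enabled for
    # novel-pose / animation tests and when a two-hand checkpoint must be remapped
    # to a single hand.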
    device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
    network = make_network(cfg).to(device)
    load_network_adaptive(
        network,
        cfg.trained_model_dir,
        resume=cfg.resume,
        epoch=cfg.test.epoch,
        strict=False,
        verbose=cfg.local_rank == 0,
        adaptive=cfg.aninerf_animation or cfg.test_novel_pose,
        latent_dim=cfg.latent_dim,
        both_to_single=cfg.is_interhand and cfg.train_dataset.hand_type == "both",
        device=device,
    )
    network.eval()
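
    # For two-hand data, wrap the novel-view dataset with the InterHands dataset;
    # otherwise instantiate the novel-view dataset directly.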
    if cfg.test_dataset.hand_type == "both":
        dataset = IHDataset(
            NVDataset, data_root=cfg.test_dataset.data_root, split=cfg.test_dataset.split, ratio=cfg.test_dataset.ratio
        )
    else:
        dataset = NVDataset(
            data_root=cfg.test_dataset.data_root,
            ratio=cfg.test_dataset.ratio,
            hand_type=cfg.test_dataset.hand_type,
        )
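
    # Shard the dataset across ranks (without shuffling) when running distributed evaluation.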
    if cfg.distributed:
        sampler = DistributedSampler(dataset, shuffle=False)
        data_loader = DataLoader(
            dataset, batch_size=1, shuffle=False, num_workers=cfg.train.num_workers, pin_memory=True, sampler=sampler
        )
    else:
        data_loader = DataLoader(dataset=dataset, num_workers=cfg.train.num_workers)

    renderer = make_renderer(cfg, network)
    # evaluator = make_evaluator(cfg)
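
    # Compose the output directory: novel-pose InterHand runs are keyed by the last
    # two components of the data root, and SR / downscaled runs get a name suffix.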
    if cfg.is_interhand and cfg.test_novel_pose and not cfg.aninerf_animation:
        result_dir = os.path.join(cfg.result_dir, "-".join(cfg.test_dataset.data_root.split("/")[-2:]))
    else:
        result_dir = cfg.result_dir
    if cfg.use_sr != "none":
        result_dir += f"-sr_{cfg.use_sr}"
    elif not cfg.eval_full_img and cfg.test_dataset.ratio != 1:
        result_dir += f"-ratio_{cfg.test_dataset.ratio}"
    result_dir = os.path.join(result_dir, "novel", f"{cfg.render_type}_{cfg.render_view}_{len(dataset)}")
    os.makedirs(result_dir, exist_ok=True)
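
    # Render every batch without gradients and save each predicted image.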
    for batch in tqdm(data_loader, dynamic_ncols=True, disable=cfg.local_rank != 0):
        batch = to_cuda(batch, device)
        with torch.no_grad():
            output = renderer.render(batch)
            # evaluator.evaluate(output, batch)
        rgb_pred = output["rgb_map"][0].detach().cpu().numpy()
        H, W = batch["H"].item(), batch["W"].item()
        if (cfg.use_neural_renderer and cfg.neural_renderer_type in ("cnn_sr", "eg3d_sr")) or cfg.use_sr != "none":
            # Super-resolved predictions already cover the full upscaled frame.
            img_pred = rgb_pred.reshape((batch["H_sr"].item(), batch["W_sr"].item(), 3))
        else:
            # Predictions only exist inside the bounding-box mask; scatter them
            # back into a full H x W image.
            mask_at_box = batch["mask_at_box"][0].detach().cpu().numpy()
            img_pred = fill_img(rgb_pred, mask_at_box.reshape(H, W))
        save_img(img_pred, result_dir, cfg.render_type, int(batch["frame_index"]), int(batch["view_index"]))

    synchronize()
    if cfg.local_rank == 0:
        print(f"images saved in '{result_dir}'")
        # `batch` here is the last batch from the render loop above.
        generate_gif(result_dir, cfg.render_type, cfg.render_view, int(batch["frame_index"]))


if __name__ == "__main__":
    if cfg.distributed:
        init_dist()
    run_evaluate()