-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathdemo_video.py
94 lines (68 loc) · 2.89 KB
/
demo_video.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
import os
import sys

# Make project-relative modules importable. This MUST happen before the
# local imports below — in the original the append ran after them, so it
# had no effect unless the script was already launched from the right
# working directory.
sys.path.append(os.path.join(os.path.dirname(__file__), ".."))

import argparse
import time

import cv2

from config_reader import read_config
from processing import extract_parts
from output import draw
from model.cmu_model import get_testing_model

# Timestamp suffix (e.g. "-05-21-14-30-59") appended to the output file
# name so each run produces a unique file.
start_datetime = time.strftime("-%m-%d-%H-%M-%S", time.localtime())
if __name__ == '__main__':
    # Run 2D pose estimation over a video file, drawing detected body parts
    # on every analyzed frame and writing the result to videos/outputs/.
    parser = argparse.ArgumentParser()
    parser.add_argument('--video', type=str, required=True, help='input video file name')
    parser.add_argument('--model', type=str, default='model/keras/model.h5', help='path to the weights file')
    parser.add_argument('--frame_ratio', type=int, default=1, help='analyze every [n] frames')
    parser.add_argument('--process_speed', type=int, default=4,
                        help='Int 1 (fastest, lowest quality) to 4 (slowest, highest quality)')
    parser.add_argument('--end', type=int, default=None, help='Last video frame to analyze')
    args = parser.parse_args()

    keras_weights_file = args.model
    frame_rate_ratio = args.frame_ratio
    process_speed = args.process_speed
    ending_frame = args.end

    print('start processing...')

    # Video input
    video = args.video
    video_path = 'videos/'
    video_file = video_path + video

    # Output location; create the directory up front — cv2.VideoWriter
    # fails silently (writes nothing) if the target directory is missing.
    output_path = 'videos/outputs/'
    output_format = '.mp4'
    video_output = output_path + video + str(start_datetime) + output_format
    os.makedirs(output_path, exist_ok=True)

    # load model
    # authors of original model don't use
    # vgg normalization (subtracting mean) on input images
    model = get_testing_model()
    model.load_weights(keras_weights_file)

    # load config
    params, model_params = read_config()

    # Video reader — fail fast with a clear message instead of crashing
    # later on orig_image.shape with an AttributeError when the file is
    # missing or unreadable.
    cam = cv2.VideoCapture(video_file)
    if not cam.isOpened():
        sys.exit('Error: could not open video file %s' % video_file)
    input_fps = cam.get(cv2.CAP_PROP_FPS)
    ret_val, orig_image = cam.read()
    if not ret_val or orig_image is None:
        cam.release()
        sys.exit('Error: could not read the first frame of %s' % video_file)
    video_length = int(cam.get(cv2.CAP_PROP_FRAME_COUNT))
    if ending_frame is None:
        ending_frame = video_length

    # Video writer — output fps is reduced because only every
    # frame_rate_ratio-th frame is written.
    output_fps = input_fps / frame_rate_ratio
    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
    out = cv2.VideoWriter(video_output, fourcc, output_fps,
                          (orig_image.shape[1], orig_image.shape[0]))

    # Restrict the multi-scale search to the requested speed/quality
    # trade-off (1 scale = fastest, 4 scales = best quality).
    scale_search = [1, .5, 1.5, 2]  # [.5, 1, 1.5, 2]
    scale_search = scale_search[0:process_speed]
    params['scale_search'] = scale_search

    i = 0  # frame counter; default is 0
    try:
        while cam.isOpened() and ret_val and i < ending_frame:
            if i % frame_rate_ratio == 0:
                # cv2 decodes frames as BGR; swap channels for the model.
                # (COLOR_BGR2RGB is the same transform the original applied
                # under the misleading name COLOR_RGB2BGR.)
                input_image = cv2.cvtColor(orig_image, cv2.COLOR_BGR2RGB)

                tic = time.time()

                # generate image with body parts
                subsets, candidates = extract_parts(input_image, params, model, model_params)
                canvas = draw(orig_image, subsets, candidates)

                print('Processing frame: ', i)
                toc = time.time()
                print('processing time is %.5f' % (toc - tic))

                out.write(canvas)

            ret_val, orig_image = cam.read()
            i += 1
    finally:
        # Always release the capture and writer so the output container is
        # finalized even if processing raises mid-run.
        cam.release()
        out.release()