backing up
rajdeep28 committed Feb 9, 2022
1 parent bf87ac3 commit d2fe952
Showing 4 changed files with 97 additions and 862 deletions.
383 changes: 0 additions & 383 deletions .ipynb_checkpoints/onnx_notebook-checkpoint.ipynb

This file was deleted.

1 change: 1 addition & 0 deletions label_map.txt
@@ -0,0 +1 @@
{"1": "class_1", "2": "class_2", "3": "class_3", "4": "class_4", "5": "class_5", "6": "class_6"}
192 changes: 96 additions & 96 deletions onnx_deployment.py
@@ -8,7 +8,8 @@
import time
import pathlib
import onnxruntime as rt
import json
import pandas as pd
CWD_PATH = os.getcwd()

class stop_sign_onnx():
@@ -17,66 +18,63 @@ class stop_sign_onnx():
    def __init__(self, onnx_path, classes):

        self.sess = rt.InferenceSession(onnx_path)  # load the ONNX model once
        self.classes = classes                      # id -> name mapping from label_map.txt
        self.threshold = 0.5                        # minimum score for a detection to be kept

    def sess_run(self, img_data, sess):
        # we want the outputs in this order
        outputs = ["num_detections", "detection_boxes", "detection_scores", "detection_classes"]
        result = self.sess.run(outputs, {"input_tensor": img_data})
        return result

    def creating_json_file(self, tail):  # json file named after the input video
        json_name = tail[:-4]
        json_name = json_name + '.json'
        return json_name


    def coordinates(self, draw, d):
        """Convert one normalized box to integer pixel coordinates."""
        width, height = draw.im.size
        print('width :', width)
        print('height :', height)
        # the box is relative to the image size so we multiply with height and width to get pixels.
        top = d[0] * height
        left = d[1] * width
        bottom = d[2] * height
        right = d[3] * width
        top = int(max(0, np.floor(top + 0.5).astype('int32')))
        left = int(max(0, np.floor(left + 0.5).astype('int32')))
        bottom = int(min(height, np.floor(bottom + 0.5).astype('int32')))
        right = int(min(width, np.floor(right + 0.5).astype('int32')))
        return top, left, bottom, right


    def json_dump(self, draw, d, c, s, img, frame_number, json_name):
        """Append one detection to the per-video json file."""
        top, left, bottom, right = self.coordinates(draw, d)
        label = self.classes[c[0]]
        label1 = label + " : " + str(s)
        print('objects : ', label1)
        print('---\\\\-----detected_sign_frame---\\\\-----', frame_number)
        pixel_location = [left, top, right, bottom]

        entry = {
            "timestampe": frame_number,
            "object": [{
                "objName": label,
                "confidenceLevel": float(s),
                "pixelLocation": pixel_location,
            }]
        }
        # entries are appended one JSON object after another, not as a single JSON array
        with open(json_name, 'a', encoding='utf-8') as f:
            json.dump(entry, f)



    def draw_detection(self, draw, d, c, s, img):
        """Draw box and label for 1 detection."""
        top, left, bottom, right = self.coordinates(draw, d)
        label = self.classes[c[0]]
        s = str(round(s, 2))
        label = label + " : " + s
        print('objects : ', label)
        label_size = draw.textsize(label)
        if top - label_size[1] >= 0:
            text_origin = tuple(np.array([left, top - label_size[1]]))
@@ -89,79 +87,81 @@ def draw_detection(self, draw, d, c, s, img):
        img = np.array(img)
        return img

    def _inference_(self, img_array, frame_number, json_name):

        img = Image.fromarray(img_array)
        img_data = np.expand_dims(img_array.astype(np.uint8), axis=0)
        result = self.sess_run(img_data, self.sess)
        num_detections = result[0]
        detection_boxes = result[1]
        detection_scores = result[2]
        detection_classes = result[3]
        batch_size = num_detections.shape[0]
        draw = ImageDraw.Draw(img)
        out = np.array(img)  # default to the unannotated frame if nothing is drawn
        for batch in range(0, batch_size):
            for detection in range(0, int(num_detections[batch])):
                if detection_scores[0][detection] > self.threshold:
                    c = str(detection_classes[batch][detection])
                    d = detection_boxes[batch][detection]
                    s = detection_scores[0][detection]
                    self.json_dump(draw, d, c, s, img, frame_number, json_name)
                    if debug == True:  # debug is a module-level flag set in __main__
                        out = self.draw_detection(draw, d, c, s, img)
        return out

    def video_dict(self, cap):
        # timestamp bookkeeping is not wired up yet: pd.Timestamp needs a start
        # time string before these lines can run, so they are left disabled here
        # utc_clock = pd.Timestamp(timestamp_str, tz='utc')
        # milliseconds_delta = pd.Timedelta(100, unit='milli')
        while cap.isOpened():
            ret, img = cap.read()
            if not ret: break
            frame_number = cap.get(cv2.CAP_PROP_POS_FRAMES)
            print('--frame_number--', frame_number)
            self._inference_(img, frame_number, json_name)
        cap.release()

    def video_debug(self, cap):
        # output_video_dir, video_name, frame_width, frame_height and json_name
        # are module-level globals set in __main__
        output_path = output_video_dir + '/output_' + video_name
        print('output_path :', output_path)
        out = cv2.VideoWriter(output_path, cv2.VideoWriter_fourcc(*"mp4v"), 30, (frame_width, frame_height))
        while cap.isOpened():
            ret, img = cap.read()
            if not ret: break
            frame_number = cap.get(cv2.CAP_PROP_POS_FRAMES)
            print('--frame_number--', frame_number)
            img = self._inference_(img, frame_number, json_name)
            out.write(img)
        cap.release()
        out.release()


if __name__ == '__main__':

    input_video_path = 'test.mp4'
    output_video_dir = 'output_videos_dir'
    onnx_path = 'model.onnx'
    debug = True  # True: write an annotated output video, False: only dump detections to json
    classes = json.load(open("label_map.txt"))

    video_path, video_name = os.path.split(input_video_path)
    cap = cv2.VideoCapture(input_video_path)

    frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

    property_id = int(cv2.CAP_PROP_FRAME_COUNT)
    total_number_of_frames = int(cv2.VideoCapture.get(cap, property_id))
    print('--total_number_of_frames--', total_number_of_frames)
    json_name = video_name[:-4] + '.json'
    print('----------json------', json_name)

    onnx = stop_sign_onnx(onnx_path, classes)
    if debug == False:
        onnx.video_dict(cap)
    else:
        onnx.video_debug(cap)
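Because json_dump() appends one JSON object per detection with no separator, the resulting file is not a single JSON document. A minimal read-back sketch (not part of this commit); the file name 'test.json' follows from input_video_path = 'test.mp4' above, and the helper name is an assumption.

```python
import json

def read_detections(json_name):
    """Read back a file written by json_dump(): JSON objects appended with no separator."""
    decoder = json.JSONDecoder()
    with open(json_name, "r", encoding="utf-8") as f:
        text = f.read()
    entries, idx = [], 0
    while idx < len(text):
        if text[idx].isspace():  # tolerate stray whitespace between objects
            idx += 1
            continue
        entry, idx = decoder.raw_decode(text, idx)
        entries.append(entry)
    return entries

# e.g. the keys match the entry structure built in json_dump()
for e in read_detections("test.json"):
    print(e["timestampe"], [o["objName"] for o in e["object"]])
```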


