From e7f5457ca17244111b992945bdfa6cb5d64cf8a0 Mon Sep 17 00:00:00 2001
From: Sakshay Mahna
Date: Sun, 26 Jun 2022 08:29:37 +0530
Subject: [PATCH 1/3] Add Camera Position Parameters

---
 .../configs/default-multiple-robustness.yml   | 62 +++++++++++++++++++
 behavior_metrics/driver.py                    |  5 +-
 behavior_metrics/utils/script_manager.py      |  5 +-
 behavior_metrics/utils/tmp_world_generator.py | 50 ++++++++++++++-
 4 files changed, 116 insertions(+), 6 deletions(-)
 create mode 100644 behavior_metrics/configs/default-multiple-robustness.yml

diff --git a/behavior_metrics/configs/default-multiple-robustness.yml b/behavior_metrics/configs/default-multiple-robustness.yml
new file mode 100644
index 00000000..e0655516
--- /dev/null
+++ b/behavior_metrics/configs/default-multiple-robustness.yml
@@ -0,0 +1,62 @@
+Behaviors:
+    Robot:
+        Sensors:
+            Cameras:
+                Camera_0:
+                    Name: 'camera_0'
+                    Topic: '/F1ROS/cameraL/image_raw'
+                    CameraConfig:
+                        Translation: -0.2
+                        Rotation: 0
+            Pose3D:
+                Pose3D_0:
+                    Name: 'pose3d_0'
+                    Topic: '/F1ROS/odom'
+        Actuators:
+            Motors:
+                Motors_0:
+                    Name: 'motors_0'
+                    Topic: '/F1ROS/cmd_vel'
+                    MaxV: 3
+                    MaxW: 0.3
+        BrainPath: ['brains/f1/brain_f1_keras_seq_3_opencv_dataset.py']
+        PilotTimeCycle: 50
+        Parameters:
+            Model: ['frankenstein.h5']
+            ImageCropped: True
+            ImageSize: [100,50]
+            ImageNormalized: True
+            PredictionsNormalized: True
+            GPU: True
+        Type: 'f1'
+        Experiment:
+            Name: "Experiment name"
+            Description: "Experiment description"
+            Timeout: [50]
+            Repetitions: 1
+    Simulation:
+        World: ['/opt/jderobot/share/jderobot/gazebo/launch/simple_circuit.launch']
+        RealTimeUpdateRate: 100
+    Dataset:
+        In: '/tmp/my_bag.bag'
+        Out: ''
+    Stats:
+        Out: './'
+        PerfectLap: ['./perfect_bags/lap-simple-circuit.bag']
+    Layout:
+        Frame_0:
+            Name: frame_0
+            Geometry: [1, 1, 2, 2]
+            Data: rgbimage
+        Frame_1:
+            Name: frame_1
+            Geometry: [0, 1, 1, 1]
+            Data: rgbimage
+        Frame_2:
+            Name: frame_2
+            Geometry: [0, 2, 1, 1]
+            Data: rgbimage
+        Frame_3:
+            Name: frame_3
+            Geometry: [0, 3, 3, 1]
+            Data: rgbimage
diff --git a/behavior_metrics/driver.py b/behavior_metrics/driver.py
index fbe4703c..fa8dfcde 100644
--- a/behavior_metrics/driver.py
+++ b/behavior_metrics/driver.py
@@ -175,9 +175,10 @@ def main():
     if app_configuration.current_world and not config_data['script']:
         logger.debug('Launching Simulation... please wait...')
         if config_data['random']:
+            camera_config = app_configuration.sensors['Cameras']['Camera_0']['CameraConfig']
             tmp_world_generator(app_configuration.current_world, app_configuration.stats_perfect_lap,
-                                app_configuration.real_time_update_rate, randomize=True,
-                                gui=True, launch=False)
+                                app_configuration.real_time_update_rate, camera_config,
+                                randomize=True, gui=True, launch=False)
             app_configuration.current_world = 'tmp_circuit.launch'
         environment.launch_env(app_configuration.current_world)
 
diff --git a/behavior_metrics/utils/script_manager.py b/behavior_metrics/utils/script_manager.py
index 322a3c94..fdeb7d3b 100644
--- a/behavior_metrics/utils/script_manager.py
+++ b/behavior_metrics/utils/script_manager.py
@@ -39,9 +39,10 @@ def run_brains_worlds(app_configuration, controller, randomize=False):
         for brain_counter, brain in enumerate(app_configuration.brain_path):
             repetition_counter = 0
             while repetition_counter < app_configuration.experiment_repetitions:
+                camera_config = app_configuration.sensors['Cameras']['Camera_0']['CameraConfig']
                 tmp_world_generator(world, app_configuration.stats_perfect_lap[world_counter],
-                                    app_configuration.real_time_update_rate, randomize=randomize, gui=False,
-                                    launch=True)
+                                    app_configuration.real_time_update_rate, camera_config,
+                                    randomize=randomize, gui=False, launch=True)
                 pilot = Pilot(app_configuration, controller, app_configuration.brain_path[brain_counter])
                 pilot.daemon = True
                 pilot.real_time_update_rate = app_configuration.real_time_update_rate
diff --git a/behavior_metrics/utils/tmp_world_generator.py b/behavior_metrics/utils/tmp_world_generator.py
index 95b81450..a6e59e62 100644
--- a/behavior_metrics/utils/tmp_world_generator.py
+++ b/behavior_metrics/utils/tmp_world_generator.py
@@ -25,6 +25,7 @@
 import random
 import sys
 import math
+import shutil
 
 import numpy as np
 
@@ -33,8 +34,8 @@
 from utils.logger import logger
 
 
-def tmp_world_generator(current_world, stats_perfect_lap, real_time_update_rate, randomize=False, gui=False,
-                        launch=False):
+def tmp_world_generator(current_world, stats_perfect_lap, real_time_update_rate, camera_configuration,
+                        randomize=False, gui=False, launch=False):
     environment.close_gazebo()
     tree = ET.parse(current_world)
     root = tree.getroot()
@@ -98,7 +99,17 @@ def tmp_world_generator(current_world, stats_perfect_lap, real_time_update_rate,
     real_time_update_rate_element = ET.SubElement(physics_element, 'real_time_update_rate')
     real_time_update_rate_element.text = str(real_time_update_rate)  # 1000 is the default value
 
+    # Make Model based Changes
+    tmp_model_generator(camera_configuration)
+    for child_1 in root[0]:
+        if child_1.tag == 'include':
+            for child_2 in child_1:
+                if child_2.text == 'model://f1_renault':
+                    child_2.text = 'model://f1_temp'
+                    break
+
     tree.write('tmp_world.launch')
+
     if launch:
         try:
             with open("/tmp/.roslaunch_stdout.log", "w") as out, open("/tmp/.roslaunch_stderr.log", "w") as err:
@@ -111,3 +122,38 @@ def tmp_world_generator(current_world, stats_perfect_lap, real_time_update_rate,
 
     # give gazebo some time to initialize
     time.sleep(5)
+
+
+def tmp_model_generator(camera_config):
+    # Create folder for Model
+    try:
+        os.mkdir('/opt/jderobot/share/jderobot/gazebo/models/f1_temp')
+    except FileExistsError:
+        pass
+
+    model_dir = '/opt/jderobot/share/jderobot/gazebo/models/f1_renault'
+
+    # Copy model files
+    shutil.copyfile(model_dir + '/model.config', '/opt/jderobot/share/jderobot/gazebo/models/f1_temp/model.config')
+    shutil.copyfile(model_dir + '/model.sdf', '/opt/jderobot/share/jderobot/gazebo/models/f1_temp/model.sdf')
+
+    # Make changes to model.sdf
+    tree = ET.parse('/opt/jderobot/share/jderobot/gazebo/models/f1_temp/model.sdf')
+    root = tree.getroot()
+
+    # Change model name
+    root[0].set('name', 'f1_temp')
+
+    # Change Pose
+    for child in root[0][2]:
+        if ('name' in child.attrib) and \
+            (child.attrib['name'] == 'left_cam' or child.attrib['name'] == 'cam_f1_left'):
+            pose = [float(p) for p in child[0].text.split(' ')]
+            pose[1] = pose[1] + camera_config['Translation']
+            pose[4] = pose[4] + camera_config['Rotation']
+            child[0].text = ' '.join(map(str, pose))
+    # Save
+    tree.write('/opt/jderobot/share/jderobot/gazebo/models/f1_temp/model.sdf')
+
+if __name__ == "__main__":
+    tmp_model_generator({'Translation': -0.2, 'Rotation': 0})  # sample CameraConfig values from default-multiple-robustness.yml

From 7ebedc29ae7f2c3b7a681a7a6acb2fd367c653a4 Mon Sep 17 00:00:00 2001
From: Sakshay Mahna
Date: Sun, 26 Jun 2022 08:32:41 +0530
Subject: [PATCH 2/3] Add Noise Parameters

---
 .../brains/f1/brain_f1_explicit_noise.py   | 189 ++++++++++++++++++
 behavior_metrics/configs/default-noise.yml |  53 +++++
 behavior_metrics/utils/noise.py            |  46 +++++
 3 files changed, 288 insertions(+)
 create mode 100644 behavior_metrics/brains/f1/brain_f1_explicit_noise.py
 create mode 100644 behavior_metrics/configs/default-noise.yml
 create mode 100644 behavior_metrics/utils/noise.py

diff --git a/behavior_metrics/brains/f1/brain_f1_explicit_noise.py b/behavior_metrics/brains/f1/brain_f1_explicit_noise.py
new file mode 100644
index 00000000..7a5aaa2c
--- /dev/null
+++ b/behavior_metrics/brains/f1/brain_f1_explicit_noise.py
@@ -0,0 +1,189 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+import numpy as np
+import threading
+import time
+import cv2
+
+from utils.noise import add_noise
+
+
+class Brain:
+
+    def __init__(self, sensors, actuators, handler, config=None):
+        self.camera = sensors.get_camera('camera_0')
+        self.motors = actuators.get_motor('motors_0')
+        self.handler = handler
+        self.config = config
+
+        self.x_middle_left_above = 0
+        self.deviation_left = 0
+        # self.iteration = 0
+        # self.json_data = []
+        self.lock = threading.Lock()
+
+    def update_frame(self, frame_id, data):
+        """Update the information to be shown in one of the GUI's frames.
+
+        Arguments:
+            frame_id {str} -- Id of the frame that will represent the data
+            data {*} -- Data to be shown in the frame. Depending on the type of frame (rgbimage, laser, pose3d, etc)
+        """
+        self.handler.update_frame(frame_id, data)
+
+    def check_center(self, position_x):
+        if len(position_x[0]) > 1:
+            x_middle = (position_x[0][0] + position_x[0][len(position_x[0]) - 1]) / 2
+            not_found = False
+        else:
+            # The center of the line is in position 326
+            x_middle = 326
+            not_found = True
+        return x_middle, not_found
+
+    def exception_case(self, x_middle_left_middle, deviation):
+        dif = x_middle_left_middle - self.x_middle_left_above
+
+        if abs(dif) < 80:
+            rotation = -(0.008 * deviation + 0.0005 * (deviation - self.deviation_left))
+        elif abs(dif) < 130:
+            rotation = -(0.0075 * deviation + 0.0005 * (deviation - self.deviation_left))
+        elif abs(dif) < 190:
+            rotation = -(0.007 * deviation + 0.0005 * (deviation - self.deviation_left))
+        else:
+            rotation = -(0.0065 * deviation + 0.0005 * (deviation - self.deviation_left))
+
+        speed = 5
+        return speed, rotation
+
+    def straight_case(self, deviation, dif):
+        if abs(dif) < 35:
+            rotation = -(0.0054 * deviation + 0.0005 * (deviation - self.deviation_left))
+            speed = 13
+        elif abs(dif) < 90:
+            rotation = -(0.0052 * deviation + 0.0005 * (deviation - self.deviation_left))
+            speed = 11
+        else:
+            rotation = -(0.0049 * deviation + 0.0005 * (deviation - self.deviation_left))
+            speed = 9
+
+        return speed, rotation
+
+    def curve_case(self, deviation, dif):
+        if abs(dif) < 50:
+            rotation = -(0.01 * deviation + 0.0006 * (deviation - self.deviation_left))
+        elif abs(dif) < 80:
+            rotation = -(0.0092 * deviation + 0.0005 * (deviation - self.deviation_left))
+        elif abs(dif) < 130:
+            rotation = -(0.0087 * deviation + 0.0005 * (deviation - self.deviation_left))
+        elif abs(dif) < 190:
+            rotation = -(0.008 * deviation + 0.0005 * (deviation - self.deviation_left))
+        else:
+            rotation = -(0.0075 * deviation + 0.0005 * (deviation - self.deviation_left))
+
+        speed = 5
+        return speed, rotation
+
+    def get_point(self, index, img):
+        mid = 0
+        if np.count_nonzero(img[index]) > 0:
+            left = np.min(np.nonzero(img[index]))
+            right = np.max(np.nonzero(img[index]))
+            mid = np.abs(left - right) / 2 + left
+        return int(mid)
+
+    def execute(self):
+        image = self.camera.getImage().data
+        if image.shape == (3, 3, 3):
+            time.sleep(3)
+
+        self.update_frame('frame_0', image)
+        # cv2.imwrite(PRETRAINED_MODELS + 'montmelo_data/' + str(self.iteration) + '.jpg', image)
+        # self.iteration += 1
+
+        try:
+            if self.config and 'NoiseType' in self.config:
+                image = add_noise(image, self.config['NoiseParams'],
+                                  noise_type = self.config['NoiseType'])
+                self.update_frame('frame_1', image)
+
+            image_cropped = image[230:, :, :]
+            image_hsv = cv2.cvtColor(image_cropped, cv2.COLOR_BGR2HSV)
+            lower_red = np.array([0, 50, 50])
+            upper_red = np.array([180, 255, 255])
+            image_mask = cv2.inRange(image_hsv, lower_red, upper_red)
+
+            # show image in gui -> frame_0
+
+            rows, cols = image_mask.shape
+            rows = rows - 1  # to avoid overflow
+
+            alt = 0
+            ff = cv2.reduce(image_mask, 1, cv2.REDUCE_SUM, dtype=cv2.CV_32S)
+            if np.count_nonzero(ff[:, 0]) > 0:
+                alt = np.min(np.nonzero(ff[:, 0]))
+
+            points = []
+            for i in range(3):
+                if i == 0:
+                    index = alt
+                else:
+                    index = rows // (2 * i)
+                points.append((self.get_point(index, image_mask), index))
+
+            points.append((self.get_point(rows, image_mask), rows))
+
+            # We convert to show it
+            # Shape gives us the number of rows and columns of an image
+            size = image_mask.shape
+            rows = size[0]
+            columns = size[1]
+
+            # We look for the position on the x axis of the pixels that have value 1 in different rows
+            position_x_down = np.where(image_mask[points[3][1], :])
+            position_x_middle = np.where(image_mask[points[1][1], :])
+            position_x_above = np.where(image_mask[points[2][1], :])
+
+            # We check that white pixels have been located and whether the center is located
+            # In this way we can know if the car has left the circuit
+            x_middle_left_down, not_found_down = self.check_center(position_x_down)
+            x_middle_left_middle, not_found_middle = self.check_center(position_x_middle)
+
+            # We look if white pixels of the row above are located
+            if (len(position_x_above[0]) > 1):
+                self.x_middle_left_above = (position_x_above[0][0] + position_x_above[0][
+                    len(position_x_above[0]) - 1]) / 2
+                # We look at the deviation from the central position. The center of the line is in position cols/2
+                deviation = self.x_middle_left_above - (cols / 2)
+
+                # If the row below has been lost we have a different case, which we treat as an exception
+                if not_found_down:
+                    speed, rotation = self.exception_case(x_middle_left_middle, deviation)
+                else:
+                    # We check if the formula 1 is in a curve or on a straight
+                    dif = x_middle_left_down - self.x_middle_left_above
+                    x = float(((-dif) * (310 - 350))) / float(260 - 350) + x_middle_left_down
+
+                    if abs(x - x_middle_left_middle) < 2:
+                        speed, rotation = self.straight_case(deviation, dif)
+                    else:
+                        speed, rotation = self.curve_case(deviation, dif)
+
+                # We update the deviation
+                self.deviation_left = deviation
+            else:
+                # If the formula 1 leaves the red line, the line is searched
+                if self.x_middle_left_above > (columns / 2):
+                    rotation = -1
+                else:
+                    rotation = 1
+                speed = -0.6
+
+            self.motors.sendV(speed)
+            self.motors.sendW(rotation)
+            # self.json_data.append({'v': speed, 'w': rotation})
+            # with open(PRETRAINED_MODELS + 'montmelo_data/data.json', 'w') as outfile:
+            #     json.dump(self.json_data, outfile)
+
+        except Exception as err:
+            print(err)
diff --git a/behavior_metrics/configs/default-noise.yml b/behavior_metrics/configs/default-noise.yml
new file mode 100644
index 00000000..57a8fe75
--- /dev/null
+++ b/behavior_metrics/configs/default-noise.yml
@@ -0,0 +1,53 @@
+Behaviors:
+    Robot:
+        Sensors:
+            Cameras:
+                Camera_0:
+                    Name: 'camera_0'
+                    Topic: '/F1ROS/cameraL/image_raw'
+            Pose3D:
+                Pose3D_0:
+                    Name: 'pose3d_0'
+                    Topic: '/F1ROS/odom'
+        Actuators:
+            Motors:
+                Motors_0:
+                    Name: 'motors_0'
+                    Topic: '/F1ROS/cmd_vel'
+                    MaxV: 3
+                    MaxW: 0.3
+
+        BrainPath: 'brains/f1/brain_f1_explicit_noise.py'
+        PilotTimeCycle: 50
+        Parameters:
+            NoiseType: 'salt&pepper' # two options, gaussian (Mean, Std_Dev) and salt&pepper(Probability)
+            NoiseParams:
+                Mean: 0
+                Std_Dev: 5
+                Probability: 0.2
+        Type: 'f1'
+    Simulation:
+        World: /opt/jderobot/share/jderobot/gazebo/launch/simple_circuit.launch
+    Dataset:
+        In: '/tmp/my_bag.bag'
+        Out: ''
+    Stats:
+        Out: './'
+        PerfectLap: './perfect_bags/lap-simple-circuit.bag'
+    Layout:
+        Frame_0:
+            Name: frame_0
+            Geometry: [1, 1, 2, 2]
+            Data: rgbimage
+        Frame_1:
+            Name: frame_1
+            Geometry: [0, 1, 1, 1]
+            Data: rgbimage
+        Frame_2:
+            Name: frame_2
+            Geometry: [0, 2, 1, 1]
+            Data: rgbimage
+        Frame_3:
+            Name: frame_3
+            Geometry: [0, 3, 3, 1]
+            Data: rgbimage
diff --git a/behavior_metrics/utils/noise.py b/behavior_metrics/utils/noise.py
new file mode 100644
index 00000000..5e60a23f
--- /dev/null
+++ b/behavior_metrics/utils/noise.py
@@ -0,0 +1,46 @@
+#!/usr/bin/env python
+"""
+This module contains the code responsible for adding noise
+It can be in the images or the motor commands
+
+Note: Gazebo can't seem to simulate any noise other than Gaussian
+Separate noise module can simulate various types of noises
+"""
+import numpy as np
+import random
+
+def add_noise(image, config, noise_type = "gaussian"):
+    """
+    Function to add noise
+    """
+    if noise_type == "gaussian":
+        return gaussian_noise(image, config["Mean"], config["Std_Dev"])
+    elif noise_type == "salt&pepper":
+        return salt_and_pepper_noise(image, config["Probability"])
+    else:
+        return image
+
+def salt_and_pepper_noise(image, probability = 0.2):
+    """
+    Salt and Pepper Noise Function
+    """
+    output = image.copy()
+
+    salt = np.array([255, 255, 255], dtype='uint8')
+    pepper = np.array([0, 0, 0], dtype='uint8')
+
+    probs = np.random.random(output.shape[:2])
+    output[probs < (probability / 2)] = pepper
+    output[probs > 1 - (probability / 2)] = salt
+
+    return output.astype(np.uint8)
+
+def gaussian_noise(image, mean = 0, std_dev = 0.01):
+    """
+    Gaussian Noise
+    """
+    noise = np.random.normal(mean, std_dev, image.shape)
+    noise = noise.reshape(image.shape)
+    output = image + noise
+
+    return output.astype(np.uint8)
\ No newline at end of file

From d1c657e1d579519ad92233f478efa88763c219a0 Mon Sep 17 00:00:00 2001
From: Sakshay Mahna
Date: Thu, 30 Jun 2022 19:50:44 +0530
Subject: [PATCH 3/3] Add Noise to Image Handler Pipeline

---
 behavior_metrics/brains/brains_handler.py  | 12 ++++++++++--
 behavior_metrics/configs/default-noise.yml |  8 ++------
 behavior_metrics/utils/noise.py            | 18 +++++-------------
 3 files changed, 17 insertions(+), 21 deletions(-)

diff --git a/behavior_metrics/brains/brains_handler.py b/behavior_metrics/brains/brains_handler.py
index c691fd69..454d69dd 100755
--- a/behavior_metrics/brains/brains_handler.py
+++ b/behavior_metrics/brains/brains_handler.py
@@ -5,8 +5,11 @@
 from abc import abstractmethod
 from albumentations import (
-    Compose, Normalize, RandomRain, RandomBrightness, RandomShadow, RandomSnow, RandomFog, RandomSunFlare
+    Compose, Normalize, RandomRain, RandomBrightness, RandomShadow, RandomSnow, RandomFog, RandomSunFlare,
+    GaussNoise, Lambda
 )
+from utils.noise import salt_and_pepper_noise, gaussian_noise
+
 
 """ TODO: fix neural brains
 """
 
@@ -101,10 +104,15 @@ def transform_image(self, image, option):
             augmentation_option = Compose([RandomFog(always_apply=True)])
         elif option == 'sunflare':
             augmentation_option = Compose([RandomSunFlare(always_apply=True)])
+        elif option == 'gaussian':
+            augmentation_option = Compose([GaussNoise(var_limit = 1000, always_apply=True)])
+            # augmentation_option = Compose([Lambda(name = 'gaussian_noise', image = gaussian_noise, always_apply = True)])
+        elif option == 'salt&pepper':
+            augmentation_option = Compose([Lambda(name = 'salt_pepper_noise', image = salt_and_pepper_noise, always_apply = True)])
         transformed_image = augmentation_option(image=image)
         transformed_image = transformed_image["image"]
         return transformed_image
 
     @abstractmethod
     def execute(self):
-        pass
+        pass
\ No newline at end of file
diff --git a/behavior_metrics/configs/default-noise.yml b/behavior_metrics/configs/default-noise.yml
index 57a8fe75..1df00df0 100644
--- a/behavior_metrics/configs/default-noise.yml
+++ b/behavior_metrics/configs/default-noise.yml
@@ -17,14 +17,10 @@ Behaviors:
                     MaxV: 3
                     MaxW: 0.3
 
-        BrainPath: 'brains/f1/brain_f1_explicit_noise.py'
+        BrainPath: 'brains/f1/brain_f1_opencv.py'
         PilotTimeCycle: 50
         Parameters:
-            NoiseType: 'salt&pepper' # two options, gaussian (Mean, Std_Dev) and salt&pepper(Probability)
-            NoiseParams:
-                Mean: 0
-                Std_Dev: 5
-                Probability: 0.2
+            ImageTranform: 'gaussian'
         Type: 'f1'
     Simulation:
         World: /opt/jderobot/share/jderobot/gazebo/launch/simple_circuit.launch
diff --git a/behavior_metrics/utils/noise.py b/behavior_metrics/utils/noise.py
index 5e60a23f..b086b985 100644
--- a/behavior_metrics/utils/noise.py
+++ b/behavior_metrics/utils/noise.py
@@ -9,21 +9,11 @@
 import numpy as np
 import random
 
-def add_noise(image, config, noise_type = "gaussian"):
-    """
-    Function to add noise
-    """
-    if noise_type == "gaussian":
-        return gaussian_noise(image, config["Mean"], config["Std_Dev"])
-    elif noise_type == "salt&pepper":
-        return salt_and_pepper_noise(image, config["Probability"])
-    else:
-        return image
-
-def salt_and_pepper_noise(image, probability = 0.2):
+def salt_and_pepper_noise(image, **kwargs):
     """
     Salt and Pepper Noise Function
     """
+    probability = 0.2
     output = image.copy()
 
     salt = np.array([255, 255, 255], dtype='uint8')
     pepper = np.array([0, 0, 0], dtype='uint8')
@@ -35,10 +25,12 @@ def salt_and_pepper_noise(image, probability = 0.2):
 
     return output.astype(np.uint8)
 
-def gaussian_noise(image, mean = 0, std_dev = 0.01):
+def gaussian_noise(image, **kwargs):
     """
     Gaussian Noise
     """
+    mean = 0
+    std_dev = 10.0
     noise = np.random.normal(mean, std_dev, image.shape)
     noise = noise.reshape(image.shape)
     output = image + noise
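
For quick reference, the sketch below shows one way to exercise the noise utilities from these patches outside the simulator. It is a minimal example, not part of the patch series: it assumes the PATCH 3/3 version of behavior_metrics/utils/noise.py, that it is run from the behavior_metrics directory so utils.noise and albumentations resolve, and the synthetic grey frame is a stand-in for a real camera_0 image.

    import numpy as np
    from albumentations import Compose, Lambda

    from utils.noise import gaussian_noise, salt_and_pepper_noise

    # Synthetic stand-in for a camera frame (assumed 640x480, 3 channels).
    image = np.full((480, 640, 3), 128, dtype=np.uint8)

    # Direct calls: after PATCH 3/3 both functions take (image, **kwargs),
    # which is what lets them plug into albumentations' Lambda transform.
    noisy_gaussian = gaussian_noise(image)   # adds zero-mean Gaussian noise (std 10), cast back to uint8
    noisy_sp = salt_and_pepper_noise(image)  # flips roughly 20% of the pixels to pure black or white

    # Pipeline use, mirroring transform_image() in brains_handler.py for option == 'salt&pepper'.
    augmentation = Compose([Lambda(name='salt_pepper_noise', image=salt_and_pepper_noise, always_apply=True)])
    transformed = augmentation(image=image)["image"]

    print(noisy_gaussian.shape, noisy_sp.shape, transformed.shape)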