Skip to content

Commit

Permalink
added fix so dqn and ddpg run under conditions similar to qlearn
Browse files Browse the repository at this point in the history
actions need a short time to take effect in the environment
  • Loading branch information
rubenlucas93 committed Mar 16, 2023
1 parent 1284d46 commit a8f6577
Show file tree
Hide file tree
Showing 4 changed files with 3 additions and 15 deletions.
8 changes: 0 additions & 8 deletions rl_studio/envs/gazebo/f1/models/f1_env_camera.py
Original file line number Diff line number Diff line change
Expand Up @@ -114,8 +114,6 @@ def calculate_observation(self, state: list) -> list:

def step(self, action) -> Tuple:

self._gazebo_unpause()

vel_cmd = Twist()
vel_cmd.linear.x = self.actions[action][0]
vel_cmd.angular.z = self.actions[action][1]
Expand All @@ -139,7 +137,6 @@ def step(self, action) -> Tuple:
end = time.time()
#print(end - start)

self._gazebo_pause()
points = self.processed_image(f1_image_camera.data)
state = self.calculate_observation(points)
center = float(self.config.center_image - points[0]) / (
Expand Down Expand Up @@ -170,7 +167,6 @@ def reset(self):
else:
self._gazebo_reset()

self._gazebo_unpause()

# Get camera info
image_data = None
Expand All @@ -189,12 +185,9 @@ def reset(self):
state = self.calculate_observation(points)
# reset_state = (state, False)

self._gazebo_pause()

return state

def inference(self, action):
self._gazebo_unpause()

vel_cmd = Twist()
vel_cmd.linear.x = self.config.ACTIONS_SET[action][0]
Expand All @@ -207,7 +200,6 @@ def inference(self, action):
cv_image = CvBridge().imgmsg_to_cv2(image_data, "bgr8")
f1_image_camera = self.image_msg_to_image(image_data, cv_image)

self._gazebo_pause()

points = self.processed_image(f1_image_camera.data)
state = self.calculate_observation(points)
Expand Down
2 changes: 0 additions & 2 deletions rl_studio/envs/gazebo/f1/models/reset.py
Original file line number Diff line number Diff line change
Expand Up @@ -54,11 +54,9 @@ def reset_f1_state_sp(self):
else:
self._gazebo_set_fix_pose_f1_follow_right_lane()

self._gazebo_unpause()

##==== get image from sensor camera
f1_image_camera, _ = self.f1gazeboimages.get_camera_info()
self._gazebo_pause()

##==== calculating State
# simplified perception as observation
Expand Down
2 changes: 1 addition & 1 deletion rl_studio/envs/gazebo/f1/models/simplified_perception.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,7 @@ def processed_image(self, img, height, width, x_row, center_image):
:parameters: input image 640x480
:return:
centrals: lists with distance to center in pixels
cntrals_normalized: lists with distance in range [0,1] for calculating rewards
centrals_normalized: lists with distance in range [0,1] for calculating rewards
"""
image_middle_line = height // 2
img_sliced = img[image_middle_line:]
Expand Down
6 changes: 2 additions & 4 deletions rl_studio/envs/gazebo/f1/models/step.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,6 @@
from geometry_msgs.msg import Twist
import numpy as np
import time

from rl_studio.agents.utils import (
print_messages,
Expand Down Expand Up @@ -51,20 +52,19 @@ def step_followline_state_image_actions_discretes(self, action, step):
return state, reward, done, {}

def step_followline_state_sp_actions_discretes(self, action, step):
self._gazebo_unpause()
vel_cmd = Twist()
vel_cmd.linear.x = self.actions[action][0]
vel_cmd.angular.z = self.actions[action][1]
self.vel_pub.publish(vel_cmd)

##==== get image from sensor camera
f1_image_camera, _ = self.f1gazeboimages.get_camera_info()
self._gazebo_pause()

##==== get center
points_in_red_line, _ = self.simplifiedperception.processed_image(
f1_image_camera.data, self.height, self.width, self.x_row, self.center_image
)

if self.state_space == "spn":
self.point = points_in_red_line[self.poi]
else:
Expand Down Expand Up @@ -176,15 +176,13 @@ def __init__(self, **config):
self.name = config["states"]

def step_followlane_state_sp_actions_discretes(self, action, step):
self._gazebo_unpause()
vel_cmd = Twist()
vel_cmd.linear.x = self.actions[action][0]
vel_cmd.angular.z = self.actions[action][1]
self.vel_pub.publish(vel_cmd)

##==== get image from sensor camera
f1_image_camera, _ = self.f1gazeboimages.get_camera_info()
self._gazebo_pause()

##==== get center
centrals_in_lane, centrals_in_lane_normalized = self.simplifiedperception.processed_image(
Expand Down

0 comments on commit a8f6577

Please sign in to comment.