diff --git a/logreplay/hypes_yaml/town05.yaml b/logreplay/hypes_yaml/town05.yaml
new file mode 100644
index 00000000..6ace8dea
--- /dev/null
+++ b/logreplay/hypes_yaml/town05.yaml
@@ -0,0 +1,30 @@
+- sensor_list:
+    - id: -41
+      name: 'lidar'
+      args: &base_lidar
+        upper_fov: 2
+        lower_fov: -25
+        channels: 32
+        rotation_frequency: 20
+        points_per_second: 250000
+        range: 120
+        thresh: 5 # minimum number of points hit to be regarded as visible
+        global_position: [-200, 78, 3, 45]
+    - name: 'camera'
+      args: &base_camera
+        fov: 100
+        image_size_x: 200
+        image_size_y: 150
+        global_position: [-200, 78, 3, 45]
+        relative_pose: front
+- sensor_list:
+    - id: -42
+      name: 'lidar'
+      args:
+        <<: *base_lidar
+        global_position: [-175, 102, 3, -135]
+    - name: 'camera'
+      args:
+        <<: *base_camera
+        global_position: [-175, 102, 3, -135]
+        relative_pose: front
\ No newline at end of file
diff --git a/logreplay/scenario/scenarios_manager.py b/logreplay/scenario/scenarios_manager.py
index f1886db5..6fcdbbbc 100644
--- a/logreplay/scenario/scenarios_manager.py
+++ b/logreplay/scenario/scenarios_manager.py
@@ -71,8 +71,8 @@ def tick(self):
 
 if __name__ == '__main__':
     from opencood.hypes_yaml.yaml_utils import load_yaml
     scene_params = load_yaml('../hypes_yaml/replay.yaml')
-    scenarion_manager = ScenariosManager(scenario_params=scene_params)
-    scenarion_manager.tick()
+    scenario_manager = ScenariosManager(scenario_params=scene_params)
+    scenario_manager.tick()
     print('test passed')
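The two sensor_list blocks in town05.yaml share their lidar and camera arguments through YAML anchors (&base_lidar, &base_camera) and merge keys (<<:). A quick sanity check of how they resolve, written as a sketch against plain PyYAML (the project itself goes through opencood's load_yaml wrapper, which may post-process the result):

    import yaml

    with open('logreplay/hypes_yaml/town05.yaml') as f:
        rsu_yml = yaml.safe_load(f)

    # the second rsu inherits every lidar argument from &base_lidar
    # and only overrides global_position
    second_lidar_args = rsu_yml[1]['sensor_list'][0]['args']
    assert second_lidar_args['channels'] == 32
    assert second_lidar_args['global_position'] == [-175, 102, 3, -135]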
diff --git a/logreplay/scenario/scene_manager.py b/logreplay/scenario/scene_manager.py
index e5fac07a..d6e8dd46 100644
--- a/logreplay/scenario/scene_manager.py
+++ b/logreplay/scenario/scene_manager.py
@@ -81,6 +81,7 @@ def __init__(self, folder, scene_name, collection_params, scenario_params):
 
         # this is used to dynamically save all information of the objects
         self.veh_dict = OrderedDict()
+        self.rsu_list = []  # rsu sensor managers, spawned lazily on the first tick
         # used to count timestamp
         self.cur_count = 0
         # used for HDMap
@@ -163,7 +164,7 @@ def tick(self):
             # spawn the sensor on each cav
             if 'sensor_manager' not in self.veh_dict[cav_id]:
                 self.veh_dict[cav_id]['sensor_manager'] = \
-                    SensorManager(cav_id, self.veh_dict[cav_id],
+                    SensorManager(cav_id, self.veh_dict[cav_id]['actor'],
                                   self.world,
                                   self.scenario_params['sensor'],
                                   self.output_root)
@@ -190,9 +191,22 @@ def tick(self):
                 self.structure_transform_bg_veh(
                     bg_veh_content['location'],
                     bg_veh_content['angle']))
-        # remove the vehicles that are not in any cav's scope
-        self.destroy_vehicle(cur_timestamp)
-
+
+        if not self.rsu_list:
+            # expects a per-town yaml, e.g. Town05.yaml; this could also be made scene-specific, e.g. 2021_08_21_21_35_56.yaml
+            rsu_yml = load_yaml(f'../hypes_yaml/{self.town_name}.yaml')
+            for sensor_instance in rsu_yml:
+                rsu_id = str(sensor_instance['sensor_list'][0]['id'])
+                sm = SensorManager(rsu_id, None,
+                                   self.world, sensor_instance,
+                                   self.output_root)
+                self.rsu_list.append(sm)
+
+        # destroy_vehicle(cur_timestamp) is intentionally not called here:
+        # vehicles outside every cav's scope may still be inside an rsu
+        # sensor's range, so background vehicles are kept alive for the
+        # whole replay.
+
         self.cur_count += 1
         self.world.tick()
@@ -218,6 +232,10 @@ def sensor_dumping(self, cur_timestamp):
             if 'sensor_manager' in veh_content:
                 veh_content['sensor_manager'].run_step(cur_timestamp)
 
+        # rsu sensors dump their data at every timestamp as well
+        for rsu_sensor in self.rsu_list:
+            rsu_sensor.run_step(cur_timestamp)
+
     def spawn_cav(self, cav_id, cav_content, cur_timestamp):
         """
         Spawn the cav based on current content.
@@ -344,9 +362,6 @@ def move_vehicle(self, veh_id, cur_timestamp, transform):
     def close(self):
         self.world.apply_settings(self.origin_settings)
 
-        actor_list = self.world.get_actors()
-        for actor in actor_list:
-            actor.destroy()
         self.map_manager.destroy()
         self.sensor_destory()
 
@@ -354,6 +369,9 @@ def sensor_destory(self):
         for veh_id, veh_content in self.veh_dict.items():
             if 'sensor_manager' in veh_content:
                 veh_content['sensor_manager'].destroy()
+        # tear down the rsu sensors spawned during the replay
+        for rsu_sensor in self.rsu_list:
+            rsu_sensor.destroy()
 
     def destroy_vehicle(self, cur_timestamp):
         """
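Each global_position entry in the yaml is a 4-element [x, y, z, yaw] pose in the world frame. The sensor classes below turn it into a CARLA transform roughly like this (mirrors their spawn_point_estimation; pitch and roll stay zero):

    import carla

    x, y, z, yaw = [-200, 78, 3, 45]  # first rsu in town05.yaml
    spawn_point = carla.Transform(
        carla.Location(x=x, y=y, z=z),
        carla.Rotation(roll=0, yaw=yaw, pitch=0))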
diff --git a/logreplay/sensors/camera.py b/logreplay/sensors/camera.py
new file mode 100644
index 00000000..c61faa69
--- /dev/null
+++ b/logreplay/sensors/camera.py
@@ -0,0 +1,147 @@
+import os
+import numpy as np
+import cv2
+import carla
+import weakref
+from logreplay.sensors.base_sensor import BaseSensor
+
+
+class Camera(BaseSensor):
+    """
+    Camera manager for a vehicle or infrastructure.
+
+    Parameters
+    ----------
+    vehicle : carla.Vehicle
+        The carla.Vehicle; used for a cav.
+
+    world : carla.World
+        The carla world object; used for an rsu.
+
+    global_position : list
+        Global position of the infrastructure, [x, y, z, yaw].
+
+    relative_position : str
+        Where the camera is mounted on the vehicle.
+        Options: front, left, right, back.
+
+    Attributes
+    ----------
+    image : np.ndarray
+        Current received rgb image.
+    sensor : carla.sensor
+        The carla sensor mounted on the vehicle.
+    """
+
+    def __init__(self, agent_id, vehicle, world, config, global_position=None):
+        super().__init__(agent_id, vehicle, world, config, global_position)
+
+        if vehicle is not None:
+            world = vehicle.get_world()
+
+        self.vehicle = vehicle
+        self.agent_id = agent_id
+        self.name = 'camera'
+
+        blueprint = world.get_blueprint_library().find('sensor.camera.rgb')
+        blueprint.set_attribute('fov', str(config['fov']))
+        blueprint.set_attribute('image_size_x', str(config['image_size_x']))
+        blueprint.set_attribute('image_size_y', str(config['image_size_y']))
+
+        self.relative_position = config['relative_pose']
+        self.relative_position_id = ['front', 'right', 'left', 'back', 'bev']
+
+        if vehicle is None:
+            # rsu: spawn at a fixed world-frame pose
+            spawn_point = self.spawn_point_estimation(None, global_position)
+            self.sensor = world.spawn_actor(blueprint, spawn_point)
+        else:
+            # cav: spawn relative to and attached to the vehicle
+            spawn_point = self.spawn_point_estimation(self.relative_position,
+                                                      None)
+            self.sensor = world.spawn_actor(blueprint, spawn_point,
+                                            attach_to=vehicle)
+            self.name += str(self.relative_position)
+
+        self.image = None
+        self.timestamp = None
+        self.frame = 0
+        weak_self = weakref.ref(self)
+        self.sensor.listen(
+            lambda event: Camera._on_data_event(weak_self, event))
+
+        # camera attributes
+        self.image_width = int(self.sensor.attributes['image_size_x'])
+        self.image_height = int(self.sensor.attributes['image_size_y'])
+
+    @staticmethod
+    def spawn_point_estimation(relative_position, global_position):
+
+        pitch = 0
+        carla_location = carla.Location(x=0, y=0, z=0)
+
+        if global_position is not None:
+            carla_location = carla.Location(
+                x=global_position[0],
+                y=global_position[1],
+                z=global_position[2])
+            # pitch = -35
+            carla_rotation = carla.Rotation(roll=0,
+                                            yaw=global_position[3],
+                                            pitch=pitch)
+        else:
+            if relative_position == 'front':
+                carla_location = carla.Location(x=carla_location.x + 2.5,
+                                                y=carla_location.y,
+                                                z=carla_location.z + 1.0)
+                yaw = 0
+            elif relative_position == 'right':
+                carla_location = carla.Location(x=carla_location.x + 0.0,
+                                                y=carla_location.y + 0.3,
+                                                z=carla_location.z + 1.8)
+                yaw = 100
+            elif relative_position == 'left':
+                carla_location = carla.Location(x=carla_location.x + 0.0,
+                                                y=carla_location.y - 0.3,
+                                                z=carla_location.z + 1.8)
+                yaw = -100
+            elif relative_position == 'back':
+                carla_location = carla.Location(x=carla_location.x - 2.0,
+                                                y=carla_location.y,
+                                                z=carla_location.z + 1.5)
+                yaw = 180
+            else:
+                raise NotImplementedError
+
+            carla_rotation = carla.Rotation(roll=0, yaw=yaw, pitch=pitch)
+
+        spawn_point = carla.Transform(carla_location, carla_rotation)
+
+        return spawn_point
+
+    @staticmethod
+    def _on_data_event(weak_self, event):
+        """Camera data callback."""
+        self = weak_self()
+        if not self:
+            return
+        image = np.array(event.raw_data)
+        image = image.reshape((self.image_height, self.image_width, 4))
+        # we need to remove the alpha channel
+        image = image[:, :, :3]
+
+        self.image = image
+        self.frame = event.frame
+        self.timestamp = event.timestamp
+
+    def data_dump(self, output_root, cur_timestamp):
+        # busy-wait until the callback has delivered the first frame
+        while self.image is None:
+            continue
+
+        if self.vehicle is None:
+            # rsu cameras carry no mount suffix
+            image_name = f'{cur_timestamp}_camera.png'
+        else:
+            pose_id = self.relative_position_id.index(self.relative_position)
+            image_name = f'{cur_timestamp}_camera{pose_id}.png'
+
+        cv2.imwrite(os.path.join(output_root, image_name), self.image)
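A standalone check of the buffer handling in Camera._on_data_event: CARLA delivers BGRA bytes, so reshaping to (H, W, 4) and slicing off the last channel leaves a BGR image, which is exactly the channel order cv2.imwrite expects, so no RGB conversion is needed before dumping. Sketch with a synthetic buffer:

    import numpy as np

    h, w = 150, 200                            # image_size_y/x from town05.yaml
    raw = np.zeros(h * w * 4, dtype=np.uint8)  # stand-in for event.raw_data
    image = np.array(raw).reshape((h, w, 4))[:, :, :3]
    assert image.shape == (150, 200, 3)        # alpha channel dropped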
diff --git a/logreplay/sensors/lidar.py b/logreplay/sensors/lidar.py
new file mode 100644
index 00000000..02a4fa8b
--- /dev/null
+++ b/logreplay/sensors/lidar.py
@@ -0,0 +1,136 @@
+import weakref
+
+import carla
+import os
+import open3d as o3d
+import numpy as np
+from logreplay.sensors.base_sensor import BaseSensor
+
+
+class Lidar(BaseSensor):
+    def __init__(self, agent_id, vehicle, world, config, global_position):
+        super().__init__(agent_id, vehicle, world, config, global_position)
+
+        if vehicle is not None:
+            world = vehicle.get_world()
+
+        self.vehicle = vehicle
+        self.agent_id = agent_id
+        self.name = 'lidar'
+
+        blueprint = world.get_blueprint_library().find('sensor.lidar.ray_cast')
+        blueprint.set_attribute('upper_fov', str(config['upper_fov']))
+        blueprint.set_attribute('lower_fov', str(config['lower_fov']))
+        blueprint.set_attribute('channels', str(config['channels']))
+        blueprint.set_attribute('range', str(config['range']))
+        blueprint.set_attribute('points_per_second',
+                                str(config['points_per_second']))
+        blueprint.set_attribute('rotation_frequency',
+                                str(config['rotation_frequency']))
+
+        if vehicle is None:
+            # rsu: spawn at a fixed world-frame pose
+            spawn_point = self.spawn_point_estimation(None, global_position)
+            self.sensor = world.spawn_actor(blueprint, spawn_point)
+        else:
+            # cav: spawn relative to and attached to the vehicle
+            self.relative_position = config['relative_pose']
+            self.relative_position_id = ['front', 'right', 'left', 'back']
+            spawn_point = self.spawn_point_estimation(self.relative_position,
+                                                      None)
+            self.sensor = world.spawn_actor(blueprint, spawn_point,
+                                            attach_to=vehicle)
+            self.name += str(self.relative_position)
+
+        # lidar data
+        self.thresh = config['thresh']
+        self.data = None
+        self.timestamp = None
+        self.frame = 0
+        weak_self = weakref.ref(self)
+        self.sensor.listen(
+            lambda event: Lidar._on_data_event(weak_self, event))
+
+    @staticmethod
+    def _on_data_event(weak_self, event):
+        """Lidar data callback."""
+        self = weak_self()
+        if not self:
+            return
+
+        # retrieve the raw lidar data and reshape to (N, 4)
+        data = np.copy(np.frombuffer(event.raw_data, dtype=np.dtype('f4')))
+        # (x, y, z, intensity)
+        data = np.reshape(data, (int(data.shape[0] / 4), 4))
+
+        self.data = data
+        self.frame = event.frame
+        self.timestamp = event.timestamp
+
+    @staticmethod
+    def spawn_point_estimation(relative_position, global_position):
+
+        pitch = 0
+        carla_location = carla.Location(x=0, y=0, z=0)
+
+        if global_position is not None:
+            carla_location = carla.Location(
+                x=global_position[0],
+                y=global_position[1],
+                z=global_position[2])
+            carla_rotation = carla.Rotation(pitch=pitch,
+                                            yaw=global_position[3],
+                                            roll=0)
+        else:
+            if relative_position == 'front':
+                carla_location = carla.Location(x=carla_location.x + 2.5,
+                                                y=carla_location.y,
+                                                z=carla_location.z + 1.0)
+                yaw = 0
+            elif relative_position == 'right':
+                carla_location = carla.Location(x=carla_location.x + 0.0,
+                                                y=carla_location.y + 0.3,
+                                                z=carla_location.z + 1.8)
+                yaw = 100
+            elif relative_position == 'left':
+                carla_location = carla.Location(x=carla_location.x + 0.0,
+                                                y=carla_location.y - 0.3,
+                                                z=carla_location.z + 1.8)
+                yaw = -100
+            else:  # 'back'
+                carla_location = carla.Location(x=carla_location.x - 2.0,
+                                                y=carla_location.y,
+                                                z=carla_location.z + 1.5)
+                yaw = 180
+
+            carla_rotation = carla.Rotation(roll=0, yaw=yaw, pitch=pitch)
+
+        spawn_point = carla.Transform(carla_location, carla_rotation)
+
+        return spawn_point
+
+    def data_dump(self, output_root, cur_timestamp):
+        # busy-wait until the callback has delivered the first sweep
+        while self.data is None:
+            continue
+
+        point_cloud = self.data
+        point_xyz = point_cloud[:, :-1]
+        point_intensity = point_cloud[:, -1]
+        # pack intensity into the red channel so it survives the pcd dump
+        point_intensity = np.c_[
+            point_intensity,
+            np.zeros_like(point_intensity),
+            np.zeros_like(point_intensity)
+        ]
+
+        o3d_pcd = o3d.geometry.PointCloud()
+        o3d_pcd.points = o3d.utility.Vector3dVector(point_xyz)
+        o3d_pcd.colors = o3d.utility.Vector3dVector(point_intensity)
+
+        # write to pcd file
+        if self.vehicle is None:
+            pcd_name = f'{cur_timestamp}.pcd'
+        else:
+            pose_id = self.relative_position_id.index(self.relative_position)
+            pcd_name = f'{cur_timestamp}_lidar{pose_id}.pcd'
+
+        o3d.io.write_point_cloud(os.path.join(output_root, pcd_name),
+                                 pointcloud=o3d_pcd,
+                                 write_ascii=True)
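Because pcd colors must be three floats per point, the dump above stores intensity in the red channel and zeroes out green and blue. Reading it back is a one-liner per field (a sketch; the file name is a hypothetical timestamp):

    import numpy as np
    import open3d as o3d

    pcd = o3d.io.read_point_cloud('000068.pcd')
    xyz = np.asarray(pcd.points)              # (N, 3) coordinates
    intensity = np.asarray(pcd.colors)[:, 0]  # red channel carries intensity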
diff --git a/logreplay/sensors/semantic_lidar.py b/logreplay/sensors/semantic_lidar.py
index 9d8c39a1..cf0fbf58 100644
--- a/logreplay/sensors/semantic_lidar.py
+++ b/logreplay/sensors/semantic_lidar.py
@@ -5,10 +5,38 @@
 
 import weakref
 import carla
-import cv2
+import os
+import open3d as o3d
 import numpy as np
 from logreplay.sensors.base_sensor import BaseSensor
 
+# ref: https://github.com/carla-simulator/carla/blob/master/PythonAPI/examples/open3d_lidar.py
+LABEL_COLORS = np.array([
+    (255, 255, 255),  # None
+    (70, 70, 70),     # Building
+    (100, 40, 40),    # Fences
+    (55, 90, 80),     # Other
+    (220, 20, 60),    # Pedestrian
+    (153, 153, 153),  # Pole
+    (157, 234, 50),   # RoadLines
+    (128, 64, 128),   # Road
+    (244, 35, 232),   # Sidewalk
+    (107, 142, 35),   # Vegetation
+    (0, 0, 142),      # Vehicle
+    (102, 102, 156),  # Wall
+    (220, 220, 0),    # TrafficSign
+    (70, 130, 180),   # Sky
+    (81, 0, 81),      # Ground
+    (150, 100, 100),  # Bridge
+    (230, 150, 140),  # RailTrack
+    (180, 165, 180),  # GuardRail
+    (250, 170, 30),   # TrafficLight
+    (110, 190, 160),  # Static
+    (170, 120, 50),   # Dynamic
+    (45, 60, 150),    # Water
+    (145, 170, 100),  # Terrain
+]) / 255.0  # normalize each channel to [0, 1], which is what Open3D uses
+
 
 class SemanticLidar(BaseSensor):
     def __init__(self, agent_id, vehicle, world, config, global_position):
@@ -17,6 +45,7 @@ def __init__(self, agent_id, vehicle, world, config, global_position):
         if vehicle is not None:
             world = vehicle.get_world()
 
+        self.vehicle = vehicle
         self.agent_id = agent_id
 
         blueprint = world.get_blueprint_library(). \
@@ -58,6 +87,54 @@ def __init__(self, agent_id, vehicle, world, config, global_position):
             lambda event: SemanticLidar._on_data_event(
                 weak_self, event))
 
+    @staticmethod
+    def spawn_point_estimation(relative_position, global_position):
+
+        pitch = 0
+        carla_location = carla.Location(x=0, y=0, z=0)
+
+        if global_position is not None:
+            carla_location = carla.Location(
+                x=global_position[0],
+                y=global_position[1],
+                z=global_position[2])
+            # pitch = -35
+            carla_rotation = carla.Rotation(roll=0,
+                                            yaw=global_position[3],
+                                            pitch=pitch)
+        else:
+            if relative_position == 'front':
+                carla_location = carla.Location(x=carla_location.x + 2.5,
+                                                y=carla_location.y,
+                                                z=carla_location.z + 1.0)
+                yaw = 0
+            elif relative_position == 'right':
+                carla_location = carla.Location(x=carla_location.x + 0.0,
+                                                y=carla_location.y + 0.3,
+                                                z=carla_location.z + 1.8)
+                yaw = 100
+            elif relative_position == 'left':
+                carla_location = carla.Location(x=carla_location.x + 0.0,
+                                                y=carla_location.y - 0.3,
+                                                z=carla_location.z + 1.8)
+                yaw = -100
+            elif relative_position == 'back':
+                carla_location = carla.Location(x=carla_location.x - 2.0,
+                                                y=carla_location.y,
+                                                z=carla_location.z + 1.5)
+                yaw = 180
+            else:
+                raise NotImplementedError
+
+            carla_rotation = carla.Rotation(roll=0, yaw=yaw, pitch=pitch)
+
+        spawn_point = carla.Transform(carla_location, carla_rotation)
+
+        return spawn_point
+
     @staticmethod
     def _on_data_event(weak_self, event):
         """Semantic Lidar method"""
@@ -76,50 +153,32 @@ def _on_data_event(weak_self, event):
         self.obj_tag = np.array(data['ObjTag'])
         self.obj_idx = np.array(data['ObjIdx'])
 
-        self.data = data
         self.frame = event.frame
         self.timestamp = event.timestamp
 
-    @staticmethod
-    def spawn_point_estimation(relative_position, global_position):
-
-        pitch = 0
-        carla_location = carla.Location(x=0, y=0, z=0)
-
-        if global_position is not None:
-            carla_location = carla.Location(
-                x=global_position[0],
-                y=global_position[1],
-                z=global_position[2])
-            pitch = -35
-
-        if relative_position == 'front':
-            carla_location = carla.Location(x=carla_location.x + 2.5,
-                                            y=carla_location.y,
-                                            z=carla_location.z + 1.0)
-            yaw = 0
-
-        elif relative_position == 'right':
-            carla_location = carla.Location(x=carla_location.x + 0.0,
-                                            y=carla_location.y + 0.3,
-                                            z=carla_location.z + 1.8)
-            yaw = 100
-
-        elif relative_position == 'left':
-            carla_location = carla.Location(x=carla_location.x + 0.0,
-                                            y=carla_location.y - 0.3,
-                                            z=carla_location.z + 1.8)
-            yaw = -100
-        else:
-            carla_location = carla.Location(x=carla_location.x - 2.0,
-                                            y=carla_location.y,
-                                            z=carla_location.z + 1.5)
-            yaw = 180
-
-        carla_rotation = carla.Rotation(roll=0, yaw=yaw, pitch=pitch)
-        spawn_point = carla.Transform(carla_location, carla_rotation)
-
-        return spawn_point
+    def data_dump(self, output_root, cur_timestamp):
+        # busy-wait for the first sweep; self.data is no longer stored,
+        # so wait on the points/obj_tag attributes instead
+        while self.points is None or self.obj_tag is None:
+            continue
+
+        point_cloud = self.points
+        label_color = LABEL_COLORS[self.obj_tag]
+
+        o3d_pcd = o3d.geometry.PointCloud()
+        o3d_pcd.points = o3d.utility.Vector3dVector(point_cloud)
+        o3d_pcd.colors = o3d.utility.Vector3dVector(label_color)
+
+        # write to pcd file
+        if self.vehicle is None:
+            pcd_name = f'{cur_timestamp}.pcd'
+        else:
+            pose_id = self.relative_position_id.index(self.relative_position)
+            pcd_name = f'{cur_timestamp}_semlidar{pose_id}.pcd'
+
+        o3d.io.write_point_cloud(os.path.join(output_root, pcd_name),
+                                 pointcloud=o3d_pcd,
+                                 write_ascii=True)
 
     def tick(self):
         while self.obj_idx is None or self.obj_tag is None or \
@@ -138,3 +197,4 @@ def tick(self):
 
         # these are the ids that are visible
         return vehicle_id_filter
+
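LABEL_COLORS is laid out so that CARLA's integer semantic tags index it directly, which lets the dump colorize an entire sweep with one fancy-indexing step instead of a per-point loop:

    import numpy as np

    # uses the LABEL_COLORS table defined in semantic_lidar.py above
    obj_tag = np.array([10, 4, 7])   # Vehicle, Pedestrian, Road
    colors = LABEL_COLORS[obj_tag]   # (3, 3) float array in [0, 1]
    assert np.allclose(colors[0], np.array([0, 0, 142]) / 255.0)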
diff --git a/logreplay/sensors/sensor_manager.py b/logreplay/sensors/sensor_manager.py
index 226c6eba..8ecdc14f 100644
--- a/logreplay/sensors/sensor_manager.py
+++ b/logreplay/sensors/sensor_manager.py
@@ -41,7 +41,7 @@ def __init__(self, agent_id,
 
         self.agent_id = agent_id
         self.output_root = output_root
-        self.vehicle = vehicle_content['actor']
+        self.vehicle = vehicle_content
         self.world = world
         self.sensor_list = []
         # this is used to gather the meta information return from sensors
@@ -66,12 +66,16 @@ def __init__(self, agent_id,
                 'same as the file name. e.g. ' \
                 'bev_semantic_camera -> ' \
                 'BevSemanticCamera'
-            # todo: rsu is not considered yet
+
+            # rsu sensors carry a world-frame pose in their yaml args;
+            # cav sensors attach to a vehicle, so no global position is given
+            global_position = sensor_content['args'].get('global_position', None)
+
             sensor_instance = sensor(self.agent_id,
                                      self.vehicle,
                                      self.world,
                                      sensor_content['args'],
-                                     None)
+                                     global_position)
             self.sensor_list.append(sensor_instance)
 
     def run_step(self, cur_timestamp):
@@ -84,7 +88,7 @@ def run_step(self, cur_timestamp):
 
         # for data dumping
         output_folder = os.path.join(self.output_root,
-                                     self.agent_id)
+                                     str(self.agent_id))
         if not os.path.exists(output_folder):
             os.makedirs(output_folder)
         sensor_instance.data_dump(output_folder,
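The str(self.agent_id) cast is what makes the dump path robust: cav ids usually arrive as strings, but agent ids can also arrive as ints (the rsu ids in town05.yaml are the negative integers -41 and -42 before scene_manager casts them), and os.path.join rejects non-string components:

    import os

    os.path.join('./output', -41)       # TypeError: not str or os.PathLike
    os.path.join('./output', str(-41))  # './output/-41'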