
Commit

Auto Zoom for Rand256 #247 from sca075/refactoring_camera
Auto Zoom for Rand256
sca075 authored Aug 26, 2024
2 parents 4aa6e46 + c1ce3d0 commit c4d4e42
Showing 7 changed files with 365 additions and 459 deletions.
2 changes: 1 addition & 1 deletion custom_components/mqtt_vacuum_camera/manifest.json
@@ -8,5 +8,5 @@
"iot_class": "local_polling",
"issue_tracker": "https://github.com/sca075/mqtt_vacuum_camera/issues",
"requirements": ["pillow>=10.3.0,<10.5.0", "numpy"],
"version": "2024.08.1"
"version": "2024.08.2"
}
17 changes: 7 additions & 10 deletions custom_components/mqtt_vacuum_camera/utils/auto_crop.py
@@ -1,5 +1,5 @@
"""Auto Crop Class for trimming and zooming images.
Version: 2024.08.1"""
Version: 2024.08.2"""

from __future__ import annotations

@@ -68,7 +68,6 @@ def _calculate_trimmed_dimensions(self):
- (self.imh.trim_up + self.imh.offset_top)
),
)

# Ensure shared reference dimensions are updated
if hasattr(self.imh.shared, "image_ref_height") and hasattr(
self.imh.shared, "image_ref_width"
@@ -135,8 +134,6 @@ async def async_image_margins(
self, image_array: NumpyArray, detect_colour: Color
) -> tuple[int, int, int, int]:
"""Crop the image based on the auto crop area."""
"""async_auto_trim_and_zoom_image"""

nonzero_coords = np.column_stack(np.where(image_array != list(detect_colour)))
# Calculate the trim box based on the first and last occurrences
min_y, min_x, _ = NumpyArray.min(nonzero_coords, axis=0)
@@ -157,7 +154,6 @@ async def async_check_if_zoom_is_on(
rand256: bool = False,
) -> NumpyArray:
"""Check if the image need to be zoom."""
"""async_auto_trim_and_zoom_image"""

if (
zoom
@@ -169,15 +165,17 @@
f"{self.file_name}: Zooming the image on room {self.imh.robot_in_room['room']}."
)
if rand256:
trim_left = int(self.imh.robot_in_room["left"] / 10) - margin_size
trim_right = int(self.imh.robot_in_room["right"] / 10) + margin_size
trim_up = int(self.imh.robot_in_room["up"] / 10) - margin_size
trim_down = int(self.imh.robot_in_room["down"] / 10) + margin_size
trim_left = round(self.imh.robot_in_room["right"] / 10) - margin_size
trim_right = round(self.imh.robot_in_room["left"] / 10) + margin_size
trim_up = round(self.imh.robot_in_room["down"] / 10) - margin_size
trim_down = round(self.imh.robot_in_room["up"] / 10) + margin_size
else:
trim_left = self.imh.robot_in_room["left"] - margin_size
trim_right = self.imh.robot_in_room["right"] + margin_size
trim_up = self.imh.robot_in_room["up"] - margin_size
trim_down = self.imh.robot_in_room["down"] + margin_size
trim_left, trim_right = sorted([trim_left, trim_right])
trim_up, trim_down = sorted([trim_up, trim_down])
trimmed = image_array[trim_up:trim_down, trim_left:trim_right]
else:
# Apply the auto-calculated trims to the rotated image
@@ -191,7 +189,6 @@ async def async_rotate_the_image(
self, trimmed: NumpyArray, rotate: int
) -> NumpyArray:
"""Rotate the image and return the new array."""
"""async_auto_trim_and_zoom_image"""
if rotate == 90:
rotated = rot90(trimmed)
self.imh.crop_area = [
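
Taken together, the auto_crop.py changes make the Rand256 zoom branch read the opposite corner for each trim edge, scale the room coordinates down by a factor of 10, and sort each pair so the slice bounds are always ordered. A minimal standalone sketch of that logic (the helper name and signature are illustrative, not part of the component):

import numpy as np

def zoom_to_room(image: np.ndarray, robot_in_room: dict, margin: int) -> np.ndarray:
    """Crop the image to the room bounding box plus a margin (Rand256 scale)."""
    trim_left = round(robot_in_room["right"] / 10) - margin
    trim_right = round(robot_in_room["left"] / 10) + margin
    trim_up = round(robot_in_room["down"] / 10) - margin
    trim_down = round(robot_in_room["up"] / 10) + margin
    # Room corners can arrive in either order; sorting each pair guarantees
    # a non-empty, correctly oriented slice.
    trim_left, trim_right = sorted([trim_left, trim_right])
    trim_up, trim_down = sorted([trim_up, trim_down])
    return image[trim_up:trim_down, trim_left:trim_right]
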
7 changes: 2 additions & 5 deletions custom_components/mqtt_vacuum_camera/utils/img_data.py
@@ -3,7 +3,7 @@
ImageData is part of the Image_Handler
used functions to search data in the json
provided for the creation of the new camera frame
Version: v2024.08.0
Version: v2024.08.2
"""

from __future__ import annotations
@@ -86,9 +86,6 @@ def find_layers(
if layer_type not in layer_dict:
layer_dict[layer_type] = []
layer_dict[layer_type].append(json_obj.get("compressedPixels", []))
# Hopefully will not brake anything.
# if layer_type == "floor":
# active_list.append("floor")
if layer_type == "segment":
active_list.append(int(active_type["active"]))

@@ -190,7 +187,7 @@ async def async_get_rooms_coordinates(
if rand:
x, y, _ = entry # Extract x and y coordinates
max_x = max(max_x, x) # Update max x coordinate
max_y = max(max_y, y) # Update max y coordinate
max_y = max(max_y, y + pixel_size) # Update max y coordinate
min_x = min(min_x, x) # Update min x coordinate
min_y = min(min_y, y) # Update min y coordinate
else:
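
The functional change in async_get_rooms_coordinates is that the lower bound of the room bounding box now includes the last pixel block (y + pixel_size). A standalone sketch of that accumulation, assuming each Rand256 entry is an (x, y, count) tuple and pixel_size is the edge length of one drawn block (the function name is illustrative):

def rooms_bounding_box(pixels, pixel_size):
    """Return (min_x, min_y, max_x, max_y) over a room's pixel entries."""
    min_x = min_y = float("inf")
    max_x = max_y = float("-inf")
    for x, y, _ in pixels:
        min_x, min_y = min(min_x, x), min(min_y, y)
        max_x = max(max_x, x)
        # Extend the bottom edge by one block so the last pixel row is covered.
        max_y = max(max_y, y + pixel_size)
    return min_x, min_y, max_x, max_y
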
29 changes: 16 additions & 13 deletions custom_components/mqtt_vacuum_camera/valetudo/MQTT/connector.py
@@ -1,5 +1,5 @@
"""
Version: v2024.08.1
Version: v2024.08.2
- Removed the PNG decode, the json is extracted from map-data instead of map-data-hass.
- Tested no influence on the camera performance.
- Added gzip library used in Valetudo RE data compression.
@@ -270,20 +270,23 @@ async def rrm_handle_active_segments(self, msg) -> None:

if command == "segmented_cleanup":
segment_ids = command_status.get("segment_ids", [])
# Retrieve room data from RoomStore
rooms_data = await RoomStore().async_get_rooms_data(self._file_name)
# Sort the rooms data by room ID same as rooms data in attributes.
rooms_data = dict(sorted(rooms_data.items(), key=lambda item: int(item[0])))
rrm_active_segments = [0] * len(
rooms_data
) # Initialize based on the number of rooms

# Retrieve the shared room data instead of RoomStore or destinations
shared_rooms_data = self._shared.map_rooms

# Create a mapping of room ID to its index based on the shared rooms data
room_id_to_index = {
room_id: idx for idx, room_id in enumerate(shared_rooms_data)
}

# Initialize rrm_active_segments with zeros based on the number of rooms in shared_rooms_data
rrm_active_segments = [0] * len(shared_rooms_data)

# Update the rrm_active_segments based on segment_ids
for segment_id in segment_ids:
room_name = rooms_data.get(str(segment_id))
if room_name:
# Convert room ID to index; since dict doesn't preserve order, find index manually
room_idx = list(rooms_data.keys()).index(str(segment_id))
rrm_active_segments[room_idx] = 1
room_index = room_id_to_index.get(segment_id)
if room_index is not None:
rrm_active_segments[room_index] = 1

self._shared.rand256_active_zone = rrm_active_segments
_LOGGER.debug(f"Updated Active Segments: {rrm_active_segments}")
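
The rewritten handler maps each Rand256 segment ID to its position in the shared rooms data and flips that position to 1, instead of sorting RoomStore entries and searching for the index. A self-contained sketch of that mapping (names are illustrative; it assumes the shared rooms data iterates room IDs in the same order the segments are drawn):

def build_active_segments(shared_rooms_data, segment_ids) -> list[int]:
    """Return a 0/1 vector marking which rooms are in the segmented cleanup."""
    room_id_to_index = {room_id: idx for idx, room_id in enumerate(shared_rooms_data)}
    active = [0] * len(shared_rooms_data)
    for segment_id in segment_ids:
        idx = room_id_to_index.get(segment_id)
        if idx is not None:  # Ignore segment IDs the camera does not know about.
            active[idx] = 1
    return active

# Example: rooms (16, 17, 18) with segment_ids = [17] -> [0, 1, 0].
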
@@ -2,7 +2,7 @@
Image Handler Module for Valetudo Re Vacuums.
It returns the PIL PNG image frame relative to the Map Data extrapolated from the vacuum json.
It also returns calibration, rooms data to the card and other images information to the camera.
Version: v2024.08.1
Version: v2024.08.2
"""

from __future__ import annotations
@@ -366,28 +366,25 @@ def _check_robot_position(x: int, y: int) -> bool:
"angle": angle,
"in_room": self.robot_in_room["room"],
}
# ##Still working on this count to fix it on 2024.08.2
# ##The issue is now that somehow the trimming dimensions are not okay.
# ##This cause the camera to crash when resizing the image.
_LOGGER.info(f"{self.file_name}: Auto Zoom is currently disabled.")
# self.active_zones = self.shared.rand256_active_zone
# if self.active_zones and (
# self.robot_in_room["id"] in range(len(self.active_zones))
# ): # issue #100 Index out of range
# self.zooming = bool(self.active_zones[self.robot_in_room["id"]])
# else:
# self.zooming = False
self.active_zones = self.shared.rand256_active_zone
if self.active_zones and (
(self.robot_in_room["id"]) in range(len(self.active_zones))
): # issue #100 Index out of range
self.zooming = bool(self.active_zones[(self.robot_in_room["id"])])
else:
self.zooming = False

return temp
# else we need to search and use the async method
_LOGGER.debug(f"{self.file_name} changed room.. searching..")
room_count = 0
room_count = -1
last_room = None
if self.rooms_pos:
if self.robot_in_room:
last_room = self.robot_in_room
for room in self.rooms_pos:
corners = room["corners"]
room_count += 1
self.robot_in_room = {
"id": room_count,
"left": corners[0][0],
@@ -396,7 +393,6 @@ def _check_robot_position(x: int, y: int) -> bool:
"down": corners[2][1],
"room": room["name"],
}
room_count += 1
# Check if the robot coordinates are inside the room's corners
if _check_robot_position(robot_x, robot_y):
temp = {
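
Two things change in this handler: the Rand256 auto zoom is re-enabled by reading shared.rand256_active_zone again, and the room counter now starts at -1 and is incremented before the robot_in_room entry is built, so "id" matches the room's index in rooms_pos and can index active_zones directly (the issue #100 out-of-range case). A reduced sketch of that indexing, with the corner fields and the room search elided (names are illustrative):

def zoom_flags(rooms_pos: list[dict], active_zones: list[int]) -> dict[str, bool]:
    """Map each room name to whether zooming would be enabled for it."""
    flags = {}
    room_count = -1
    for room in rooms_pos:
        room_count += 1  # Increment first so the id equals the room's list index.
        in_range = bool(active_zones) and room_count in range(len(active_zones))
        flags[room["name"]] = bool(active_zones[room_count]) if in_range else False
    return flags
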
@@ -1,7 +1,7 @@
"""
Image Draw Class for Valetudo Rand256 Image Handling.
This class is used to simplify the ImageHandler class.
Version: 2024.08.1
Version: 2024.08.2
"""

from __future__ import annotations
Expand Down Expand Up @@ -144,7 +144,6 @@ async def _draw_segments(

room_id = 0
rooms_list = [color_wall]
_LOGGER.info(f"{self.file_name}: Drawing segments. {len(segment_data)}")
if not segment_data:
_LOGGER.info(f"{self.file_name}: No segments data found.")
return room_id, img_np_array
@@ -154,10 +153,6 @@
for pixels in segment_data:
room_color = self.img_h.shared.rooms_colors[room_id]
rooms_list.append(room_color)
_LOGGER.debug(
f"Room {room_id} color: {room_color}, "
f"{tuple(self.img_h.active_zones)}"
)
if (
self.img_h.active_zones
and len(self.img_h.active_zones) > room_id