Skip to content

Commit

Permalink
Fix bug with blur_edges incorrect indent. Cope with .mkv and .flv. Pause video when screen off
Browse files Browse the repository at this point in the history
  • Loading branch information
paddywwoof committed Dec 13, 2024
1 parent 4b8aebc commit 04e3c9f
Show file tree
Hide file tree
Showing 3 changed files with 88 additions and 44 deletions.
2 changes: 1 addition & 1 deletion src/picframe/image_cache.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,7 @@
class ImageCache:

EXTENSIONS = ['.png', '.jpg', '.jpeg', '.heif', '.heic']
VIDEO_EXTENSIONS = ['.mp4']
VIDEO_EXTENSIONS = ['.mp4', '.mkv', '.flv']
EXIF_TO_FIELD = {'EXIF FNumber': 'f_number',
'Image Make': 'make',
'Image Model': 'model',
Expand Down
74 changes: 56 additions & 18 deletions src/picframe/video_streamer.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,27 +2,46 @@
import numpy as np
import subprocess
import threading
import signal
import time
import json


class VideoInfo:
    """Extract width, height, frame rate and duration from a video file.

    Runs ``ffmpeg -i <path>`` — which prints stream information to stderr
    and exits non-zero because no output file is given — and parses that
    text output. All four attributes (``width``, ``height``, ``fps``,
    ``duration``) are ``None`` when nothing could be parsed.
    """

    def __init__(self, video_path):
        # Pass arguments as a list with the default shell=False so paths
        # containing spaces or shell metacharacters work; an f-string with
        # shell=True and an unquoted path broke on such file names.
        proc = subprocess.Popen(["ffmpeg", "-i", video_path], text=True,
                                stdin=subprocess.PIPE,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
        (output, error) = proc.communicate()
        # ffmpeg reports the probe text on stderr when it exits non-zero
        # (no output file specified), so fall back to stderr on failure.
        probe_result = (output if proc.returncode == 0 else error).split("\n")

        self.width = self.height = self.fps = self.duration = None
        for ln in probe_result:
            if "Duration:" in ln:
                # e.g. "  Duration: 00:01:23.45, start: ..." -> seconds
                dur_split = ln.split(",")[0].split(":")
                try:
                    self.duration = (float(dur_split[1]) * 3600.0
                                     + float(dur_split[2]) * 60.0
                                     + float(dur_split[3]))
                except (IndexError, ValueError):
                    self.duration = None  # e.g. "Duration: N/A"
            if "Video:" in ln:
                for v_info in ln.split(","):
                    if "x" in v_info:
                        # e.g. " 1920x1080 [SAR 1:1 ...]" -> (1920, 1080)
                        try:
                            (self.width, self.height) = (
                                int(x) for x in v_info.split()[0].split("x"))
                        except ValueError:
                            (self.width, self.height) = (240, 180)  # fallback size
                    elif "tbr" in v_info:
                        # e.g. " 25 tbr" -> 25 frames per second
                        try:
                            self.fps = int(v_info.split()[0])
                        except ValueError:
                            self.fps = 24  # fallback frame rate

class VideoStreamer:
def __init__(self, video_path):
self.flag = False # use to signal new texture
self.kill_thread = False
self.pause_thread = False
self.command = [ 'ffmpeg', '-i', video_path, '-f', 'image2pipe',
'-pix_fmt', 'rgb24', '-vcodec', 'rawvideo', '-']
video_info = VideoInfo(video_path)
Expand All @@ -31,6 +50,7 @@ def __init__(self, video_path):
self.H = video_info.height
self.fps = video_info.fps
self.duration = video_info.duration
self.paused_time = 0.0
self.P = 3
self.image = np.zeros((self.H, self.W, self.P), dtype='uint8')
self.t = threading.Thread(target=self.pipe_thread)
Expand All @@ -41,23 +61,41 @@ def __init__(self, video_path):
self.P = 3
self.fps = 1.0
self.duration = 0.0
self.paused_time = 0.0
self.image = np.zeros((self.H, self.W, self.P), dtype='uint8')
self.t = None

def pipe_thread(self):
    """Background decoder loop: read raw RGB frames from ffmpeg into self.image.

    Runs until ``self.kill_thread`` is set. While ``self.pause_thread`` is
    set, the ffmpeg child is frozen with SIGSTOP (and later resumed with
    SIGCONT) and ``self.paused_time`` accumulates so callers can extend the
    display time by the paused duration. ``self.flag`` is False while a
    frame is being overwritten and True once it is complete.
    """
    frame_bytes = self.H * self.W * self.P  # bytes per raw rgb24 frame
    while not self.kill_thread:
        paused = False
        with subprocess.Popen(self.command, stdout=subprocess.PIPE,
                              stderr=subprocess.PIPE, bufsize=-1) as pipe:
            while pipe.poll() is None and not self.kill_thread:
                if not paused and self.pause_thread:  # stop decoding (screen off)
                    paused = True
                    pipe.send_signal(signal.SIGSTOP)
                elif paused and not self.pause_thread:  # resume decoding
                    paused = False
                    pipe.send_signal(signal.SIGCONT)
                if not paused:
                    st_tm = time.time()
                    self.flag = False
                    raw = pipe.stdout.read(frame_bytes)
                    if raw is None or len(raw) < frame_bytes:
                        # short read at end of stream: without this guard the
                        # shape assignment below raises ValueError
                        break
                    self.image = np.frombuffer(raw, dtype='uint8')  # overwrite array
                    self.image.shape = (self.H, self.W, self.P)
                    self.flag = True
                    step = time.time() - st_tm
                    time.sleep(max(0.04 - step, 0.0))  # adding fps info to ffmpeg doesn't seem to have any effect
                else:
                    self.paused_time += 0.25
                    time.sleep(0.25)

def kill(self):
    """Stop the decoder thread, wait for it to exit, and free the frame buffer."""
    self.kill_thread = True
    if self.t is not None:  # thread only exists if the video probe succeeded
        self.t.join()
    del self.image  # release the (potentially large) frame array

def pause(self):
    """Ask the decoder thread to suspend ffmpeg (e.g. when the screen turns off)."""
    self.pause_thread = True

def restart(self):
    """Ask the decoder thread to resume ffmpeg after a pause()."""
    self.pause_thread = False
56 changes: 31 additions & 25 deletions src/picframe/viewer_display.py
Original file line number Diff line number Diff line change
Expand Up @@ -9,7 +9,7 @@
from datetime import datetime
from picframe.video_streamer import VideoStreamer

VIDEO_EXTENSIONS = ['.mp4']
VIDEO_EXTENSIONS = ['.mp4', '.mkv', '.flv']

# supported display modes for display switch
dpms_mode = ("unsupported", "pi", "x_dpms")
Expand Down Expand Up @@ -169,6 +169,12 @@ def display_is_on(self, on_off): #on_off is True turns screen on, False turns it
self.__logger.debug("Cause: %s", e)
else:
self.__logger.warning("Unsupported setting for display_power=%d.", self.__display_power)
if self.__video_streamer is not None:
if on_off:
self.__video_streamer.restart()
else:
self.__video_streamer.pause()


def set_show_text(self, txt_key=None, val="ON"):
if txt_key is None:
Expand Down Expand Up @@ -340,28 +346,28 @@ def __tex_load(self, pics, size=None): # noqa: C901
(w, h) = im.size
screen_aspect, image_aspect, diff_aspect = self.__get_aspect_diff(size, im.size)

if self.__blur_edges and size:
if diff_aspect > 0.01:
(sc_b, sc_f) = (size[1] / im.size[1], size[0] / im.size[0])
if screen_aspect > image_aspect:
(sc_b, sc_f) = (sc_f, sc_b) # swap round
(w, h) = (round(size[0] / sc_b / self.__blur_zoom), round(size[1] / sc_b / self.__blur_zoom))
(x, y) = (round(0.5 * (im.size[0] - w)), round(0.5 * (im.size[1] - h)))
box = (x, y, x + w, y + h)
blr_sz = [int(x * 512 / size[0]) for x in size]
im_b = im.resize(size, resample=0, box=box).resize(blr_sz)
im_b = im_b.filter(ImageFilter.GaussianBlur(self.__blur_amount))
im_b = im_b.resize(size, resample=Image.BICUBIC)
im_b.putalpha(round(255 * self.__edge_alpha)) # to apply the same EDGE_ALPHA as the no blur method.
im = im.resize([int(x * sc_f) for x in im.size], resample=Image.BICUBIC)
"""resize can use Image.LANCZOS (alias for Image.ANTIALIAS) for resampling
for better rendering of high-contranst diagonal lines. NB downscaled large
images are rescaled near the start of this try block if w or h > max_dimension
so those lines might need changing too.
"""
im_b.paste(im, box=(round(0.5 * (im_b.size[0] - im.size[0])),
round(0.5 * (im_b.size[1] - im.size[1]))))
im = im_b # have to do this as paste applies in place
if self.__blur_edges and size:
if diff_aspect > 0.01:
(sc_b, sc_f) = (size[1] / im.size[1], size[0] / im.size[0])
if screen_aspect > image_aspect:
(sc_b, sc_f) = (sc_f, sc_b) # swap round
(w, h) = (round(size[0] / sc_b / self.__blur_zoom), round(size[1] / sc_b / self.__blur_zoom))
(x, y) = (round(0.5 * (im.size[0] - w)), round(0.5 * (im.size[1] - h)))
box = (x, y, x + w, y + h)
blr_sz = [int(x * 512 / size[0]) for x in size]
im_b = im.resize(size, resample=0, box=box).resize(blr_sz)
im_b = im_b.filter(ImageFilter.GaussianBlur(self.__blur_amount))
im_b = im_b.resize(size, resample=Image.BICUBIC)
im_b.putalpha(round(255 * self.__edge_alpha)) # to apply the same EDGE_ALPHA as the no blur method.
im = im.resize([int(x * sc_f) for x in im.size], resample=Image.BICUBIC)
"""resize can use Image.LANCZOS (alias for Image.ANTIALIAS) for resampling
for better rendering of high-contranst diagonal lines. NB downscaled large
images are rescaled near the start of this try block if w or h > max_dimension
so those lines might need changing too.
"""
im_b.paste(im, box=(round(0.5 * (im_b.size[0] - im.size[0])),
round(0.5 * (im_b.size[1] - im.size[1]))))
im = im_b # have to do this as paste applies in place
tex = pi3d.Texture(im, blend=True, m_repeat=True, free_after_load=True)
except Exception as e:
self.__logger.warning("Can't create tex from file: \"%s\" or \"%s\"", pics[0].fname, pics[1])
Expand Down Expand Up @@ -596,7 +602,7 @@ def slideshow_is_running(self, pics=None, time_delay=200.0, fade_time=10.0, paus
self.__in_transition = False

if self.__video_streamer is not None and self.__video_streamer.flag is True:
if (tm - self.__start_tm) > self.__video_streamer.duration: # move on to next image at end of video TODO alow repeat behaviour?
if (tm - self.__start_tm) > (self.__video_streamer.duration + self.__video_streamer.paused_time): # move on to next image at end of video TODO alow repeat behaviour?
skip_image = True
else:
self.__sfg.update_ndarray(self.__video_streamer.image, 0)
Expand Down Expand Up @@ -636,7 +642,7 @@ def slideshow_is_running(self, pics=None, time_delay=200.0, fade_time=10.0, paus

video_time = None
if self.__video_streamer is not None:
video_duration = self.__video_streamer.duration * self.__video_streamer.fps / self.__fps
video_duration = (self.__video_streamer.duration + self.__video_streamer.paused_time) * self.__video_streamer.fps / self.__fps
if video_duration > time_delay:
video_time = video_duration
return (loop_running, skip_image, video_time) # now returns tuple with skip_image flag and video_time added
Expand Down

0 comments on commit 04e3c9f

Please sign in to comment.