Skip to content

Commit

Permalink
New architecture for vk-video initialization.
Browse files Browse the repository at this point in the history
This change allows users to initialize `vk-video` with wgpu surfaces for
rendering to an on-screen window. Previously this was not supported: there
was no way to guarantee that a `VulkanCtx` would be able to render on
screen.

This patch also adds an example h264 player to vk-video.
  • Loading branch information
jerzywilczek committed Nov 12, 2024
1 parent efe10fa commit 40ab0b7
Show file tree
Hide file tree
Showing 24 changed files with 1,733 additions and 248 deletions.
924 changes: 909 additions & 15 deletions Cargo.lock

Large diffs are not rendered by default.

11 changes: 9 additions & 2 deletions compositor_pipeline/src/pipeline.rs
Original file line number Diff line number Diff line change
Expand Up @@ -133,7 +133,9 @@ pub struct PipelineCtx {
pub download_dir: Arc<PathBuf>,
pub event_emitter: Arc<EventEmitter>,
#[cfg(feature = "vk-video")]
pub vulkan_ctx: Option<Arc<vk_video::VulkanCtx>>,
pub vulkan_instance: Option<Arc<vk_video::VulkanInstance>>,
#[cfg(feature = "vk-video")]
pub vulkan_device: Option<Arc<vk_video::VulkanDevice>>,
}

impl std::fmt::Debug for PipelineCtx {
Expand All @@ -156,6 +158,7 @@ impl Pipeline {
opts.force_gpu,
opts.wgpu_features,
Default::default(),
None,
)?),
#[cfg(not(feature = "vk-video"))]
None => None,
Expand Down Expand Up @@ -194,7 +197,11 @@ impl Pipeline {
download_dir: download_dir.into(),
event_emitter,
#[cfg(feature = "vk-video")]
vulkan_ctx: preinitialized_ctx.and_then(|ctx| ctx.vulkan_ctx),
vulkan_device: preinitialized_ctx
.as_ref()
.and_then(|ctx| ctx.vulkan_device.clone()),
#[cfg(feature = "vk-video")]
vulkan_instance: preinitialized_ctx.and_then(|ctx| ctx.vulkan_instance.clone()),
},
};

Expand Down
12 changes: 8 additions & 4 deletions compositor_pipeline/src/pipeline/decoder/video/vulkan_video.rs
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,7 @@ use std::{sync::Arc, time::Duration};
use compositor_render::{Frame, FrameData, InputId, Resolution};
use crossbeam_channel::{Receiver, Sender};
use tracing::{debug, error, span, trace, warn, Level};
use vk_video::{VulkanCtx, WgpuTexturesDeocder};
use vk_video::VulkanDevice;

use crate::{
error::InputInitError,
Expand All @@ -17,7 +17,11 @@ pub fn start_vulkan_video_decoder_thread(
frame_sender: Sender<PipelineEvent<Frame>>,
input_id: InputId,
) -> Result<(), InputInitError> {
let Some(vulkan_ctx) = pipeline_ctx.vulkan_ctx.as_ref().map(|ctx| ctx.clone()) else {
let Some(vulkan_ctx) = pipeline_ctx
.vulkan_device
.as_ref()
.map(|device| device.clone())
else {
return Err(InputInitError::VulkanContextRequiredForVulkanDecoder);
};

Expand Down Expand Up @@ -47,12 +51,12 @@ pub fn start_vulkan_video_decoder_thread(
}

fn run_decoder_thread(
vulkan_ctx: Arc<VulkanCtx>,
vulkan_device: Arc<VulkanDevice>,
init_result_sender: Sender<Result<(), InputInitError>>,
chunks_receiver: Receiver<PipelineEvent<EncodedChunk>>,
frame_sender: Sender<PipelineEvent<Frame>>,
) {
let mut decoder = match WgpuTexturesDeocder::new(vulkan_ctx) {
let mut decoder = match vulkan_device.create_wgpu_textures_decoder() {
Ok(decoder) => {
init_result_sender.send(Ok(())).unwrap();
decoder
Expand Down
29 changes: 20 additions & 9 deletions compositor_pipeline/src/pipeline/graphics_context.rs
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,9 @@ pub struct GraphicsContext {
pub queue: Arc<wgpu::Queue>,

#[cfg(feature = "vk-video")]
pub vulkan_ctx: Option<Arc<vk_video::VulkanCtx>>,
pub vulkan_device: Option<Arc<vk_video::VulkanDevice>>,
#[cfg(feature = "vk-video")]
pub vulkan_instance: Option<Arc<vk_video::VulkanInstance>>,
}

impl GraphicsContext {
Expand All @@ -17,6 +19,7 @@ impl GraphicsContext {
force_gpu: bool,
features: wgpu::Features,
limits: wgpu::Limits,
mut compatible_surface: Option<&mut wgpu::Surface<'_>>,
) -> Result<Self, InitPipelineError> {
use compositor_render::{required_wgpu_features, set_required_wgpu_limits};
use tracing::warn;
Expand All @@ -26,23 +29,31 @@ impl GraphicsContext {

let limits = set_required_wgpu_limits(limits);

match vk_video::VulkanCtx::new(vulkan_features, limits.clone()) {
Ok(ctx) => Ok(GraphicsContext {
device: ctx.wgpu_ctx.device.clone(),
queue: ctx.wgpu_ctx.queue.clone(),
vulkan_ctx: Some(ctx.into()),
match vk_video::VulkanInstance::new().and_then(|instance| {
let device =
instance.create_device(vulkan_features, limits.clone(), &mut compatible_surface)?;

Ok((instance, device))
}) {
Ok((instance, device)) => Ok(GraphicsContext {
device: device.wgpu_device.clone(),
queue: device.wgpu_queue.clone(),
vulkan_instance: Some(instance),
vulkan_device: Some(device),
}),

Err(err) => {
warn!("Cannot initialize vulkan video decoding context. Reason: {err}. Initializing without vulkan video support.");

let (device, queue) = create_wgpu_ctx(force_gpu, features, limits)
.map_err(InitRendererEngineError::FailedToInitWgpuCtx)?;
let (device, queue) =
create_wgpu_ctx(force_gpu, features, limits, compatible_surface.map(|s| &*s))
.map_err(InitRendererEngineError::FailedToInitWgpuCtx)?;

Ok(GraphicsContext {
device,
queue,
vulkan_ctx: None,
vulkan_device: None,
vulkan_instance: None,
})
}
}
Expand Down
6 changes: 4 additions & 2 deletions compositor_render/src/wgpu/ctx.rs
Original file line number Diff line number Diff line change
Expand Up @@ -34,7 +34,8 @@ impl WgpuCtx {
Self::new_from_device_queue(device, queue)?
}
None => {
let (device, queue) = create_wgpu_ctx(force_gpu, features, Default::default())?;
let (device, queue) =
create_wgpu_ctx(force_gpu, features, Default::default(), None)?;
Self::new_from_device_queue(device, queue)?
}
};
Expand Down Expand Up @@ -105,6 +106,7 @@ pub fn create_wgpu_ctx(
force_gpu: bool,
features: wgpu::Features,
limits: wgpu::Limits,
compatible_surface: Option<&wgpu::Surface<'_>>,
) -> Result<(Arc<wgpu::Device>, Arc<wgpu::Queue>), CreateWgpuCtxError> {
let instance = wgpu::Instance::new(wgpu::InstanceDescriptor {
backends: wgpu::Backends::all(),
Expand All @@ -117,7 +119,7 @@ pub fn create_wgpu_ctx(
let adapter = pollster::block_on(instance.request_adapter(&wgpu::RequestAdapterOptionsBase {
power_preference: wgpu::PowerPreference::HighPerformance,
force_fallback_adapter: false,
compatible_surface: None,
compatible_surface,
}))
.ok_or(CreateWgpuCtxError::NoAdapter)?;

Expand Down
19 changes: 11 additions & 8 deletions integration_tests/examples/manual_graphics_initialization.rs
Original file line number Diff line number Diff line change
Expand Up @@ -9,26 +9,29 @@ fn main() {
};
use live_compositor::config::read_config;

let graphics_context =
GraphicsContext::new(false, wgpu::Features::default(), wgpu::Limits::default()).unwrap();
let graphics_context = GraphicsContext::new(
false,
wgpu::Features::default(),
wgpu::Limits::default(),
None,
)
.unwrap();

let _device = graphics_context.device.clone();
let _queue = graphics_context.queue.clone();

let _adapter = graphics_context
.vulkan_ctx
.vulkan_device
.as_ref()
.unwrap()
.wgpu_ctx
.adapter
.wgpu_adapter
.clone();

let _instance = graphics_context
.vulkan_ctx
.vulkan_instance
.as_ref()
.unwrap()
.wgpu_ctx
.instance
.wgpu_instance
.clone();

let config = read_config();
Expand Down
2 changes: 1 addition & 1 deletion integration_tests/examples/raw_channel_input.rs
Original file line number Diff line number Diff line change
Expand Up @@ -44,7 +44,7 @@ fn main() {
level: "info,wgpu_hal=warn,wgpu_core=warn".to_string(),
});
let config = read_config();
let ctx = GraphicsContext::new(false, Default::default(), Default::default()).unwrap();
let ctx = GraphicsContext::new(false, Default::default(), Default::default(), None).unwrap();
let (wgpu_device, wgpu_queue) = (ctx.device.clone(), ctx.queue.clone());
// no chromium support, so we can ignore _event_loop
let (pipeline, _event_loop) = Pipeline::new(Options {
Expand Down
2 changes: 1 addition & 1 deletion integration_tests/examples/raw_channel_output.rs
Original file line number Diff line number Diff line change
Expand Up @@ -57,7 +57,7 @@ fn main() {
});
let mut config = read_config();
config.queue_options.ahead_of_time_processing = true;
let ctx = GraphicsContext::new(false, Default::default(), Default::default()).unwrap();
let ctx = GraphicsContext::new(false, Default::default(), Default::default(), None).unwrap();
let (wgpu_device, wgpu_queue) = (ctx.device.clone(), ctx.queue.clone());
// no chromium support, so we can ignore _event_loop
let (pipeline, _event_loop) = Pipeline::new(Options {
Expand Down
4 changes: 2 additions & 2 deletions integration_tests/examples/vulkan.rs
Original file line number Diff line number Diff line change
Expand Up @@ -77,7 +77,7 @@ fn client_code() -> Result<()> {
const INPUT_PORT: u16 = 8006;
const OUTPUT_PORT: u16 = 8004;

const VIDEOS: u16 = 1;
const VIDEOS: u16 = 6;
start_ffmpeg_receive(Some(OUTPUT_PORT), None)?;

let config = read_config();
Expand Down Expand Up @@ -180,7 +180,7 @@ fn client_code() -> Result<()> {
Pipeline::start(&pipeline);

for i in 0..VIDEOS {
start_ffmpeg_send(IP, Some(INPUT_PORT + 2 * i), None, TestSample::Sample)?;
start_ffmpeg_send(IP, Some(INPUT_PORT + 2 * i), None, TestSample::BigBuckBunny)?;
}

let event_loop_fallback = || {
Expand Down
5 changes: 3 additions & 2 deletions src/snapshot_tests/utils.rs
Original file line number Diff line number Diff line change
Expand Up @@ -48,8 +48,9 @@ pub(super) fn frame_to_rgba(frame: &Frame) -> Vec<u8> {

pub(super) fn create_renderer() -> Renderer {
static CTX: OnceLock<(Arc<wgpu::Device>, Arc<wgpu::Queue>)> = OnceLock::new();
let wgpu_ctx =
CTX.get_or_init(|| create_wgpu_ctx(false, Default::default(), Default::default()).unwrap());
let wgpu_ctx = CTX.get_or_init(|| {
create_wgpu_ctx(false, Default::default(), Default::default(), None).unwrap()
});

let (renderer, _event_loop) = Renderer::new(RendererOptions {
web_renderer: web_renderer::WebRendererInitOptions {
Expand Down
3 changes: 3 additions & 0 deletions vk-video/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,7 @@ repository = "https://github.com/software-mansion/live-compositor"

[dependencies]
ash = "0.38.0"
bytemuck = { version = "1.19.0", features = ["derive"] }
bytes = "1"
derivative = "2.2.0"
h264-reader = { git = "https://github.com/membraneframework-labs/h264-reader.git", branch = "live-compositor" }
Expand All @@ -21,6 +22,8 @@ wgpu = "23.0.0"

[dev-dependencies]
tracing-subscriber = "0.3.18"
winit = "0.29"
clap = { version = "4.5.20", features = ["derive"] }

[build-dependencies]
cfg_aliases = "0.2.1"
14 changes: 8 additions & 6 deletions vk-video/examples/basic.rs
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@
fn main() {
use std::io::Write;

use vk_video::Frame;
use vk_video::{Frame, VulkanInstance};

let subscriber = tracing_subscriber::FmtSubscriber::builder()
.with_max_level(tracing::Level::INFO)
Expand All @@ -18,17 +18,19 @@ fn main() {

let h264_bytestream = std::fs::read(&args[1]).unwrap_or_else(|_| panic!("read {}", args[1]));

let vulkan_ctx = std::sync::Arc::new(
vk_video::VulkanCtx::new(
let vulkan_instance = VulkanInstance::new().unwrap();
let vulkan_device = vulkan_instance
.create_device(
wgpu::Features::empty(),
wgpu::Limits {
max_push_constant_size: 128,
..Default::default()
},
&mut None,
)
.unwrap(),
);
let mut decoder = vk_video::BytesDecoder::new(vulkan_ctx).unwrap();
.unwrap();

let mut decoder = vulkan_device.create_bytes_decoder().unwrap();

let mut output_file = std::fs::File::create("output.nv12").unwrap();

Expand Down
43 changes: 43 additions & 0 deletions vk-video/examples/player/decoder.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,43 @@
use std::{
io::Read,
sync::{mpsc::SyncSender, Arc},
time::Duration,
};

use bytes::BytesMut;
use vk_video::VulkanDevice;

use crate::FrameWithPts;

/// Reads an h264 Annex-B bytestream from `bytestream_reader`, decodes it into
/// wgpu textures on `vulkan_device`, and sends each decoded frame down `tx`
/// tagged with a presentation timestamp synthesized from `framerate`
/// (frame `k` gets pts `k / framerate` seconds).
///
/// Returns when the input stream is exhausted, when a non-transient read
/// error occurs, or when the receiving end of `tx` has been dropped.
///
/// # Panics
///
/// Panics if the decoder cannot be created or if a chunk fails to decode.
pub fn run_decoder(
    tx: SyncSender<super::FrameWithPts>,
    framerate: u64,
    vulkan_device: Arc<VulkanDevice>,
    mut bytestream_reader: impl Read,
) {
    let mut decoder = vulkan_device.create_wgpu_textures_decoder().unwrap();
    let frame_interval = 1.0 / (framerate as f64);
    let mut frame_number = 0u64;
    // Reused read buffer; `decode` is fed only the `n` bytes actually read.
    let mut buffer = BytesMut::zeroed(4096);

    loop {
        let n = match bytestream_reader.read(&mut buffer) {
            // EOF — the whole bytestream has been consumed.
            Ok(0) => return,
            Ok(n) => n,
            // `Interrupted` reads are transient (e.g. a signal arrived) and
            // should be retried, per the `std::io::Read` contract.
            Err(err) if err.kind() == std::io::ErrorKind::Interrupted => continue,
            // Any other read error ends the stream; this is an example
            // player, so we stop decoding rather than propagate.
            Err(_) => return,
        };

        let decoded = decoder.decode(&buffer[..n], None).unwrap();

        for f in decoded {
            let result = FrameWithPts {
                frame: f.frame,
                pts: Duration::from_secs_f64(frame_number as f64 * frame_interval),
            };

            frame_number += 1;

            // The receiver is gone (player window closed) — stop decoding.
            if tx.send(result).is_err() {
                return;
            }
        }
    }
}
Loading

0 comments on commit 40ab0b7

Please sign in to comment.