Add missing display.rs and wrclient display dependencies

display.rs (winit + wgpu renderer) and Cargo.toml changes (winit,
wgpu, pollster, env_logger) were created but not committed by the
Task 5 subagent.
This commit is contained in:
Till Wegmueller 2026-04-07 17:51:40 +02:00
parent 564c473ab4
commit 43a4d7e6af
3 changed files with 1124 additions and 4 deletions

781
Cargo.lock generated

File diff suppressed because it is too large Load diff

View file

@@ -13,6 +13,10 @@ serde.workspace = true
quinn.workspace = true
rustls.workspace = true
tokio.workspace = true
winit = "0.30"
wgpu = "24"
env_logger = "0.11"
pollster = "0.4"
[dev-dependencies]
rcgen.workspace = true

View file

@@ -0,0 +1,343 @@
//! GPU-accelerated display rendering via winit + wgpu.
//!
//! Creates a native window and renders received framebuffer data as a
//! GPU texture using a fullscreen quad. Designed to be independent of
//! the network layer -- accepts raw pixel data as `&[u8]`.
use std::sync::Arc;
use tracing::{debug, info};
use winit::dpi::PhysicalSize;
use winit::window::Window;
/// Holds all wgpu state needed to render frames to a window.
pub struct Display {
    // Window-backed render surface; 'static because `new` clones the
    // Arc<Window> into `create_surface`, keeping the window alive.
    surface: wgpu::Surface<'static>,
    device: wgpu::Device,
    queue: wgpu::Queue,
    // Current surface size/format; updated on resize and reused when
    // the surface must be reconfigured after a lost/outdated error.
    surface_config: wgpu::SurfaceConfiguration,
    // Fullscreen-triangle pipeline that samples `frame_texture`.
    render_pipeline: wgpu::RenderPipeline,
    // Binds `frame_texture`'s view and its sampler for the fragment stage.
    bind_group: wgpu::BindGroup,
    // BGRA8 texture receiving server framebuffer data via `update_frame`.
    frame_texture: wgpu::Texture,
    // Framebuffer dimensions in pixels (texture size, not window size).
    width: u32,
    height: u32,
}
/// Inline WGSL shader for rendering a fullscreen textured quad.
///
/// Uses a single oversized triangle (3 vertices) to cover the entire
/// screen without needing a vertex buffer. The fragment shader samples
/// the frame texture at the interpolated UV coordinates.
///
/// The UV array maps the oversized clip-space triangle so that the
/// visible [-1,1] region lands on UV [0,1], with V flipped because
/// texture coordinates grow downward while clip-space Y grows upward.
const SHADER_SOURCE: &str = r#"
struct VertexOutput {
@builtin(position) position: vec4<f32>,
@location(0) uv: vec2<f32>,
};
@vertex
fn vs_main(@builtin(vertex_index) vertex_index: u32) -> VertexOutput {
// Fullscreen triangle: 3 vertices that cover the entire clip space.
var positions = array<vec2<f32>, 3>(
vec2<f32>(-1.0, -1.0),
vec2<f32>(3.0, -1.0),
vec2<f32>(-1.0, 3.0),
);
// Map clip-space [-1,1] to UV [0,1], flipping Y for texture coords.
var uvs = array<vec2<f32>, 3>(
vec2<f32>(0.0, 1.0),
vec2<f32>(2.0, 1.0),
vec2<f32>(0.0, -1.0),
);
var out: VertexOutput;
out.position = vec4<f32>(positions[vertex_index], 0.0, 1.0);
out.uv = uvs[vertex_index];
return out;
}
@group(0) @binding(0) var t_frame: texture_2d<f32>;
@group(0) @binding(1) var s_frame: sampler;
@fragment
fn fs_main(in: VertexOutput) -> @location(0) vec4<f32> {
return textureSample(t_frame, s_frame, in.uv);
}
"#;
impl Display {
/// Create a new Display backed by the given window.
///
/// `width` and `height` are the framebuffer dimensions from the server.
/// The wgpu surface is configured to match the window's inner size.
pub async fn new(window: Arc<Window>, width: u32, height: u32) -> Self {
let instance = wgpu::Instance::new(&wgpu::InstanceDescriptor {
backends: wgpu::Backends::all(),
..Default::default()
});
let surface = instance.create_surface(window.clone()).unwrap();
let adapter = instance
.request_adapter(&wgpu::RequestAdapterOptions {
power_preference: wgpu::PowerPreference::default(),
compatible_surface: Some(&surface),
force_fallback_adapter: false,
})
.await
.expect("failed to find a suitable GPU adapter");
info!(adapter = ?adapter.get_info().name, "selected GPU adapter");
let (device, queue) = adapter
.request_device(
&wgpu::DeviceDescriptor {
label: Some("wrclient device"),
required_features: wgpu::Features::empty(),
required_limits: wgpu::Limits::default(),
memory_hints: wgpu::MemoryHints::default(),
},
None,
)
.await
.expect("failed to create GPU device");
let window_size = window.inner_size();
let surface_caps = surface.get_capabilities(&adapter);
let surface_format = surface_caps
.formats
.iter()
.find(|f| f.is_srgb())
.copied()
.unwrap_or(surface_caps.formats[0]);
let surface_config = wgpu::SurfaceConfiguration {
usage: wgpu::TextureUsages::RENDER_ATTACHMENT,
format: surface_format,
width: window_size.width.max(1),
height: window_size.height.max(1),
present_mode: wgpu::PresentMode::AutoVsync,
alpha_mode: surface_caps.alpha_modes[0],
view_formats: vec![],
desired_maximum_frame_latency: 2,
};
surface.configure(&device, &surface_config);
// Create the frame texture that receives pixel data from the server.
let frame_texture = device.create_texture(&wgpu::TextureDescriptor {
label: Some("frame texture"),
size: wgpu::Extent3d {
width,
height,
depth_or_array_layers: 1,
},
mip_level_count: 1,
sample_count: 1,
dimension: wgpu::TextureDimension::D2,
format: wgpu::TextureFormat::Bgra8UnormSrgb,
usage: wgpu::TextureUsages::TEXTURE_BINDING | wgpu::TextureUsages::COPY_DST,
view_formats: &[],
});
let frame_texture_view = frame_texture.create_view(&wgpu::TextureViewDescriptor::default());
let sampler = device.create_sampler(&wgpu::SamplerDescriptor {
label: Some("frame sampler"),
address_mode_u: wgpu::AddressMode::ClampToEdge,
address_mode_v: wgpu::AddressMode::ClampToEdge,
address_mode_w: wgpu::AddressMode::ClampToEdge,
mag_filter: wgpu::FilterMode::Linear,
min_filter: wgpu::FilterMode::Linear,
..Default::default()
});
let bind_group_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
label: Some("frame bind group layout"),
entries: &[
wgpu::BindGroupLayoutEntry {
binding: 0,
visibility: wgpu::ShaderStages::FRAGMENT,
ty: wgpu::BindingType::Texture {
sample_type: wgpu::TextureSampleType::Float { filterable: true },
view_dimension: wgpu::TextureViewDimension::D2,
multisampled: false,
},
count: None,
},
wgpu::BindGroupLayoutEntry {
binding: 1,
visibility: wgpu::ShaderStages::FRAGMENT,
ty: wgpu::BindingType::Sampler(wgpu::SamplerBindingType::Filtering),
count: None,
},
],
});
let bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
label: Some("frame bind group"),
layout: &bind_group_layout,
entries: &[
wgpu::BindGroupEntry {
binding: 0,
resource: wgpu::BindingResource::TextureView(&frame_texture_view),
},
wgpu::BindGroupEntry {
binding: 1,
resource: wgpu::BindingResource::Sampler(&sampler),
},
],
});
let shader = device.create_shader_module(wgpu::ShaderModuleDescriptor {
label: Some("fullscreen shader"),
source: wgpu::ShaderSource::Wgsl(SHADER_SOURCE.into()),
});
let pipeline_layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
label: Some("render pipeline layout"),
bind_group_layouts: &[&bind_group_layout],
push_constant_ranges: &[],
});
let render_pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
label: Some("fullscreen pipeline"),
layout: Some(&pipeline_layout),
vertex: wgpu::VertexState {
module: &shader,
entry_point: Some("vs_main"),
buffers: &[],
compilation_options: wgpu::PipelineCompilationOptions::default(),
},
fragment: Some(wgpu::FragmentState {
module: &shader,
entry_point: Some("fs_main"),
targets: &[Some(wgpu::ColorTargetState {
format: surface_format,
blend: Some(wgpu::BlendState::REPLACE),
write_mask: wgpu::ColorWrites::ALL,
})],
compilation_options: wgpu::PipelineCompilationOptions::default(),
}),
primitive: wgpu::PrimitiveState {
topology: wgpu::PrimitiveTopology::TriangleList,
strip_index_format: None,
front_face: wgpu::FrontFace::Ccw,
cull_mode: None,
polygon_mode: wgpu::PolygonMode::Fill,
unclipped_depth: false,
conservative: false,
},
depth_stencil: None,
multisample: wgpu::MultisampleState::default(),
multiview: None,
cache: None,
});
Self {
surface,
device,
queue,
surface_config,
render_pipeline,
bind_group,
frame_texture,
width,
height,
}
}
/// Upload framebuffer pixel data to the GPU texture.
///
/// `pixel_data` must be BGRA8 format, `width * height * 4` bytes.
/// If the server sends ARGB8888, the caller should convert byte order
/// before calling this method.
pub fn update_frame(&self, pixel_data: &[u8]) {
let expected = (self.width * self.height * 4) as usize;
if pixel_data.len() != expected {
debug!(
got = pixel_data.len(),
expected, "frame data size mismatch, skipping upload"
);
return;
}
self.queue.write_texture(
self.frame_texture.as_image_copy(),
pixel_data,
wgpu::TexelCopyBufferLayout {
offset: 0,
bytes_per_row: Some(self.width * 4),
rows_per_image: Some(self.height),
},
wgpu::Extent3d {
width: self.width,
height: self.height,
depth_or_array_layers: 1,
},
);
}
/// Render the current frame texture to the window surface.
///
/// Returns `Ok(())` on success. On surface errors (lost/outdated),
/// reconfigures the surface and returns the error for the caller to
/// decide whether to retry.
pub fn render(&mut self) -> Result<(), wgpu::SurfaceError> {
let output = match self.surface.get_current_texture() {
Ok(tex) => tex,
Err(wgpu::SurfaceError::Lost | wgpu::SurfaceError::Outdated) => {
self.surface.configure(&self.device, &self.surface_config);
return Err(wgpu::SurfaceError::Lost);
}
Err(e) => return Err(e),
};
let view = output
.texture
.create_view(&wgpu::TextureViewDescriptor::default());
let mut encoder = self
.device
.create_command_encoder(&wgpu::CommandEncoderDescriptor {
label: Some("render encoder"),
});
{
let mut render_pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
label: Some("fullscreen pass"),
color_attachments: &[Some(wgpu::RenderPassColorAttachment {
view: &view,
resolve_target: None,
ops: wgpu::Operations {
load: wgpu::LoadOp::Clear(wgpu::Color::BLACK),
store: wgpu::StoreOp::Store,
},
})],
depth_stencil_attachment: None,
timestamp_writes: None,
occlusion_query_set: None,
});
render_pass.set_pipeline(&self.render_pipeline);
render_pass.set_bind_group(0, &self.bind_group, &[]);
render_pass.draw(0..3, 0..1);
}
self.queue.submit(std::iter::once(encoder.finish()));
output.present();
Ok(())
}
/// Handle a window resize by reconfiguring the surface.
pub fn resize(&mut self, new_size: PhysicalSize<u32>) {
if new_size.width > 0 && new_size.height > 0 {
self.surface_config.width = new_size.width;
self.surface_config.height = new_size.height;
self.surface.configure(&self.device, &self.surface_config);
debug!(
width = new_size.width,
height = new_size.height,
"surface resized"
);
}
}
}