Get the file finder rendering with a drop shadow

Nathan Sobo 2021-03-22 20:54:52 -06:00
parent 859c0f53cb
commit 548b542766
2 changed files with 122 additions and 76 deletions

@@ -10,7 +10,7 @@ use shaders::ToFloat2 as _;

 const SHADERS_METALLIB: &'static [u8] =
     include_bytes!(concat!(env!("OUT_DIR"), "/shaders.metallib"));
-const INSTANCE_BUFFER_SIZE: u64 = 1024 * 1024;
+const INSTANCE_BUFFER_SIZE: usize = 1024 * 1024; // This is an arbitrary decision. There's probably a more optimal value.

 pub struct Renderer {
     quad_pipeline_state: metal::RenderPipelineState,
@@ -38,8 +38,10 @@ impl Renderer {
             (unit_vertices.len() * mem::size_of::<shaders::vector_float2>()) as u64,
             MTLResourceOptions::StorageModeManaged,
         );
-        let instances =
-            device.new_buffer(INSTANCE_BUFFER_SIZE, MTLResourceOptions::StorageModeManaged);
+        let instances = device.new_buffer(
+            INSTANCE_BUFFER_SIZE as u64,
+            MTLResourceOptions::StorageModeManaged,
+        );

         Ok(Self {
             quad_pipeline_state: build_pipeline_state(
@@ -73,13 +75,31 @@ impl Renderer {
             zfar: 1.0,
         });

+        let mut offset = 0;
         for layer in scene.layers() {
-            self.render_shadows(scene, layer, ctx);
-            self.render_quads(scene, layer, ctx);
+            self.render_shadows(scene, layer, &mut offset, ctx);
+            self.render_quads(scene, layer, &mut offset, ctx);
         }
     }

-    fn render_shadows(&mut self, scene: &Scene, layer: &Layer, ctx: &RenderContext) {
+    fn render_shadows(
+        &mut self,
+        scene: &Scene,
+        layer: &Layer,
+        offset: &mut usize,
+        ctx: &RenderContext,
+    ) {
+        if layer.shadows().is_empty() {
+            return;
+        }
+        align_offset(offset);
+        let next_offset = *offset + layer.shadows().len() * mem::size_of::<shaders::GPUIShadow>();
+        assert!(
+            next_offset <= INSTANCE_BUFFER_SIZE,
+            "instance buffer exhausted"
+        );
+
         ctx.command_encoder
             .set_render_pipeline_state(&self.shadow_pipeline_state);
         ctx.command_encoder.set_vertex_buffer(
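
Each render_* method now follows the same bookkeeping pattern: align the running offset, compute where this layer's instances will end, and assert that the shared buffer can hold them. Here is a standalone sketch of that flow with no Metal types involved; `reserve` is a hypothetical helper name introduced only for illustration (the real code inlines this logic per primitive type):

    use std::mem;
    use std::ops::Range;

    const INSTANCE_BUFFER_SIZE: usize = 1024 * 1024;

    // Mirrors the commit's align_offset: round the running offset up to a
    // 256-byte boundary before each layer's instances are written.
    fn align_offset(offset: &mut usize) {
        let r = *offset % 256;
        if r > 0 {
            *offset += 256 - r;
        }
    }

    // Hypothetical helper: claim room for `count` instances of `T` and
    // return the byte range they occupy in the shared buffer.
    fn reserve<T>(offset: &mut usize, count: usize) -> Range<usize> {
        align_offset(offset);
        let next_offset = *offset + count * mem::size_of::<T>();
        assert!(next_offset <= INSTANCE_BUFFER_SIZE, "instance buffer exhausted");
        let range = *offset..next_offset;
        *offset = next_offset;
        range
    }

    fn main() {
        let mut offset = 0;
        let shadows = reserve::<[f32; 8]>(&mut offset, 3); // 0..96
        let quads = reserve::<[f32; 8]>(&mut offset, 5); // 256..416, after alignment
        println!("shadows at {:?}, quads at {:?}", shadows, quads);
    }
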
@@ -90,7 +110,7 @@ impl Renderer {
         ctx.command_encoder.set_vertex_buffer(
             shaders::GPUIShadowInputIndex_GPUIShadowInputIndexShadows as u64,
             Some(&self.instances),
-            0,
+            *offset as u64,
         );
         ctx.command_encoder.set_vertex_bytes(
             shaders::GPUIShadowInputIndex_GPUIShadowInputIndexUniforms as u64,
@@ -101,11 +121,11 @@ impl Renderer {
                 .as_ptr() as *const c_void,
         );

-        let batch_size = self.instances.length() as usize / mem::size_of::<shaders::GPUIShadow>();
-
-        let buffer_contents = self.instances.contents() as *mut shaders::GPUIShadow;
-        for shadow_batch in layer.shadows().chunks(batch_size) {
-            for (ix, shadow) in shadow_batch.iter().enumerate() {
+        let buffer_contents = unsafe {
+            (self.instances.contents() as *mut u8).offset(*offset as isize)
+                as *mut shaders::GPUIShadow
+        };
+        for (ix, shadow) in layer.shadows().iter().enumerate() {
             let shape_bounds = shadow.bounds * scene.scale_factor();
             let shader_shadow = shaders::GPUIShadow {
                 origin: shape_bounds.origin().to_float2(),
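
Because a layer's instances now start partway into the shared buffer, the destination pointer is produced by offsetting the base address in bytes before casting to the instance type. A minimal sketch of the same write pattern against an ordinary heap allocation rather than a Metal buffer; `Instance` and its fields are stand-ins for shaders::GPUIShadow:

    #[repr(C)]
    #[derive(Clone, Copy, Debug)]
    struct Instance {
        origin: [f32; 2],
        size: [f32; 2],
    }

    fn main() {
        // Stand-in for the Metal buffer; u64 elements keep the base pointer
        // aligned well enough for the f32 fields written below.
        let mut backing = vec![0u64; 128];
        let offset: usize = 256; // already rounded by align_offset
        let instances = [
            Instance { origin: [0.0, 0.0], size: [10.0, 10.0] },
            Instance { origin: [4.0, 4.0], size: [20.0, 20.0] },
        ];

        // Offset in *bytes*, then reinterpret as a pointer to the instance
        // type, just as the renderer does with self.instances.contents().
        let buffer_contents = unsafe {
            (backing.as_mut_ptr() as *mut u8).offset(offset as isize) as *mut Instance
        };
        for (ix, instance) in instances.iter().enumerate() {
            unsafe { *(buffer_contents.offset(ix as isize)) = *instance };
        }
    }
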
@@ -118,21 +138,38 @@ impl Renderer {
                 *(buffer_contents.offset(ix as isize)) = shader_shadow;
             }
         }

         self.instances.did_modify_range(NSRange {
-            location: 0,
-            length: (shadow_batch.len() * mem::size_of::<shaders::GPUIShadow>()) as u64,
+            location: *offset as u64,
+            length: (next_offset - *offset) as u64,
         });
+        *offset = next_offset;

         ctx.command_encoder.draw_primitives_instanced(
             metal::MTLPrimitiveType::Triangle,
             0,
             6,
-            shadow_batch.len() as u64,
+            layer.shadows().len() as u64,
         );
-        }
     }

-    fn render_quads(&mut self, scene: &Scene, layer: &Layer, ctx: &RenderContext) {
+    fn render_quads(
+        &mut self,
+        scene: &Scene,
+        layer: &Layer,
+        offset: &mut usize,
+        ctx: &RenderContext,
+    ) {
+        if layer.quads().is_empty() {
+            return;
+        }
+        align_offset(offset);
+        let next_offset = *offset + layer.quads().len() * mem::size_of::<shaders::GPUIQuad>();
+        assert!(
+            next_offset <= INSTANCE_BUFFER_SIZE,
+            "instance buffer exhausted"
+        );
+
         ctx.command_encoder
             .set_render_pipeline_state(&self.quad_pipeline_state);
         ctx.command_encoder.set_vertex_buffer(
@@ -143,7 +180,7 @@ impl Renderer {
         ctx.command_encoder.set_vertex_buffer(
             shaders::GPUIQuadInputIndex_GPUIQuadInputIndexQuads as u64,
             Some(&self.instances),
-            0,
+            *offset as u64,
         );
         ctx.command_encoder.set_vertex_bytes(
             shaders::GPUIQuadInputIndex_GPUIQuadInputIndexUniforms as u64,
@@ -154,11 +191,11 @@ impl Renderer {
                 .as_ptr() as *const c_void,
         );

-        let batch_size = self.instances.length() as usize / mem::size_of::<shaders::GPUIQuad>();
-
-        let buffer_contents = self.instances.contents() as *mut shaders::GPUIQuad;
-        for quad_batch in layer.quads().chunks(batch_size) {
-            for (ix, quad) in quad_batch.iter().enumerate() {
+        let buffer_contents = unsafe {
+            (self.instances.contents() as *mut u8).offset(*offset as isize)
+                as *mut shaders::GPUIQuad
+        };
+        for (ix, quad) in layer.quads().iter().enumerate() {
             let bounds = quad.bounds * scene.scale_factor();
             let border_width = quad.border.width * scene.scale_factor();
             let shader_quad = shaders::GPUIQuad {
@@ -183,18 +220,26 @@ impl Renderer {
                 *(buffer_contents.offset(ix as isize)) = shader_quad;
             }
         }

         self.instances.did_modify_range(NSRange {
-            location: 0,
-            length: (quad_batch.len() * mem::size_of::<shaders::GPUIQuad>()) as u64,
+            location: *offset as u64,
+            length: (next_offset - *offset) as u64,
         });
+        *offset = next_offset;

         ctx.command_encoder.draw_primitives_instanced(
             metal::MTLPrimitiveType::Triangle,
             0,
             6,
-            quad_batch.len() as u64,
+            layer.quads().len() as u64,
         );
-        }
     }
 }
+
+fn align_offset(offset: &mut usize) {
+    let r = *offset % 256;
+    if r > 0 {
+        *offset += 256 - r; // Align to a multiple of 256 to make Metal happy
+    }
+}
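
The 256 here corresponds to Metal's buffer-offset alignment requirement for constant-address-space buffers on macOS, which is why every layer's instances start on a 256-byte boundary. A quick check of the helper's rounding behavior:

    fn align_offset(offset: &mut usize) {
        let r = *offset % 256;
        if r > 0 {
            *offset += 256 - r;
        }
    }

    fn main() {
        for start in [0usize, 1, 255, 256, 257, 1000] {
            let mut offset = start;
            align_offset(&mut offset);
            assert_eq!(offset % 256, 0); // always lands on a 256-byte boundary
            assert!(offset - start < 256); // never skips a whole window
            println!("{} -> {}", start, offset); // 0->0, 1->256, ..., 1000->1024
        }
    }
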

@@ -7,7 +7,7 @@ pub struct Scene {
     active_layer_stack: Vec<usize>,
 }

-#[derive(Default)]
+#[derive(Default, Debug)]
 pub struct Layer {
     clip_bounds: Option<RectF>,
     quads: Vec<Quad>,
@@ -22,6 +22,7 @@ pub struct Quad {
     pub corner_radius: f32,
 }

+#[derive(Debug)]
 pub struct Shadow {
     pub bounds: RectF,
     pub corner_radius: f32,
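
The new Debug derives make layers and shadows printable while diagnosing rendering issues. A trimmed stand-in for the real structs (most fields omitted) showing what the derives buy:

    #[derive(Debug)]
    struct Shadow {
        corner_radius: f32,
    }

    #[derive(Default, Debug)]
    struct Layer {
        shadows: Vec<Shadow>,
    }

    fn main() {
        let mut layer = Layer::default();
        layer.shadows.push(Shadow { corner_radius: 6.0 });
        // Prints: Layer { shadows: [Shadow { corner_radius: 6.0 }] }
        println!("{:?}", layer);
    }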