soul searching on the whole Sprite/Texture thing. It seems like SFML encourages batches of Textures to be loaded vs a constant stream of new Textures like this application will be producing. So, methinks it's time to venture out into some vulkan 2d graphics libraries

master
mitchellhansen 6 years ago
parent cc5b2a346e
commit 807597ebaf

@ -40,6 +40,7 @@ use vulkano::pipeline::ComputePipeline;
use vulkano::sync::GpuFuture; use vulkano::sync::GpuFuture;
use shaderc::CompileOptions; use shaderc::CompileOptions;
use shade_runner::CompileError; use shade_runner::CompileError;
use crate::workpiece::{WorkpieceLoader, Workpiece};
mod slider; mod slider;
mod timer; mod timer;
@ -47,14 +48,62 @@ mod input;
mod vkprocessor; mod vkprocessor;
mod util; mod util;
mod button; mod button;
mod workpiece;
/*
What next?
Second sprite for rendering paths at x10 or so resolution
color bucketing
Textures and Sprites cannot live in the same struct as there is no way for a sprite to own
its texture and become a single object (rust self-referencing structs)
I want to pull out the textures into their own managing struct instead
But I want to be able to modify the texture after I give it to a sprite which is an issue
So if I place all the textures in a single container and then let a sprite borrow that container
I will no longer be able to modify any of the textures
I have to pass in the textures to the sprite individually so they don't get borrow poisoned
It seems like I can put the textures in a struct as long as I pass the struct.texture explicitly
So at first glance it seems like we need to
+ create a texture
+ assign that texture to a sprite
And any time we want to update the texture. We need to delete the sprite
So I'm kinda coming to the conclusion here that rust SFML is not made for
frequent updates to the screen...
Let's take a look at how easy it would be to replace SFML...
e.g
*/
/// ```
/// struct Thing<'a> {
/// field1: Option<&'a mut Thing<'a>>
/// }
///
/// fn main() {
/// let mut thing1 = Thing { field1: None };
/// let mut thing2 = Thing { field1: None };
///
/// thing1.field1 = Some(&mut thing2);
/// thing1.field1 = Some(&mut thing2); <-- Second mutable borrow error
/// }
/// ```
// What next?
// Second sprite for rendering paths at x10 or so resolution
// color bucketing
fn main() { fn main() {
let font = Font::from_file("resources/fonts/sansation.ttf").unwrap(); let font = Font::from_file("resources/fonts/sansation.ttf").unwrap();
let instance = Instance::new(None, &InstanceExtensions::none(), None).unwrap(); let instance = Instance::new(None, &InstanceExtensions::none(), None).unwrap();
@ -76,11 +125,19 @@ fn main() {
let mut input = Input::new(); let mut input = Input::new();
let xy = processor.xy; let xy = processor.xy;
let mut bg_texture = Texture::new(xy.0, xy.1).unwrap();
bg_texture.update_from_pixels(processor.read_image().as_slice(), xy.0, xy.1, 0, 0);
let mut background_sprite = Sprite::with_texture(&bg_texture); let mut workpieceloader = WorkpieceLoader::new(String::from("resources/images/funky-bird.jpg"));
background_sprite.set_position((0., 0.)); workpieceloader.load_first_stage(processor.read_image());
let mut workpiece = Workpiece::new();
workpiece.render_sprite.set_texture(&mut workpieceloader.first_stage_texture, false);
// workpiece.render_sprite.set_texture(&mut workpieceloader.swatch_texture, false);
// workpiece.render_sprite.set_texture(&mut workpieceloader.vec.get(0).unwrap(), false);
workpiece.render_sprite.set_texture(&mut workpieceloader.vec.get(0).unwrap(), false);
workpiece.render_sprite = Sprite::new();
workpieceloader.first_stage_texture.set_smooth(false);
let mut slider = Slider::new(Vector2f::new(40.0, 40.0), None, &font); let mut slider = Slider::new(Vector2f::new(40.0, 40.0), None, &font);
@ -127,9 +184,9 @@ fn main() {
}, },
Event::MouseWheelScrolled { wheel, delta, x, y } => { Event::MouseWheelScrolled { wheel, delta, x, y } => {
if delta > 0.0 { if delta > 0.0 {
background_sprite.set_scale(background_sprite.get_scale()*Vector2f::new(1.1,1.1)); workpiece.render_sprite.set_scale(workpiece.render_sprite.get_scale()*Vector2f::new(1.1,1.1));
} else { } else {
background_sprite.set_scale(background_sprite.get_scale()*Vector2f::new(0.9,0.9)); workpiece.render_sprite.set_scale(workpiece.render_sprite.get_scale()*Vector2f::new(0.9,0.9));
} }
}, },
_ => {} _ => {}
@ -141,8 +198,8 @@ fn main() {
if input.is_mousebutton_held(Button::Middle) { if input.is_mousebutton_held(Button::Middle) {
let delta = mouse_xy - mouse::desktop_position(); let delta = mouse_xy - mouse::desktop_position();
mouse_xy = mouse::desktop_position(); mouse_xy = mouse::desktop_position();
background_sprite.set_position( workpiece.render_sprite.set_position(
background_sprite.position() - Vector2f::new(delta.x as f32, delta.y as f32) workpiece.render_sprite.position() - Vector2f::new(delta.x as f32, delta.y as f32)
); );
} }
@ -160,7 +217,7 @@ fn main() {
window.clear(&Color::BLACK); window.clear(&Color::BLACK);
window.draw(&background_sprite); window.draw(&workpiece.render_sprite);
window.draw(&slider); window.draw(&slider);

@ -170,13 +170,15 @@ impl<'a> VkProcessor<'a> {
let mut set = let mut set =
PersistentDescriptorSet::start(self.pipeline.clone().unwrap().clone(), 0) PersistentDescriptorSet::start(self.pipeline.clone().unwrap().clone(), 0)
.add_buffer(write_buffer.clone()).unwrap() .add_buffer(write_buffer.clone()).unwrap()
.add_buffer(read_buffer).unwrap() .add_buffer(read_buffer.clone()).unwrap()
.add_buffer(settings_buffer).unwrap(); .add_buffer(settings_buffer.clone()).unwrap();
self.set = Some(Arc::new(set.build().unwrap())); self.set = Some(Arc::new(set.build().unwrap()));
self.img_buffers.push(write_buffer); self.img_buffers.push(write_buffer);
self.img_buffers.push(read_buffer);
self.settings_buffer = Some(settings_buffer);
} }
pub fn run_kernel(&mut self) { pub fn run_kernel(&mut self) {
@ -220,8 +222,6 @@ impl<'a> VkProcessor<'a> {
image_buffer.push(g); image_buffer.push(g);
image_buffer.push(b); image_buffer.push(b);
image_buffer.push(a); image_buffer.push(a);
//self.img.unwrap().put_pixel(x, y, image::Rgba([r, g, b, a]))
} }
} }
@ -247,16 +247,410 @@ impl<'a> VkProcessor<'a> {
} }
} }
/**
use vulkano::buffer::{BufferUsage, CpuAccessibleBuffer};
use vulkano::command_buffer::{AutoCommandBufferBuilder, DynamicState};
use vulkano::device::{Device, DeviceExtensions};
use vulkano::framebuffer::{Framebuffer, FramebufferAbstract, Subpass, RenderPassAbstract};
use vulkano::image::SwapchainImage;
use vulkano::instance::{Instance, PhysicalDevice};
use vulkano::pipeline::GraphicsPipeline;
use vulkano::pipeline::viewport::Viewport;
use vulkano::swapchain::{AcquireError, PresentMode, SurfaceTransform, Swapchain, SwapchainCreationError};
use vulkano::swapchain;
use vulkano::sync::{GpuFuture, FlushError};
use vulkano::sync;
use vulkano_win::VkSurfaceBuild;
use winit::{EventsLoop, Window, WindowBuilder, Event, WindowEvent};
use std::sync::Arc;
fn main() {
let instance = {
let extensions = vulkano_win::required_extensions();
Instance::new(None, &extensions, None).unwrap()
};
let physical = PhysicalDevice::enumerate(&instance).next().unwrap();
// The objective of this example is to draw a triangle on a window. To do so, we first need to
// create the window.
//
// This is done by creating a `WindowBuilder` from the `winit` crate, then calling the
// `build_vk_surface` method provided by the `VkSurfaceBuild` trait from `vulkano_win`. If you
// ever get an error about `build_vk_surface` being undefined in one of your projects, this
// probably means that you forgot to import this trait.
//
// This returns a `vulkano::swapchain::Surface` object that contains both a cross-platform winit
// window and a cross-platform Vulkan surface that represents the surface of the window.
let mut events_loop = EventsLoop::new();
let surface = WindowBuilder::new().build_vk_surface(&events_loop, instance.clone()).unwrap();
let window = surface.window();
let queue_family = physical.queue_families().find(|&q| {
// We take the first queue that supports drawing to our window.
q.supports_graphics() && surface.is_supported(q).unwrap_or(false)
}).unwrap();
let device_ext = DeviceExtensions { khr_swapchain: true, .. DeviceExtensions::none() };
let (device, mut queues) = Device::new(physical, physical.supported_features(), &device_ext,
[(queue_family, 0.5)].iter().cloned()).unwrap();
let queue = queues.next().unwrap();
// Before we can draw on the surface, we have to create what is called a swapchain. Creating
// a swapchain allocates the color buffers that will contain the image that will ultimately
// be visible on the screen. These images are returned alongside with the swapchain.
let (mut swapchain, images) = {
// Querying the capabilities of the surface. When we create the swapchain we can only
// pass values that are allowed by the capabilities.
let caps = surface.capabilities(physical).unwrap();
let usage = caps.supported_usage_flags;
// The alpha mode indicates how the alpha value of the final image will behave. For example
// you can choose whether the window will be opaque or transparent.
let alpha = caps.supported_composite_alpha.iter().next().unwrap();
// Choosing the internal format that the images will have.
let format = caps.supported_formats[0].0;
// The dimensions of the window, only used to initially setup the swapchain.
// NOTE:
// On some drivers the swapchain dimensions are specified by `caps.current_extent` and the
// swapchain size must use these dimensions.
// These dimensions are always the same as the window dimensions
//
// However, other drivers don't specify a value, i.e. `caps.current_extent` is `None`.
// These drivers will allow anything but the only sensible value is the window dimensions.
//
// Because for both of these cases, the swapchain needs to be the window dimensions, we just use that.
let initial_dimensions = if let Some(dimensions) = window.get_inner_size() {
// convert to physical pixels
let dimensions: (u32, u32) = dimensions.to_physical(window.get_hidpi_factor()).into();
[dimensions.0, dimensions.1]
} else {
// The window no longer exists so exit the application.
return;
};
// Please take a look at the docs for the meaning of the parameters we didn't mention.
Swapchain::new(device.clone(), surface.clone(), caps.min_image_count, format,
initial_dimensions, 1, usage, &queue, SurfaceTransform::Identity, alpha,
PresentMode::Fifo, true, None).unwrap()
};
// We now create a buffer that will store the shape of our triangle.
let vertex_buffer = {
#[derive(Default, Debug, Clone)]
struct Vertex { position: [f32; 2] }
vulkano::impl_vertex!(Vertex, position);
CpuAccessibleBuffer::from_iter(device.clone(), BufferUsage::all(), [
Vertex { position: [-0.5, -0.25] },
Vertex { position: [0.0, 0.5] },
Vertex { position: [0.25, -0.1] }
].iter().cloned()).unwrap()
};
// The next step is to create the shaders.
//
// The raw shader creation API provided by the vulkano library is unsafe, for various reasons.
//
// An overview of what the `vulkano_shaders::shader!` macro generates can be found in the
// `vulkano-shaders` crate docs. You can view them at https://docs.rs/vulkano-shaders/
//
// TODO: explain this in details
mod vs {
vulkano_shaders::shader!{
ty: "vertex",
src: "
#version 450
layout(location = 0) in vec2 position;
void main() {
gl_Position = vec4(position, 0.0, 1.0);
}"
}
}
mod fs {
vulkano_shaders::shader!{
ty: "fragment",
src: "
#version 450
layout(location = 0) out vec4 f_color;
void main() {
f_color = vec4(1.0, 0.0, 0.0, 1.0);
}
"
}
}
let vs = vs::Shader::load(device.clone()).unwrap();
let fs = fs::Shader::load(device.clone()).unwrap();
// At this point, OpenGL initialization would be finished. However in Vulkan it is not. OpenGL
// implicitly does a lot of computation whenever you draw. In Vulkan, you have to do all this
// manually.
// The next step is to create a *render pass*, which is an object that describes where the
// output of the graphics pipeline will go. It describes the layout of the images
// where the colors, depth and/or stencil information will be written.
let render_pass = Arc::new(vulkano::single_pass_renderpass!(
device.clone(),
attachments: {
// `color` is a custom name we give to the first and only attachment.
color: {
// `load: Clear` means that we ask the GPU to clear the content of this
// attachment at the start of the drawing.
load: Clear,
// `store: Store` means that we ask the GPU to store the output of the draw
// in the actual image. We could also ask it to discard the result.
store: Store,
// `format: <ty>` indicates the type of the format of the image. This has to
// be one of the types of the `vulkano::format` module (or alternatively one
// of your structs that implements the `FormatDesc` trait). Here we use the
// same format as the swapchain.
format: swapchain.format(),
// TODO:
samples: 1,
}
},
pass: {
// We use the attachment named `color` as the one and only color attachment.
color: [color],
// No depth-stencil attachment is indicated with empty brackets.
depth_stencil: {}
}
).unwrap());
// Before we draw we have to create what is called a pipeline. This is similar to an OpenGL
// program, but much more specific.
let pipeline = Arc::new(GraphicsPipeline::start()
// We need to indicate the layout of the vertices.
// The type `SingleBufferDefinition` actually contains a template parameter corresponding
// to the type of each vertex. But in this code it is automatically inferred.
.vertex_input_single_buffer()
// A Vulkan shader can in theory contain multiple entry points, so we have to specify
// which one. The `main` word of `main_entry_point` actually corresponds to the name of
// the entry point.
.vertex_shader(vs.main_entry_point(), ())
// The content of the vertex buffer describes a list of triangles.
.triangle_list()
// Use a resizable viewport set to draw over the entire window
.viewports_dynamic_scissors_irrelevant(1)
// See `vertex_shader`.
.fragment_shader(fs.main_entry_point(), ())
// We have to indicate which subpass of which render pass this pipeline is going to be used
// in. The pipeline will only be usable from this particular subpass.
.render_pass(Subpass::from(render_pass.clone(), 0).unwrap())
// Now that our builder is filled, we call `build()` to obtain an actual pipeline.
.build(device.clone())
.unwrap());
// Dynamic viewports allow us to recreate just the viewport when the window is resized
// Otherwise we would have to recreate the whole pipeline.
let mut dynamic_state = DynamicState { line_width: None, viewports: None, scissors: None };
// The render pass we created above only describes the layout of our framebuffers. Before we
// can draw we also need to create the actual framebuffers.
//
// Since we need to draw to multiple images, we are going to create a different framebuffer for
// each image.
let mut framebuffers = window_size_dependent_setup(&images, render_pass.clone(), &mut dynamic_state);
// Initialization is finally finished!
// In some situations, the swapchain will become invalid by itself. This includes for example
// when the window is resized (as the images of the swapchain will no longer match the
// window's) or, on Android, when the application went to the background and goes back to the
// foreground.
//
// In this situation, acquiring a swapchain image or presenting it will return an error.
// Rendering to an image of that swapchain will not produce any error, but may or may not work.
// To continue rendering, we need to recreate the swapchain by creating a new swapchain.
// Here, we remember that we need to do this for the next loop iteration.
let mut recreate_swapchain = false;
// In the loop below we are going to submit commands to the GPU. Submitting a command produces
// an object that implements the `GpuFuture` trait, which holds the resources for as long as
// they are in use by the GPU.
//
// Destroying the `GpuFuture` blocks until the GPU is finished executing it. In order to avoid
// that, we store the submission of the previous frame here.
let mut previous_frame_end = Box::new(sync::now(device.clone())) as Box<dyn GpuFuture>;
loop {
// It is important to call this function from time to time, otherwise resources will keep
// accumulating and you will eventually reach an out of memory error.
// Calling this function polls various fences in order to determine what the GPU has
// already processed, and frees the resources that are no longer needed.
previous_frame_end.cleanup_finished();
// Whenever the window resizes we need to recreate everything dependent on the window size.
// In this example that includes the swapchain, the framebuffers and the dynamic state viewport.
if recreate_swapchain {
// Get the new dimensions of the window.
let dimensions = if let Some(dimensions) = window.get_inner_size() {
let dimensions: (u32, u32) = dimensions.to_physical(window.get_hidpi_factor()).into();
[dimensions.0, dimensions.1]
} else {
return;
};
let (new_swapchain, new_images) = match swapchain.recreate_with_dimension(dimensions) {
Ok(r) => r,
// This error tends to happen when the user is manually resizing the window.
// Simply restarting the loop is the easiest way to fix this issue.
Err(SwapchainCreationError::UnsupportedDimensions) => continue,
Err(err) => panic!("{:?}", err)
};
swapchain = new_swapchain;
// Because framebuffers contains an Arc on the old swapchain, we need to
// recreate framebuffers as well.
framebuffers = window_size_dependent_setup(&new_images, render_pass.clone(), &mut dynamic_state);
recreate_swapchain = false;
}
// Before we can draw on the output, we have to *acquire* an image from the swapchain. If
// no image is available (which happens if you submit draw commands too quickly), then the
// function will block.
// This operation returns the index of the image that we are allowed to draw upon.
//
// This function can block if no image is available. The parameter is an optional timeout
// after which the function call will return an error.
let (image_num, acquire_future) = match swapchain::acquire_next_image(swapchain.clone(), None) {
Ok(r) => r,
Err(AcquireError::OutOfDate) => {
recreate_swapchain = true;
continue;
},
Err(err) => panic!("{:?}", err)
};
// Specify the color to clear the framebuffer with i.e. blue
let clear_values = vec!([0.0, 0.0, 1.0, 1.0].into());
// In order to draw, we have to build a *command buffer*. The command buffer object holds
// the list of commands that are going to be executed.
//
// Building a command buffer is an expensive operation (usually a few hundred
// microseconds), but it is known to be a hot path in the driver and is expected to be
// optimized.
//
// Note that we have to pass a queue family when we create the command buffer. The command
// buffer will only be executable on that given queue family.
let command_buffer = AutoCommandBufferBuilder::primary_one_time_submit(device.clone(), queue.family()).unwrap()
// Before we can draw, we have to *enter a render pass*. There are two methods to do
// this: `draw_inline` and `draw_secondary`. The latter is a bit more advanced and is
// not covered here.
//
// The third parameter builds the list of values to clear the attachments with. The API
// is similar to the list of attachments when building the framebuffers, except that
// only the attachments that use `load: Clear` appear in the list.
.begin_render_pass(framebuffers[image_num].clone(), false, clear_values)
.unwrap()
// We are now inside the first subpass of the render pass. We add a draw command.
//
// The last two parameters contain the list of resources to pass to the shaders.
// Since we used an `EmptyPipeline` object, the objects have to be `()`.
.draw(pipeline.clone(), &dynamic_state, vertex_buffer.clone(), (), ())
.unwrap()
// We leave the render pass by calling `draw_end`. Note that if we had multiple
// subpasses we could have called `next_inline` (or `next_secondary`) to jump to the
// next subpass.
.end_render_pass()
.unwrap()
// Finish building the command buffer by calling `build`.
.build().unwrap();
let future = previous_frame_end.join(acquire_future)
.then_execute(queue.clone(), command_buffer).unwrap()
// The color output is now expected to contain our triangle. But in order to show it on
// the screen, we have to *present* the image by calling `present`.
//
// This function does not actually present the image immediately. Instead it submits a
// present command at the end of the queue. This means that it will only be presented once
// the GPU has finished executing the command buffer that draws the triangle.
.then_swapchain_present(queue.clone(), swapchain.clone(), image_num)
.then_signal_fence_and_flush();
match future {
Ok(future) => {
previous_frame_end = Box::new(future) as Box<_>;
}
Err(FlushError::OutOfDate) => {
recreate_swapchain = true;
previous_frame_end = Box::new(sync::now(device.clone())) as Box<_>;
}
Err(e) => {
println!("{:?}", e);
previous_frame_end = Box::new(sync::now(device.clone())) as Box<_>;
}
}
// Note that in more complex programs it is likely that one of `acquire_next_image`,
// `command_buffer::submit`, or `present` will block for some time. This happens when the
// GPU's queue is full and the driver has to wait until the GPU finished some work.
//
// Unfortunately the Vulkan API doesn't provide any way to not wait or to detect when a
// wait would happen. Blocking may be the desired behavior, but if you don't want to
// block you should spawn a separate thread dedicated to submissions.
// Handling the window events in order to close the program when the user wants to close
// it.
let mut done = false;
events_loop.poll_events(|ev| {
match ev {
Event::WindowEvent { event: WindowEvent::CloseRequested, .. } => done = true,
Event::WindowEvent { event: WindowEvent::Resized(_), .. } => recreate_swapchain = true,
_ => ()
}
});
if done { return; }
}
}
/// This method is called once during initialization, then again whenever the window is resized
fn window_size_dependent_setup(
images: &[Arc<SwapchainImage<Window>>],
render_pass: Arc<dyn RenderPassAbstract + Send + Sync>,
dynamic_state: &mut DynamicState
) -> Vec<Arc<dyn FramebufferAbstract + Send + Sync>> {
let dimensions = images[0].dimensions();
let viewport = Viewport {
origin: [0.0, 0.0],
dimensions: [dimensions[0] as f32, dimensions[1] as f32],
depth_range: 0.0 .. 1.0,
};
dynamic_state.viewports = Some(vec!(viewport));
images.iter().map(|image| {
Arc::new(
Framebuffer::start(render_pass.clone())
.add(image.clone()).unwrap()
.build().unwrap()
) as Arc<dyn FramebufferAbstract + Send + Sync>
}).collect::<Vec<_>>()
}
*/

@ -0,0 +1,112 @@
use sfml::graphics::{
    Drawable, IntRect, RenderStates, RenderTarget, Sprite, Texture, Transformable,
};
use sfml::system::Vector2u;
//pub struct Thing<'a> {
//
// loader: WorkpieceLoader,
// workpiece: Workpiece<'a>
//
//}
//
//impl<'a> Thing<'a> {
//
// pub fn new(pixels: Vec<u8>) -> Thing<'a> {
//
// let mut workpieceloader = WorkpieceLoader::new(String::from("resources/images/funky-bird.jpg"));
// workpieceloader.load_first_stage(pixels);
//
// let mut workpiece = Workpiece::new();
// workpiece.load_first_stage(&mut workpieceloader.first_stage_texture);
//
// Thing {
// loader: workpieceloader,
// workpiece: workpiece
// }
// }
//}
/*
first_thing = Thing1::new();
second_thing = Thing1::new();
let mut container_thing = Thing2::new();
container_thing.field = &mut first_thing;
container_thing.field = &mut second_thing;
first_thing.field = 10;
*/
#[derive(Clone)]
pub struct WorkpieceLoader {
pub swatch_texture: Texture,
pub first_stage_texture: Texture,
pub xy: Vector2u,
pub vec: Vec<Texture>,
}
impl WorkpieceLoader {
pub fn new(filepath: String) -> WorkpieceLoader {
let texture = Texture::from_file(filepath.as_str()).expect("Couldn't load image");
let xy = texture.size();
WorkpieceLoader {
swatch_texture: texture,
first_stage_texture: Texture::new(xy.x, xy.y).unwrap(),
xy: xy,
vec: vec![]
}
}
pub fn load_first_stage(&mut self, pixels: Vec<u8>) {
self.vec.clear();
let mut t = Texture::new(self.xy.x, self.xy.y).expect("Couldn't load image");
t.update_from_pixels(pixels.as_slice(), self.xy.x, self.xy.y, 0, 0);
self.vec.push(t);
}
pub fn get_first(&mut self) -> &Texture {
&self.first_stage_texture
}
}
pub struct Workpiece<'a> {
}
impl<'a> Workpiece<'a> {
pub fn new() -> Workpiece<'a> {
Workpiece {
}
}
pub fn load_first_stage(&mut self, texture: &'a mut Texture) {
self.render_sprite.set_texture(texture, false);
self.render_sprite.set_position((0., 0.));
}
}
impl<'s> Drawable for Workpiece<'s> {
fn draw<'a: 'shader, 'texture, 'shader, 'shader_texture>(
&'a self,
render_target: &mut RenderTarget,
_: RenderStates<'texture, 'shader, 'shader_texture>,
) {
}
}
Loading…
Cancel
Save