From 8a699c8f6b04f198a0bef2c9dfe3db8b70e0f886 Mon Sep 17 00:00:00 2001
From: mitchellhansen <mitchellhansen0@gmail.com>
Date: Tue, 9 Jul 2019 21:33:31 -0700
Subject: [PATCH] porting over to full vulkan

---
 Cargo.toml         |   5 +-
 src/main.rs        | 487 ++++++++++++++++++++++++++++++++++++++++++---
 src/vkprocessor.rs | 411 +-------------------------------------
 src/workpiece.rs   |   4 +-
 4 files changed, 468 insertions(+), 439 deletions(-)

diff --git a/Cargo.toml b/Cargo.toml
index ad9482e1..0d23c3d1 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -11,10 +11,11 @@ simple-stopwatch="0.1.4"
 nalgebra = "0.18.0"
 image = "0.21.2"
 rand = "0.6.5"
-vulkano = "0.12.0"
+vulkano = "0.13.0"
 vulkano-shaders = "0.12.0"
 time = "0.1.38"
 shaderc = "0.5.0"
 shade_runner = {version = "0.1.1", git = "https://github.com/MitchellHansen/shade_runner"}
 #shade_runner = {version = "0.1.1", path = "../shade_runner"}
-
+vulkano-win = "0.13.0"
+winit = "0.19.1"
\ No newline at end of file
diff --git a/src/main.rs b/src/main.rs
index 62c42fc2..88582f8d 100644
--- a/src/main.rs
+++ b/src/main.rs
@@ -1,3 +1,5 @@
+
+/*
 #![allow(dead_code)]
 #![allow(unused_variables)]
 #![allow(unused_mut)]
@@ -77,33 +79,12 @@ So I'm kinda coming to the conclusion here that rust SFML is not made for
 frequent updates to the screen...
 
 Let's take a look at how easy it would be to replace SFML...
-
-
-
-e.g
-
 */
 
-/// ```
-/// struct Thing<'a> {
-///     field1: Option<&'a mut Thing<'a>>
-/// }
-///
-/// fn main() {
-///     let mut thing1 = Thing { field1: None };
-///     let mut thing2 = Thing { field1: None };
-///
-///     thing1.field1 = Some(&mut thing2);
-///     thing1.field1 = Some(&mut thing2); <-- Second mutable borrow error
-/// }
-/// ```
-
 
 
 fn main() {
 
-
-
     let font = Font::from_file("resources/fonts/sansation.ttf").unwrap();
 
     let instance = Instance::new(None, &InstanceExtensions::none(), None).unwrap();
@@ -129,15 +110,14 @@ fn main() {
     let mut workpieceloader = WorkpieceLoader::new(String::from("resources/images/funky-bird.jpg"));
     workpieceloader.load_first_stage(processor.read_image());
 
-    let mut workpiece = Workpiece::new();
-    workpiece.render_sprite.set_texture(&mut workpieceloader.first_stage_texture, false);
-//    workpiece.render_sprite.set_texture(&mut workpieceloader.swatch_texture, false);
-//    workpiece.render_sprite.set_texture(&mut workpieceloader.vec.get(0).unwrap(), false);
-    workpiece.render_sprite.set_texture(&mut workpieceloader.vec.get(0).unwrap(), false);
+    let mut texture = Texture::from_file("resources/images/funky-bird.jpg").expect("Couldn't load image");
 
-    workpiece.render_sprite = Sprite::new();
+    let mut workpiece = Workpiece::new();
+    workpiece.render_sprite.set_texture(&mut texture, false);
+    workpiece.render_sprite.texture().unwrap().update_from_pixels();
+    texture.set_smooth(false);
 
-    workpieceloader.first_stage_texture.set_smooth(false);
+    workpieceloader.first_stage_texture.set_smooth(true);
 
     let mut slider = Slider::new(Vector2f::new(40.0, 40.0), None, &font);
 
@@ -230,3 +210,454 @@ fn main() {
         window.display();
     }
 }
+*/
+
+use vulkano::buffer::{BufferUsage, CpuAccessibleBuffer, DeviceLocalBuffer, ImmutableBuffer, BufferAccess};
+use vulkano::command_buffer::{AutoCommandBufferBuilder, DynamicState};
+use vulkano::descriptor::descriptor_set::{PersistentDescriptorSet, StdDescriptorPoolAlloc};
+use vulkano::device::{Device, DeviceExtensions, QueuesIter, Queue};
+use vulkano::instance::{Instance, InstanceExtensions, PhysicalDevice, QueueFamily};
+use vulkano::pipeline::ComputePipeline;
+use vulkano::sync::{GpuFuture, FlushError};
+use vulkano::sync;
+use std::time::SystemTime;
+use std::sync::Arc;
+use std::ffi::CStr;
+use std::path::PathBuf;
+use shade_runner as sr;
+use image::{DynamicImage, ImageBuffer};
+use image::GenericImageView;
+use vulkano::descriptor::pipeline_layout::PipelineLayout;
+use image::GenericImage;
+use shade_runner::{ComputeLayout, CompileError};
+use vulkano::descriptor::descriptor_set::PersistentDescriptorSetBuf;
+use shaderc::CompileOptions;
+use vulkano::swapchain::{AcquireError, PresentMode, SurfaceTransform, Swapchain, SwapchainCreationError};
+use vulkano::swapchain;
+use vulkano::framebuffer::{Framebuffer, FramebufferAbstract, Subpass, RenderPassAbstract};
+use vulkano::image::SwapchainImage;
+use vulkano::pipeline::GraphicsPipeline;
+use vulkano::pipeline::viewport::Viewport;
+
+
+use vulkano_win::VkSurfaceBuild;
+use winit::{EventsLoop, Window, WindowBuilder, Event, WindowEvent};
+
+use vulkano::command_buffer::synced::SyncCommandBufferBuilder;
+
+
+fn main() {
+
+    let instance = {
+        let extensions = vulkano_win::required_extensions();
+        Instance::new(None, &extensions, None).unwrap()
+    };
+
+    let physical = PhysicalDevice::enumerate(&instance).next().unwrap();
+
+    // The objective of this example is to draw a triangle on a window. To do so, we first need to
+    // create the window.
+    //
+    // This is done by creating a `WindowBuilder` from the `winit` crate, then calling the
+    // `build_vk_surface` method provided by the `VkSurfaceBuild` trait from `vulkano_win`. If you
+    // ever get an error about `build_vk_surface` being undefined in one of your projects, this
+    // probably means that you forgot to import this trait.
+    //
+    // This returns a `vulkano::swapchain::Surface` object that contains both a cross-platform winit
+    // window and a cross-platform Vulkan surface that represents the surface of the window.
+    let mut events_loop = EventsLoop::new();
+
+    let surface = WindowBuilder::new().build_vk_surface(&events_loop, instance.clone()).unwrap();
+    let window = surface.window();
+
+    let queue_family = physical.queue_families().find(|&q| {
+        // We take the first queue family that supports both graphics and compute and can
+        // present to our window.
+        q.supports_graphics() &&
+            surface.is_supported(q).unwrap_or(false) &&
+            q.supports_compute()
+    }).unwrap();
+
+    let device_ext = DeviceExtensions { khr_swapchain: true, .. DeviceExtensions::none() };
+    let (device, mut queues) = Device::new(physical, physical.supported_features(), &device_ext,
+                                           [(queue_family, 0.5)].iter().cloned()).unwrap();
+
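+    // We only requested a single queue, so `queues` yields exactly one element; that queue is
+    // used for all of the submissions below.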
+    let queue = queues.next().unwrap();
+
+    // Before we can draw on the surface, we have to create what is called a swapchain. Creating
+    // a swapchain allocates the color buffers that will contain the image that will ultimately
+    // be visible on the screen. These images are returned alongside with the swapchain.
+    let (mut swapchain, images) = {
+        // Querying the capabilities of the surface. When we create the swapchain we can only
+        // pass values that are allowed by the capabilities.
+        let capabilities = surface.capabilities(physical).unwrap();
+
+        let usage = capabilities.supported_usage_flags;
+
+        // The alpha mode indicates how the alpha value of the final image will behave. For example
+        // you can choose whether the window will be opaque or transparent.
+        let alpha = capabilities.supported_composite_alpha.iter().next().unwrap();
+
+        // Choosing the internal format that the images will have.
+        let format = capabilities.supported_formats[0].0;
+
+        // The dimensions of the window, only used to initially set up the swapchain.
+        // NOTE:
+        // On some drivers the swapchain dimensions are specified by `capabilities.current_extent`
+        // and the swapchain size must use these dimensions. These dimensions are always the same
+        // as the window dimensions.
+        //
+        // However, other drivers don't specify a value, i.e. `capabilities.current_extent` is
+        // `None`. These drivers will allow anything, but the only sensible value is the window
+        // dimensions.
+        //
+        // Because in both cases the swapchain needs to match the window dimensions, we just use them.
+        let initial_dimensions = if let Some(dimensions) = window.get_inner_size() {
+            // convert to physical pixels
+            let dimensions: (u32, u32) = dimensions.to_physical(window.get_hidpi_factor()).into();
+            [dimensions.0, dimensions.1]
+        } else {
+            // The window no longer exists so exit the application.
+            return;
+        };
+
+        // Please take a look at the docs for the meaning of the parameters we didn't mention.
+        Swapchain::new(device.clone(), surface.clone(), capabilities.min_image_count, format,
+                       initial_dimensions, 1, usage, &queue, SurfaceTransform::Identity, alpha,
+                       PresentMode::Fifo, true, None).unwrap()
+
+    };
+
+
+    // We now create a buffer that will store the shape of our triangle.
+    let vertex_buffer = {
+        #[derive(Default, Debug, Clone)]
+        struct Vertex { position: [f32; 2] }
+        vulkano::impl_vertex!(Vertex, position);
+
+        CpuAccessibleBuffer::from_iter(device.clone(), BufferUsage::all(), [
+            Vertex { position: [-0.5, -0.25] },
+            Vertex { position: [0.0, 0.5] },
+            Vertex { position: [0.25, -0.1] }
+        ].iter().cloned()).unwrap()
+    };
+
+    mod vs {
+        vulkano_shaders::shader!{
+            ty: "vertex",
+            src: "
+#version 450
+
+layout(location = 0) in vec2 position;
+
+void main() {
+    gl_Position = vec4(position, 0.0, 1.0);
+}"
+        }
+    }
+
+    mod fs {
+        vulkano_shaders::shader!{
+            ty: "fragment",
+            src: "
+#version 450
+
+layout(location = 0) out vec4 f_color;
+
+void main() {
+    f_color = vec4(1.0, 0.0, 0.0, 1.0);
+}
+"
+        }
+    }
+
+    let vs = vs::Shader::load(device.clone()).unwrap();
+    let fs = fs::Shader::load(device.clone()).unwrap();
+
+    // The next step is to create a *render pass*, which is an object that describes where the
+    // output of the graphics pipeline will go. It describes the layout of the images
+    // where the colors, depth and/or stencil information will be written.
+    let render_pass = Arc::new(vulkano::single_pass_renderpass!(
+        device.clone(),
+        attachments: {
+            // `color` is a custom name we give to the first and only attachment.
+            color: {
+                // `load: Clear` means that we ask the GPU to clear the content of this
+                // attachment at the start of the drawing.
+                load: Clear,
+                // `store: Store` means that we ask the GPU to store the output of the draw
+                // in the actual image. We could also ask it to discard the result.
+                store: Store,
+                // `format: <ty>` indicates the type of the format of the image. This has to
+                // be one of the types of the `vulkano::format` module (or alternatively one
+                // of your structs that implements the `FormatDesc` trait). Here we use the
+                // same format as the swapchain.
+                format: swapchain.format(),
+                // TODO:
+                samples: 1,
+            }
+        },
+        pass: {
+            // We use the attachment named `color` as the one and only color attachment.
+            color: [color],
+            // No depth-stencil attachment is indicated with empty brackets.
+            depth_stencil: {}
+        }
+    ).unwrap());
+
+    // Before we draw we have to create what is called a pipeline. This is similar to an OpenGL
+    // program, but much more specific.
+    let pipeline = Arc::new(GraphicsPipeline::start()
+        // We need to indicate the layout of the vertices.
+        // The type `SingleBufferDefinition` actually contains a template parameter corresponding
+        // to the type of each vertex. But in this code it is automatically inferred.
+        .vertex_input_single_buffer()
+        // A Vulkan shader can in theory contain multiple entry points, so we have to specify
+        // which one. The `main` word of `main_entry_point` actually corresponds to the name of
+        // the entry point.
+        .vertex_shader(vs.main_entry_point(), ())
+        // The content of the vertex buffer describes a list of triangles.
+        .triangle_list()
+        // Use a resizable viewport set to draw over the entire window
+        .viewports_dynamic_scissors_irrelevant(1)
+        // See `vertex_shader`.
+        .fragment_shader(fs.main_entry_point(), ())
+        // We have to indicate which subpass of which render pass this pipeline is going to be used
+        // in. The pipeline will only be usable from this particular subpass.
+        .render_pass(Subpass::from(render_pass.clone(), 0).unwrap())
+        // Now that our builder is filled, we call `build()` to obtain an actual pipeline.
+        .build(device.clone())
+        .unwrap());
+
+    // Dynamic viewports allow us to recreate just the viewport when the window is resized
+    // Otherwise we would have to recreate the whole pipeline.
+    let mut dynamic_state = DynamicState { line_width: None, viewports: None, scissors: None };
+
+    // The render pass we created above only describes the layout of our framebuffers. Before we
+    // can draw we also need to create the actual framebuffers.
+    //
+    // Since we need to draw to multiple images, we are going to create a different framebuffer for
+    // each image.
+    let mut framebuffers = window_size_dependent_setup(&images, render_pass.clone(), &mut dynamic_state);
+
+    // Initialization is finally finished!
+
+    // In some situations, the swapchain will become invalid by itself. This includes for example
+    // when the window is resized (as the images of the swapchain will no longer match the
+    // window's) or, on Android, when the application went to the background and goes back to the
+    // foreground.
+    //
+    // In this situation, acquiring a swapchain image or presenting it will return an error.
+    // Rendering to an image of that swapchain will not produce any error, but may or may not work.
+    // To continue rendering, we need to recreate the swapchain by creating a new swapchain.
+    // Here, we remember that we need to do this for the next loop iteration.
+    let mut recreate_swapchain = false;
+
+    // In the loop below we are going to submit commands to the GPU. Submitting a command produces
+    // an object that implements the `GpuFuture` trait, which holds the resources for as long as
+    // they are in use by the GPU.
+    //
+    // Destroying the `GpuFuture` blocks until the GPU is finished executing it. In order to avoid
+    // that, we store the submission of the previous frame here.
+    let mut previous_frame_end = Box::new(sync::now(device.clone())) as Box<dyn GpuFuture>;
+
+    loop {
+        // It is important to call this function from time to time, otherwise resources will keep
+        // accumulating and you will eventually reach an out of memory error.
+        // Calling this function polls various fences in order to determine what the GPU has
+        // already processed, and frees the resources that are no longer needed.
+        previous_frame_end.cleanup_finished();
+
+        // Whenever the window resizes we need to recreate everything dependent on the window size.
+        // In this example that includes the swapchain, the framebuffers and the dynamic state viewport.
+        if recreate_swapchain {
+            // Get the new dimensions of the window.
+            let dimensions = if let Some(dimensions) = window.get_inner_size() {
+                let dimensions: (u32, u32) = dimensions.to_physical(window.get_hidpi_factor()).into();
+                [dimensions.0, dimensions.1]
+            } else {
+                return;
+            };
+
+            let (new_swapchain, new_images) = match swapchain.recreate_with_dimension(dimensions) {
+                Ok(r) => r,
+                // This error tends to happen when the user is manually resizing the window.
+                // Simply restarting the loop is the easiest way to fix this issue.
+                Err(SwapchainCreationError::UnsupportedDimensions) => continue,
+                Err(err) => panic!("{:?}", err)
+            };
+
+            swapchain = new_swapchain;
+            // Because framebuffers contains an Arc on the old swapchain, we need to
+            // recreate framebuffers as well.
+            framebuffers = window_size_dependent_setup(&new_images, render_pass.clone(), &mut dynamic_state);
+
+            recreate_swapchain = false;
+        }
+
+        // Before we can draw on the output, we have to *acquire* an image from the swapchain. If
+        // no image is available (which happens if you submit draw commands too quickly), then the
+        // function will block.
+        // This operation returns the index of the image that we are allowed to draw upon.
+        //
+        // This function can block if no image is available. The parameter is an optional timeout
+        // after which the function call will return an error.
+        let (image_num, acquire_future) = match swapchain::acquire_next_image(swapchain.clone(), None) {
+            Ok(r) => r,
+            Err(AcquireError::OutOfDate) => {
+                recreate_swapchain = true;
+                continue;
+            },
+            Err(err) => panic!("{:?}", err)
+        };
+
+        // Specify the color to clear the framebuffer with, i.e. blue.
+        let clear_values = vec!([0.0, 0.0, 1.0, 1.0].into());
+
+
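+        // Compile the `simple-edge.compute` shader with shade_runner and build a compute
+        // pipeline from it. Everything in this nested scope is dropped at the closing brace,
+        // so the compute pipeline is constructed here but not yet dispatched anywhere.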
+        {
+            let project_root =
+                std::env::current_dir()
+                    .expect("failed to get root directory");
+
+            let mut compute_path = project_root.clone();
+            compute_path.push(PathBuf::from("resources/shaders/"));
+            compute_path.push(PathBuf::from("simple-edge.compute"));
+
+
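+            // shaderc compile options: the macro definitions below become preprocessor defines
+            // that are visible to the GLSL compute source (how `simple-edge.compute` actually
+            // uses them is not shown here).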
+            let mut options = CompileOptions::new().ok_or(CompileError::CreateCompiler).unwrap();
+            options.add_macro_definition("SETTING_POS_X", Some("0"));
+            options.add_macro_definition("SETTING_POS_Y", Some("1"));
+            options.add_macro_definition("SETTING_BUCKETS_START", Some("2"));
+            options.add_macro_definition("SETTING_BUCKETS_LEN", Some("2"));
+
+            let shader =
+                sr::load_compute_with_options(compute_path, options)
+                    .expect("Failed to compile");
+
+            let vulkano_entry =
+                sr::parse_compute(&shader)
+                    .expect("failed to parse");
+
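+            // Building a `ShaderModule` from raw SPIR-V words is unsafe because vulkano cannot
+            // verify that the words are valid SPIR-V matching the entry point we declare below.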
+            let shader_module = unsafe {
+                vulkano::pipeline::shader::ShaderModule::from_words(device.clone(), &shader.compute)
+            }.unwrap();
+
+            let compute_pipeline = Arc::new({
+                unsafe {
+                    ComputePipeline::new(device.clone(), &shader_module.compute_entry_point(
+                        CStr::from_bytes_with_nul_unchecked(b"main\0"),
+                        vulkano_entry.compute_layout), &(),
+                    ).unwrap()
+                }
+            });
+
+        }
+
+        // In order to draw, we have to build a *command buffer*. The command buffer object holds
+        // the list of commands that are going to be executed.
+        //
+        // Building a command buffer is an expensive operation (usually a few hundred
+        // microseconds), but it is known to be a hot path in the driver and is expected to be
+        // optimized.
+        //
+        // Note that we have to pass a queue family when we create the command buffer. The command
+        // buffer will only be executable on that given queue family.
+        let command_buffer = AutoCommandBufferBuilder::primary_one_time_submit(device.clone(), queue.family()).unwrap()
+            // Before we can draw, we have to *enter a render pass* by calling `begin_render_pass`.
+            // The second parameter indicates whether the render pass contents are recorded inline
+            // (`false`, as here) or supplied by secondary command buffers (`true`), which is a bit
+            // more advanced and not covered here.
+            //
+            // The third parameter builds the list of values to clear the attachments with. The API
+            // is similar to the list of attachments when building the framebuffers, except that
+            // only the attachments that use `load: Clear` appear in the list.
+            .begin_render_pass(framebuffers[image_num].clone(), false, clear_values)
+            .unwrap()
+
+            // We are now inside the first subpass of the render pass. We add a draw command.
+            //
+            // The last two parameters contain the list of resources to pass to the shaders.
+            // Since our pipeline layout has no descriptor sets or push constants, both are simply `()`.
+            .draw(pipeline.clone(), &dynamic_state, vertex_buffer.clone(), (), ())
+            .unwrap()
+
+            // We leave the render pass by calling `end_render_pass`. Note that if we had multiple
+            // subpasses we could have called `next_inline` (or `next_secondary`) to jump to the
+            // next subpass.
+            .end_render_pass()
+            .unwrap()
+
+            // Finish building the command buffer by calling `build`.
+            .build().unwrap();
+
+        let future = previous_frame_end.join(acquire_future)
+            .then_execute(queue.clone(), command_buffer).unwrap()
+
+            // The color output is now expected to contain our triangle. But in order to show it on
+            // the screen, we have to *present* the image by calling `present`.
+            //
+            // This function does not actually present the image immediately. Instead it submits a
+            // present command at the end of the queue. This means that it will only be presented once
+            // the GPU has finished executing the command buffer that draws the triangle.
+            .then_swapchain_present(queue.clone(), swapchain.clone(), image_num)
+            .then_signal_fence_and_flush();
+
+        match future {
+            Ok(future) => {
+                previous_frame_end = Box::new(future) as Box<_>;
+            }
+            Err(FlushError::OutOfDate) => {
+                recreate_swapchain = true;
+                previous_frame_end = Box::new(sync::now(device.clone())) as Box<_>;
+            }
+            Err(e) => {
+                println!("{:?}", e);
+                previous_frame_end = Box::new(sync::now(device.clone())) as Box<_>;
+            }
+        }
+
+        // Note that in more complex programs it is likely that one of `acquire_next_image`,
+        // `command_buffer::submit`, or `present` will block for some time. This happens when the
+        // GPU's queue is full and the driver has to wait until the GPU finished some work.
+        //
+        // Unfortunately the Vulkan API doesn't provide any way to not wait or to detect when a
+        // wait would happen. Blocking may be the desired behavior, but if you don't want to
+        // block you should spawn a separate thread dedicated to submissions.
+
+        // Handling the window events in order to close the program when the user wants to close
+        // it.
+        let mut done = false;
+        events_loop.poll_events(|ev| {
+            match ev {
+                Event::WindowEvent { event: WindowEvent::CloseRequested, .. } => done = true,
+                Event::WindowEvent { event: WindowEvent::Resized(_), .. } => recreate_swapchain = true,
+                _ => ()
+            }
+        });
+        if done { return; }
+    }
+}
+
+/// This method is called once during initialization, then again whenever the window is resized
+fn window_size_dependent_setup(
+    images: &[Arc<SwapchainImage<Window>>],
+    render_pass: Arc<dyn RenderPassAbstract + Send + Sync>,
+    dynamic_state: &mut DynamicState
+) -> Vec<Arc<dyn FramebufferAbstract + Send + Sync>> {
+    let dimensions = images[0].dimensions();
+
+    let viewport = Viewport {
+        origin: [0.0, 0.0],
+        dimensions: [dimensions[0] as f32, dimensions[1] as f32],
+        depth_range: 0.0 .. 1.0,
+    };
+    dynamic_state.viewports = Some(vec!(viewport));
+
+    images.iter().map(|image| {
+        Arc::new(
+            Framebuffer::start(render_pass.clone())
+                .add(image.clone()).unwrap()
+                .build().unwrap()
+        ) as Arc<dyn FramebufferAbstract + Send + Sync>
+    }).collect::<Vec<_>>()
+}
+
+
diff --git a/src/vkprocessor.rs b/src/vkprocessor.rs
index 91480818..065459d4 100644
--- a/src/vkprocessor.rs
+++ b/src/vkprocessor.rs
@@ -163,7 +163,6 @@ impl<'a> VkProcessor<'a> {
                                            data_iter).unwrap()
         };
 
-
         println!("Done");
 
         // Create the data descriptor set for our previously created shader pipeline
@@ -173,7 +172,6 @@ impl<'a> VkProcessor<'a> {
             .add_buffer(read_buffer.clone()).unwrap()
             .add_buffer(settings_buffer.clone()).unwrap();
 
-
         self.set = Some(Arc::new(set.build().unwrap()));
 
         self.img_buffers.push(write_buffer);
@@ -188,7 +186,9 @@ impl<'a> VkProcessor<'a> {
         // The command buffer I think pretty much serves to define what runs where for how many times
         let command_buffer =
             AutoCommandBufferBuilder::primary_one_time_submit(self.device.clone(),self.queue.family()).unwrap()
-            .dispatch([self.xy.0, self.xy.1, 1], self.pipeline.clone().unwrap().clone(), self.set.clone().unwrap().clone(), ()).unwrap()
+            .dispatch([self.xy.0, self.xy.1, 1],
+                      self.pipeline.clone().unwrap(),
+                      self.set.clone().unwrap(), ()).unwrap()
             .build().unwrap();
 
         // Create a future for running the command buffer and then just fence it
@@ -247,411 +247,6 @@ impl<'a> VkProcessor<'a> {
     }
 }
 
-/**
-use vulkano::buffer::{BufferUsage, CpuAccessibleBuffer};
-use vulkano::command_buffer::{AutoCommandBufferBuilder, DynamicState};
-use vulkano::device::{Device, DeviceExtensions};
-use vulkano::framebuffer::{Framebuffer, FramebufferAbstract, Subpass, RenderPassAbstract};
-use vulkano::image::SwapchainImage;
-use vulkano::instance::{Instance, PhysicalDevice};
-use vulkano::pipeline::GraphicsPipeline;
-use vulkano::pipeline::viewport::Viewport;
-use vulkano::swapchain::{AcquireError, PresentMode, SurfaceTransform, Swapchain, SwapchainCreationError};
-use vulkano::swapchain;
-use vulkano::sync::{GpuFuture, FlushError};
-use vulkano::sync;
-
-use vulkano_win::VkSurfaceBuild;
-
-use winit::{EventsLoop, Window, WindowBuilder, Event, WindowEvent};
-
-use std::sync::Arc;
-
-fn main() {
-
-    let instance = {
-        let extensions = vulkano_win::required_extensions();
-        Instance::new(None, &extensions, None).unwrap()
-    };
-
-    let physical = PhysicalDevice::enumerate(&instance).next().unwrap();
-
-    // The objective of this example is to draw a triangle on a window. To do so, we first need to
-    // create the window.
-    //
-    // This is done by creating a `WindowBuilder` from the `winit` crate, then calling the
-    // `build_vk_surface` method provided by the `VkSurfaceBuild` trait from `vulkano_win`. If you
-    // ever get an error about `build_vk_surface` being undefined in one of your projects, this
-    // probably means that you forgot to import this trait.
-    //
-    // This returns a `vulkano::swapchain::Surface` object that contains both a cross-platform winit
-    // window and a cross-platform Vulkan surface that represents the surface of the window.
-    let mut events_loop = EventsLoop::new();
-    let surface = WindowBuilder::new().build_vk_surface(&events_loop, instance.clone()).unwrap();
-    let window = surface.window();
-
-    let queue_family = physical.queue_families().find(|&q| {
-        // We take the first queue that supports drawing to our window.
-        q.supports_graphics() && surface.is_supported(q).unwrap_or(false)
-    }).unwrap();
-
-    let device_ext = DeviceExtensions { khr_swapchain: true, .. DeviceExtensions::none() };
-    let (device, mut queues) = Device::new(physical, physical.supported_features(), &device_ext,
-                                           [(queue_family, 0.5)].iter().cloned()).unwrap();
-
-    let queue = queues.next().unwrap();
-
-    // Before we can draw on the surface, we have to create what is called a swapchain. Creating
-    // a swapchain allocates the color buffers that will contain the image that will ultimately
-    // be visible on the screen. These images are returned alongside with the swapchain.
-    let (mut swapchain, images) = {
-        // Querying the capabilities of the surface. When we create the swapchain we can only
-        // pass values that are allowed by the capabilities.
-        let caps = surface.capabilities(physical).unwrap();
-
-        let usage = caps.supported_usage_flags;
-
-        // The alpha mode indicates how the alpha value of the final image will behave. For example
-        // you can choose whether the window will be opaque or transparent.
-        let alpha = caps.supported_composite_alpha.iter().next().unwrap();
-
-        // Choosing the internal format that the images will have.
-        let format = caps.supported_formats[0].0;
-
-        // The dimensions of the window, only used to initially setup the swapchain.
-        // NOTE:
-        // On some drivers the swapchain dimensions are specified by `caps.current_extent` and the
-        // swapchain size must use these dimensions.
-        // These dimensions are always the same as the window dimensions
-        //
-        // However other drivers dont specify a value i.e. `caps.current_extent` is `None`
-        // These drivers will allow anything but the only sensible value is the window dimensions.
-        //
-        // Because for both of these cases, the swapchain needs to be the window dimensions, we just use that.
-        let initial_dimensions = if let Some(dimensions) = window.get_inner_size() {
-            // convert to physical pixels
-            let dimensions: (u32, u32) = dimensions.to_physical(window.get_hidpi_factor()).into();
-            [dimensions.0, dimensions.1]
-        } else {
-            // The window no longer exists so exit the application.
-            return;
-        };
-
-        // Please take a look at the docs for the meaning of the parameters we didn't mention.
-        Swapchain::new(device.clone(), surface.clone(), caps.min_image_count, format,
-                       initial_dimensions, 1, usage, &queue, SurfaceTransform::Identity, alpha,
-                       PresentMode::Fifo, true, None).unwrap()
-
-    };
-
-    // We now create a buffer that will store the shape of our triangle.
-    let vertex_buffer = {
-        #[derive(Default, Debug, Clone)]
-        struct Vertex { position: [f32; 2] }
-        vulkano::impl_vertex!(Vertex, position);
-
-        CpuAccessibleBuffer::from_iter(device.clone(), BufferUsage::all(), [
-            Vertex { position: [-0.5, -0.25] },
-            Vertex { position: [0.0, 0.5] },
-            Vertex { position: [0.25, -0.1] }
-        ].iter().cloned()).unwrap()
-    };
-
-    // The next step is to create the shaders.
-    //
-    // The raw shader creation API provided by the vulkano library is unsafe, for various reasons.
-    //
-    // An overview of what the `vulkano_shaders::shader!` macro generates can be found in the
-    // `vulkano-shaders` crate docs. You can view them at https://docs.rs/vulkano-shaders/
-    //
-    // TODO: explain this in details
-    mod vs {
-        vulkano_shaders::shader!{
-            ty: "vertex",
-            src: "
-#version 450
-
-layout(location = 0) in vec2 position;
-
-void main() {
-    gl_Position = vec4(position, 0.0, 1.0);
-}"
-        }
-    }
-
-    mod fs {
-        vulkano_shaders::shader!{
-            ty: "fragment",
-            src: "
-#version 450
-
-layout(location = 0) out vec4 f_color;
-
-void main() {
-    f_color = vec4(1.0, 0.0, 0.0, 1.0);
-}
-"
-        }
-    }
-
-    let vs = vs::Shader::load(device.clone()).unwrap();
-    let fs = fs::Shader::load(device.clone()).unwrap();
-
-    // At this point, OpenGL initialization would be finished. However in Vulkan it is not. OpenGL
-    // implicitly does a lot of computation whenever you draw. In Vulkan, you have to do all this
-    // manually.
-
-    // The next step is to create a *render pass*, which is an object that describes where the
-    // output of the graphics pipeline will go. It describes the layout of the images
-    // where the colors, depth and/or stencil information will be written.
-    let render_pass = Arc::new(vulkano::single_pass_renderpass!(
-        device.clone(),
-        attachments: {
-            // `color` is a custom name we give to the first and only attachment.
-            color: {
-                // `load: Clear` means that we ask the GPU to clear the content of this
-                // attachment at the start of the drawing.
-                load: Clear,
-                // `store: Store` means that we ask the GPU to store the output of the draw
-                // in the actual image. We could also ask it to discard the result.
-                store: Store,
-                // `format: <ty>` indicates the type of the format of the image. This has to
-                // be one of the types of the `vulkano::format` module (or alternatively one
-                // of your structs that implements the `FormatDesc` trait). Here we use the
-                // same format as the swapchain.
-                format: swapchain.format(),
-                // TODO:
-                samples: 1,
-            }
-        },
-        pass: {
-            // We use the attachment named `color` as the one and only color attachment.
-            color: [color],
-            // No depth-stencil attachment is indicated with empty brackets.
-            depth_stencil: {}
-        }
-    ).unwrap());
-
-    // Before we draw we have to create what is called a pipeline. This is similar to an OpenGL
-    // program, but much more specific.
-    let pipeline = Arc::new(GraphicsPipeline::start()
-        // We need to indicate the layout of the vertices.
-        // The type `SingleBufferDefinition` actually contains a template parameter corresponding
-        // to the type of each vertex. But in this code it is automatically inferred.
-        .vertex_input_single_buffer()
-        // A Vulkan shader can in theory contain multiple entry points, so we have to specify
-        // which one. The `main` word of `main_entry_point` actually corresponds to the name of
-        // the entry point.
-        .vertex_shader(vs.main_entry_point(), ())
-        // The content of the vertex buffer describes a list of triangles.
-        .triangle_list()
-        // Use a resizable viewport set to draw over the entire window
-        .viewports_dynamic_scissors_irrelevant(1)
-        // See `vertex_shader`.
-        .fragment_shader(fs.main_entry_point(), ())
-        // We have to indicate which subpass of which render pass this pipeline is going to be used
-        // in. The pipeline will only be usable from this particular subpass.
-        .render_pass(Subpass::from(render_pass.clone(), 0).unwrap())
-        // Now that our builder is filled, we call `build()` to obtain an actual pipeline.
-        .build(device.clone())
-        .unwrap());
-
-    // Dynamic viewports allow us to recreate just the viewport when the window is resized
-    // Otherwise we would have to recreate the whole pipeline.
-    let mut dynamic_state = DynamicState { line_width: None, viewports: None, scissors: None };
-
-    // The render pass we created above only describes the layout of our framebuffers. Before we
-    // can draw we also need to create the actual framebuffers.
-    //
-    // Since we need to draw to multiple images, we are going to create a different framebuffer for
-    // each image.
-    let mut framebuffers = window_size_dependent_setup(&images, render_pass.clone(), &mut dynamic_state);
-
-    // Initialization is finally finished!
-
-    // In some situations, the swapchain will become invalid by itself. This includes for example
-    // when the window is resized (as the images of the swapchain will no longer match the
-    // window's) or, on Android, when the application went to the background and goes back to the
-    // foreground.
-    //
-    // In this situation, acquiring a swapchain image or presenting it will return an error.
-    // Rendering to an image of that swapchain will not produce any error, but may or may not work.
-    // To continue rendering, we need to recreate the swapchain by creating a new swapchain.
-    // Here, we remember that we need to do this for the next loop iteration.
-    let mut recreate_swapchain = false;
-
-    // In the loop below we are going to submit commands to the GPU. Submitting a command produces
-    // an object that implements the `GpuFuture` trait, which holds the resources for as long as
-    // they are in use by the GPU.
-    //
-    // Destroying the `GpuFuture` blocks until the GPU is finished executing it. In order to avoid
-    // that, we store the submission of the previous frame here.
-    let mut previous_frame_end = Box::new(sync::now(device.clone())) as Box<dyn GpuFuture>;
-
-    loop {
-        // It is important to call this function from time to time, otherwise resources will keep
-        // accumulating and you will eventually reach an out of memory error.
-        // Calling this function polls various fences in order to determine what the GPU has
-        // already processed, and frees the resources that are no longer needed.
-        previous_frame_end.cleanup_finished();
-
-        // Whenever the window resizes we need to recreate everything dependent on the window size.
-        // In this example that includes the swapchain, the framebuffers and the dynamic state viewport.
-        if recreate_swapchain {
-            // Get the new dimensions of the window.
-            let dimensions = if let Some(dimensions) = window.get_inner_size() {
-                let dimensions: (u32, u32) = dimensions.to_physical(window.get_hidpi_factor()).into();
-                [dimensions.0, dimensions.1]
-            } else {
-                return;
-            };
-
-            let (new_swapchain, new_images) = match swapchain.recreate_with_dimension(dimensions) {
-                Ok(r) => r,
-                // This error tends to happen when the user is manually resizing the window.
-                // Simply restarting the loop is the easiest way to fix this issue.
-                Err(SwapchainCreationError::UnsupportedDimensions) => continue,
-                Err(err) => panic!("{:?}", err)
-            };
-
-            swapchain = new_swapchain;
-            // Because framebuffers contains an Arc on the old swapchain, we need to
-            // recreate framebuffers as well.
-            framebuffers = window_size_dependent_setup(&new_images, render_pass.clone(), &mut dynamic_state);
-
-            recreate_swapchain = false;
-        }
-
-        // Before we can draw on the output, we have to *acquire* an image from the swapchain. If
-        // no image is available (which happens if you submit draw commands too quickly), then the
-        // function will block.
-        // This operation returns the index of the image that we are allowed to draw upon.
-        //
-        // This function can block if no image is available. The parameter is an optional timeout
-        // after which the function call will return an error.
-        let (image_num, acquire_future) = match swapchain::acquire_next_image(swapchain.clone(), None) {
-            Ok(r) => r,
-            Err(AcquireError::OutOfDate) => {
-                recreate_swapchain = true;
-                continue;
-            },
-            Err(err) => panic!("{:?}", err)
-        };
-
-        // Specify the color to clear the framebuffer with i.e. blue
-        let clear_values = vec!([0.0, 0.0, 1.0, 1.0].into());
-
-        // In order to draw, we have to build a *command buffer*. The command buffer object holds
-        // the list of commands that are going to be executed.
-        //
-        // Building a command buffer is an expensive operation (usually a few hundred
-        // microseconds), but it is known to be a hot path in the driver and is expected to be
-        // optimized.
-        //
-        // Note that we have to pass a queue family when we create the command buffer. The command
-        // buffer will only be executable on that given queue family.
-        let command_buffer = AutoCommandBufferBuilder::primary_one_time_submit(device.clone(), queue.family()).unwrap()
-            // Before we can draw, we have to *enter a render pass*. There are two methods to do
-            // this: `draw_inline` and `draw_secondary`. The latter is a bit more advanced and is
-            // not covered here.
-            //
-            // The third parameter builds the list of values to clear the attachments with. The API
-            // is similar to the list of attachments when building the framebuffers, except that
-            // only the attachments that use `load: Clear` appear in the list.
-            .begin_render_pass(framebuffers[image_num].clone(), false, clear_values)
-            .unwrap()
-
-            // We are now inside the first subpass of the render pass. We add a draw command.
-            //
-            // The last two parameters contain the list of resources to pass to the shaders.
-            // Since we used an `EmptyPipeline` object, the objects have to be `()`.
-            .draw(pipeline.clone(), &dynamic_state, vertex_buffer.clone(), (), ())
-            .unwrap()
-
-            // We leave the render pass by calling `draw_end`. Note that if we had multiple
-            // subpasses we could have called `next_inline` (or `next_secondary`) to jump to the
-            // next subpass.
-            .end_render_pass()
-            .unwrap()
-
-            // Finish building the command buffer by calling `build`.
-            .build().unwrap();
-
-        let future = previous_frame_end.join(acquire_future)
-            .then_execute(queue.clone(), command_buffer).unwrap()
-
-            // The color output is now expected to contain our triangle. But in order to show it on
-            // the screen, we have to *present* the image by calling `present`.
-            //
-            // This function does not actually present the image immediately. Instead it submits a
-            // present command at the end of the queue. This means that it will only be presented once
-            // the GPU has finished executing the command buffer that draws the triangle.
-            .then_swapchain_present(queue.clone(), swapchain.clone(), image_num)
-            .then_signal_fence_and_flush();
-
-        match future {
-            Ok(future) => {
-                previous_frame_end = Box::new(future) as Box<_>;
-            }
-            Err(FlushError::OutOfDate) => {
-                recreate_swapchain = true;
-                previous_frame_end = Box::new(sync::now(device.clone())) as Box<_>;
-            }
-            Err(e) => {
-                println!("{:?}", e);
-                previous_frame_end = Box::new(sync::now(device.clone())) as Box<_>;
-            }
-        }
-
-        // Note that in more complex programs it is likely that one of `acquire_next_image`,
-        // `command_buffer::submit`, or `present` will block for some time. This happens when the
-        // GPU's queue is full and the driver has to wait until the GPU finished some work.
-        //
-        // Unfortunately the Vulkan API doesn't provide any way to not wait or to detect when a
-        // wait would happen. Blocking may be the desired behavior, but if you don't want to
-        // block you should spawn a separate thread dedicated to submissions.
-
-        // Handling the window events in order to close the program when the user wants to close
-        // it.
-        let mut done = false;
-        events_loop.poll_events(|ev| {
-            match ev {
-                Event::WindowEvent { event: WindowEvent::CloseRequested, .. } => done = true,
-                Event::WindowEvent { event: WindowEvent::Resized(_), .. } => recreate_swapchain = true,
-                _ => ()
-            }
-        });
-        if done { return; }
-    }
-}
-
-/// This method is called once during initialization, then again whenever the window is resized
-fn window_size_dependent_setup(
-    images: &[Arc<SwapchainImage<Window>>],
-    render_pass: Arc<dyn RenderPassAbstract + Send + Sync>,
-    dynamic_state: &mut DynamicState
-) -> Vec<Arc<dyn FramebufferAbstract + Send + Sync>> {
-    let dimensions = images[0].dimensions();
-
-    let viewport = Viewport {
-        origin: [0.0, 0.0],
-        dimensions: [dimensions[0] as f32, dimensions[1] as f32],
-        depth_range: 0.0 .. 1.0,
-    };
-    dynamic_state.viewports = Some(vec!(viewport));
-
-    images.iter().map(|image| {
-        Arc::new(
-            Framebuffer::start(render_pass.clone())
-                .add(image.clone()).unwrap()
-                .build().unwrap()
-        ) as Arc<dyn FramebufferAbstract + Send + Sync>
-    }).collect::<Vec<_>>()
-}
-
-
-*/
-
 
 
 
diff --git a/src/workpiece.rs b/src/workpiece.rs
index a7c265a3..b9984c3d 100644
--- a/src/workpiece.rs
+++ b/src/workpiece.rs
@@ -1,4 +1,4 @@
-use sfml::graphics::{Texture, Sprite, IntRect, Drawable};
+use sfml::graphics::{Texture, Sprite, IntRect, Drawable, RenderTarget, RenderStates};
 use sfml::system::Vector2u;
 use sfml::graphics::Transformable;
 
@@ -82,6 +82,7 @@ impl WorkpieceLoader {
 }
 
 pub struct Workpiece<'a> {
+    pub render_sprite: Sprite<'a>
 }
 
 impl<'a> Workpiece<'a> {
@@ -89,6 +90,7 @@ impl<'a> Workpiece<'a> {
     pub fn new() -> Workpiece<'a> {
 
         Workpiece {
+            render_sprite: Sprite::new()
         }
     }