diff --git a/src/buffer/dynamic.rs b/src/buffer/dynamic.rs
new file mode 100644
index 00000000000..436caa3a42f
--- /dev/null
+++ b/src/buffer/dynamic.rs
@@ -0,0 +1,541 @@
+
+/// A buffer whose content can be modified after creation.
+pub struct DynamicBuffer<T: ?Sized> where T: Content {
+    marker: PhantomData<T>,
+
+    context: Rc<Context>,
+
+    /// OpenGL identifier; can't be zero.
+    id: gl::types::GLuint,
+
+    /// Type of buffer.
+    ty: BufferType,
+
+    /// Size in bytes of the buffer.
+    size: usize,
+
+    /// True if the buffer is currently mapped with something else than persistent mapping.
+    ///
+    /// The purpose of this flag is to detect if the user mem::forgets the `Mapping` object.
+    mapped: Cell<bool>,
+
+    /// ID of the draw call where the buffer was last written as an SSBO.
+    latest_shader_write: Cell<u64>,
+}
+
+impl<T: ?Sized> DynamicBuffer<T> where T: Content {
+    /// Builds a new buffer containing the given data. The size of the buffer is equal to the size
+    /// of the data.
+    pub fn new<F>(facade: &F, data: &T, ty: BufferType, mode: BufferMode)
+                  -> Result<DynamicBuffer<T>, BufferCreationError>
+        where F: Facade
+    {
+        Alloc::new(facade, data, ty, mode)
+            .map(|buffer| {
+                DynamicBuffer {
+                    alloc: Some(buffer),
+                    fence: Some(Fences::new()),
+                    marker: PhantomData,
+                }
+            })
+    }
+
+    /// Builds a new buffer of the given size.
+    pub fn empty_unsized<F>(facade: &F, ty: BufferType, size: usize, mode: BufferMode)
+                            -> Result<DynamicBuffer<T>, BufferCreationError>
+        where F: Facade
+    {
+        assert!(<T as Content>::is_size_suitable(size));
+
+        Alloc::empty(facade, ty, size, mode)
+            .map(|buffer| {
+                DynamicBuffer {
+                    alloc: Some(buffer),
+                    fence: Some(Fences::new()),
+                    marker: PhantomData,
+                }
+            })
+    }
+
+    /// Returns the context corresponding to this buffer.
+    #[inline]
+    pub fn get_context(&self) -> &Rc<Context> {
+        self.alloc.as_ref().unwrap().get_context()
+    }
+
+    /// Returns the size in bytes of this buffer.
+    #[inline]
+    pub fn get_size(&self) -> usize {
+        self.alloc.as_ref().unwrap().get_size()
+    }
+
+    /// Copies the content of the buffer to another buffer.
+    ///
+    /// # Panic
+    ///
+    /// Panics if `T` is unsized and the other buffer is too small.
+    ///
+    pub fn copy_to<'a, S>(&self, target: S) -> Result<(), CopyError>
+        where S: Into<BufferSlice<'a, T>>, T: 'a
+    {
+        let target = target.into();
+        let alloc = self.alloc.as_ref().unwrap();
+
+        try!(alloc.copy_to(0 .. self.get_size(), &target.alloc, target.get_offset_bytes()));
+
+        if let Some(inserter) = self.as_slice().add_fence() {
+            let mut ctxt = alloc.get_context().make_current();
+            inserter.insert(&mut ctxt);
+        }
+
+        if let Some(inserter) = target.add_fence() {
+            let mut ctxt = alloc.get_context().make_current();
+            inserter.insert(&mut ctxt);
+        }
+
+        Ok(())
+    }
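Reviewer sketch of how the constructors and the slice `copy_to` above are meant to be used. `display` stands in for any `Facade` implementation, and the `Into<BufferSlice>` conversion for `&DynamicBuffer` is assumed to mirror the existing `Buffer` wrapper; neither is defined in this diff.

```rust
// Sketch only: create two dynamic buffers and copy one into the other.
let src: DynamicBuffer<[u16]> =
    try!(DynamicBuffer::new(&display, &[1u16, 2, 3][..],
                            BufferType::ArrayBuffer, BufferMode::Dynamic));
let dst: DynamicBuffer<[u16]> =
    try!(DynamicBuffer::empty_unsized(&display, BufferType::ArrayBuffer,
                                      3 * mem::size_of::<u16>(), BufferMode::Dynamic));
try!(src.copy_to(&dst));
```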
+
+    /// Uploads data in the buffer.
+    ///
+    /// The data must fit inside the buffer.
+    ///
+    /// # Panic
+    ///
+    /// Panics if `offset_bytes` is out of range or the data is too large to fit in the buffer.
+    ///
+    /// # Unsafety
+    ///
+    /// If the buffer uses persistent mapping, the caller of this function must handle
+    /// synchronization.
+    ///
+    pub unsafe fn upload<D: ?Sized>(&self, offset_bytes: usize, data: &D)
+        where D: Content
+    {
+        assert!(offset_bytes + mem::size_of_val(data) <= self.size);
+
+        if self.persistent_mapping.is_some() {
+            let mapping = Mapping { mapping: self.map_shared(offset_bytes ..
+                                        offset_bytes + mem::size_of_val(data), false, true) };
+            ptr::copy_nonoverlapping(data.to_void_ptr() as *const u8,
+                                     <D as Content>::to_void_ptr(&mapping) as *mut u8,
+                                     mem::size_of_val(data));
+
+        } else if self.immutable {
+            let mut ctxt = self.context.make_current();
+            self.barrier_for_buffer_update(&mut ctxt);
+
+            self.assert_unmapped(&mut ctxt);
+            self.assert_not_transform_feedback(&mut ctxt);
+
+            let (tmp_buffer, _, _, _) = create_buffer(&mut ctxt, mem::size_of_val(data), Some(data),
+                                                      BufferType::CopyReadBuffer,
+                                                      BufferMode::Dynamic).unwrap();
+            copy_buffer(&mut ctxt, tmp_buffer, 0, self.id, offset_bytes,
+                        mem::size_of_val(data)).unwrap();
+            destroy_buffer(&mut ctxt, tmp_buffer);
+
+        } else {
+            assert!(offset_bytes < self.size);
+
+            let mut ctxt = self.context.make_current();
+            self.barrier_for_buffer_update(&mut ctxt);
+
+            let invalidate_all = offset_bytes == 0 && mem::size_of_val(data) == self.size;
+
+            self.assert_unmapped(&mut ctxt);
+            self.assert_not_transform_feedback(&mut ctxt);
+
+            if invalidate_all && (ctxt.version >= &Version(Api::Gl, 4, 3) ||
+                                  ctxt.extensions.gl_arb_invalidate_subdata)
+            {
+                ctxt.gl.InvalidateBufferData(self.id);
+            }
+
+            if ctxt.version >= &Version(Api::Gl, 4, 5) {
+                ctxt.gl.NamedBufferSubData(self.id, offset_bytes as gl::types::GLintptr,
+                                           mem::size_of_val(data) as gl::types::GLsizeiptr,
+                                           data.to_void_ptr() as *const _)
+
+            } else if ctxt.extensions.gl_ext_direct_state_access {
+                ctxt.gl.NamedBufferSubDataEXT(self.id, offset_bytes as gl::types::GLintptr,
+                                              mem::size_of_val(data) as gl::types::GLsizeiptr,
+                                              data.to_void_ptr() as *const _)
+
+            } else if ctxt.version >= &Version(Api::Gl, 1, 5) ||
+                      ctxt.version >= &Version(Api::GlEs, 2, 0)
+            {
+                let bind = bind_buffer(&mut ctxt, self.id, self.ty);
+                ctxt.gl.BufferSubData(bind, offset_bytes as gl::types::GLintptr,
+                                      mem::size_of_val(data) as gl::types::GLsizeiptr,
+                                      data.to_void_ptr() as *const _);
+
+            } else if ctxt.extensions.gl_arb_vertex_buffer_object {
+                let bind = bind_buffer(&mut ctxt, self.id, self.ty);
+                ctxt.gl.BufferSubDataARB(bind, offset_bytes as gl::types::GLintptr,
+                                         mem::size_of_val(data) as gl::types::GLsizeiptr,
+                                         data.to_void_ptr() as *const _);
+
+            } else {
+                unreachable!();
+            }
+        }
+    }
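A minimal call-site sketch for `upload`. The call is `unsafe` because nothing here waits for the GPU: if the buffer is persistently mapped, the caller must fence before overwriting data the GPU may still be reading. `buffer` is a hypothetical `DynamicBuffer<[f32]>` of at least 16 bytes.

```rust
let data = [1.0f32, 2.0, 3.0, 4.0];
unsafe {
    // writes 16 bytes starting at byte offset 0
    buffer.upload(0, &data[..]);
}
```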
+
+    /// Invalidates the content of the buffer. The data becomes undefined.
+    ///
+    /// `offset` and `size` are both in bytes.
+    ///
+    /// # Panic
+    ///
+    /// Panics if out of range.
+    ///
+    pub fn invalidate(&self, offset: usize, size: usize) {
+        assert!(offset + size <= self.size);
+
+        let is_whole_buffer = offset == 0 && size == self.size;
+
+        let mut ctxt = self.context.make_current();
+        self.assert_unmapped(&mut ctxt);
+        self.assert_not_transform_feedback(&mut ctxt);
+
+        if self.persistent_mapping.is_none() &&
+           (ctxt.version >= &Version(Api::Gl, 4, 3) || ctxt.extensions.gl_arb_invalidate_subdata)
+        {
+            if is_whole_buffer {
+                unsafe { ctxt.gl.InvalidateBufferData(self.id) };
+            } else {
+                unsafe { ctxt.gl.InvalidateBufferSubData(self.id, offset as gl::types::GLintptr,
+                                                         size as gl::types::GLsizeiptr) };
+            }
+
+        } else if !self.created_with_buffer_storage {
+            if is_whole_buffer {
+                let flags = match self.creation_mode {
+                    BufferMode::Default | BufferMode::Immutable => gl::STATIC_DRAW,
+                    BufferMode::Persistent | BufferMode::Dynamic => gl::DYNAMIC_DRAW,
+                };
+
+                if ctxt.version >= &Version(Api::Gl, 1, 5) ||
+                   ctxt.version >= &Version(Api::GlEs, 2, 0)
+                {
+                    unsafe {
+                        let bind = bind_buffer(&mut ctxt, self.id, self.ty);
+                        ctxt.gl.BufferData(bind, size as gl::types::GLsizeiptr,
+                                           ptr::null(), flags);
+                    }
+
+                } else if ctxt.extensions.gl_arb_vertex_buffer_object {
+                    unsafe {
+                        let bind = bind_buffer(&mut ctxt, self.id, self.ty);
+                        ctxt.gl.BufferDataARB(bind, size as gl::types::GLsizeiptr,
+                                              ptr::null(), flags);
+                    }
+
+                } else {
+                    unreachable!();
+                }
+            }
+        }
+    }
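The typical use of `invalidate` is orphaning: before rewriting a whole buffer, declare the old contents dead so the driver can hand back fresh storage instead of stalling to preserve the old bytes. A sketch, with `buffer` and `new_data` as placeholders:

```rust
// Orphan, then rewrite the full range.
buffer.invalidate(0, buffer.get_size());
unsafe {
    buffer.upload(0, &new_data[..]);
}
```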
+
+    /// Returns a mapping in memory of the content of the buffer.
+    ///
+    /// There are two possibilities:
+    ///
+    ///  - If the buffer uses persistent mapping, it will simply return a wrapper around the
+    ///    pointer to the existing mapping.
+    ///  - If the buffer doesn't use persistent mapping, it will create a temporary buffer which
+    ///    will be mapped. After the mapping is released, the temporary buffer will be copied
+    ///    to the real buffer.
+    ///
+    /// In the second case, the changes will only be written when the mapping is released.
+    /// Therefore this API is error-prone and shouldn't be exposed directly to the user. Instead,
+    /// the public `map` functions should take `&mut self` rather than `&self` to prevent users
+    /// from manipulating the buffer while it is "mapped".
+    ///
+    /// Contrary to `map_mut`, this function only requires a `&self`.
+    ///
+    /// # Panic
+    ///
+    /// Panics if the `bytes_range` is not aligned to a mappable slice.
+    ///
+    /// # Unsafety
+    ///
+    /// If the buffer uses persistent mapping, the caller of this function must handle
+    /// synchronization.
+    ///
+    /// If you pass `false` for `read`, you **must not** read the returned buffer. If you pass
+    /// `false` for `write`, you **must not** write the returned buffer.
+    ///
+    unsafe fn map_shared<D: ?Sized>(&self, bytes_range: Range<usize>, read: bool, write: bool)
+                                    -> MappingImpl<D> where D: Content
+    {
+        if let Some(existing_mapping) = self.persistent_mapping.clone() {
+            // TODO: optimize so that it's not always necessary to make the context current
+            let mut ctxt = self.context.make_current();
+            self.barrier_for_buffer_update(&mut ctxt);
+
+            let data = (existing_mapping as *mut u8).offset(bytes_range.start as isize);
+            let data = Content::ref_from_ptr(data as *mut (),
+                                             bytes_range.end - bytes_range.start).unwrap();
+
+            MappingImpl::PersistentMapping {
+                buffer: self,
+                offset_bytes: bytes_range.start,
+                data: data,
+                needs_flushing: write,
+            }
+
+        } else {
+            let size_bytes = bytes_range.end - bytes_range.start;
+
+            let mut ctxt = self.context.make_current();
+
+            // we have to construct a temporary buffer that we will map in memory
+            // then after the Mapping is destroyed, we will copy from the temporary buffer to the
+            // real one
+            let temporary_buffer = {
+                let (temporary_buffer, _, _, _) = create_buffer::<D>(&mut ctxt, size_bytes,
+                                                              None, BufferType::CopyWriteBuffer,
+                                                              BufferMode::Dynamic).unwrap();
+                temporary_buffer
+            };
+
+            let ptr = {
+                self.assert_unmapped(&mut ctxt);
+                self.assert_not_transform_feedback(&mut ctxt);
+
+                if read {
+                    copy_buffer(&mut ctxt, self.id, bytes_range.start,
+                                temporary_buffer, 0, size_bytes).unwrap();
+                }
+
+                map_buffer(&mut ctxt, temporary_buffer, self.ty, 0 .. size_bytes, true, true)
+                    .expect("Buffer mapping is not supported by the backend")
+            };
+
+            let data = match Content::ref_from_ptr(ptr, bytes_range.end - bytes_range.start) {
+                Some(data) => data,
+                None => {
+                    unmap_buffer(&mut ctxt, temporary_buffer, self.ty);
+                    panic!("Wrong bytes range");
+                }
+            };
+
+            MappingImpl::TemporaryBuffer {
+                original_buffer: self,
+                original_buffer_offset: bytes_range.start,
+                temporary_buffer: temporary_buffer,
+                temporary_buffer_data: data,
+                needs_flushing: write,
+            }
+        }
+    }
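Because the non-persistent path of `map_shared` stages writes through a temporary buffer, nothing reaches the real buffer until the mapping is released. The actual write-back lives in `MappingImpl`'s `Drop` glue, which this diff doesn't show; the following is only a sketch of that release sequence, reusing the raw helpers already called above:

```rust
// Hypothetical release path for MappingImpl::TemporaryBuffer (sketch, not the real Drop impl).
unsafe fn release_temporary(ctxt: &mut CommandContext,
                            original: gl::types::GLuint, original_offset: usize,
                            temporary: gl::types::GLuint, ty: BufferType,
                            size: usize, needs_flushing: bool) {
    // the temporary buffer must be unmapped before it can be used as a copy source
    unmap_buffer(ctxt, temporary, ty);

    if needs_flushing {
        // write the staged bytes back into the real buffer
        copy_buffer(ctxt, temporary, 0, original, original_offset, size).unwrap();
    }

    destroy_buffer(ctxt, temporary);
}
```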
+
+    /// Returns a mapping in memory of the content of the buffer.
+    ///
+    /// There are two possibilities:
+    ///
+    ///  - If the buffer uses persistent mapping, it will simply return a wrapper around the
+    ///    pointer to the existing mapping.
+    ///  - If the buffer doesn't use persistent mapping, it will map the buffer.
+    ///
+    /// Contrary to `map_shared`, this function requires a `&mut self`. It can only be used if
+    /// you have exclusive access to the buffer.
+    ///
+    /// # Panic
+    ///
+    /// Panics if the `bytes_range` is not aligned to a mappable slice.
+    ///
+    /// # Unsafety
+    ///
+    /// If the buffer uses persistent mapping, the caller of this function must handle
+    /// synchronization.
+    ///
+    /// If you pass `false` for `read`, you **must not** read the returned buffer. If you pass
+    /// `false` for `write`, you **must not** write the returned buffer.
+    ///
+    unsafe fn map_impl<D: ?Sized>(&mut self, bytes_range: Range<usize>, read: bool, write: bool)
+                                  -> MappingImpl<D> where D: Content
+    {
+        if self.persistent_mapping.is_some() || self.immutable {
+            self.map_shared(bytes_range, read, write)
+
+        } else {
+            let data = {
+                let mut ctxt = self.context.make_current();
+
+                let ptr = {
+                    self.assert_unmapped(&mut ctxt);
+                    self.assert_not_transform_feedback(&mut ctxt);
+                    self.barrier_for_buffer_update(&mut ctxt);
+                    let ptr = map_buffer(&mut ctxt, self.id, self.ty, bytes_range.clone(),
+                                         read, write)
+                        .expect("Buffer mapping is not supported by the backend");
+                    self.mapped.set(true);
+                    ptr
+                };
+
+                match Content::ref_from_ptr(ptr, bytes_range.end - bytes_range.start) {
+                    Some(data) => data,
+                    None => {
+                        unmap_buffer(&mut ctxt, self.id, self.ty);
+                        panic!("Wrong bytes range");
+                    }
+                }
+            };
+
+            MappingImpl::RegularMapping {
+                buffer: self,
+                data: data,
+                needs_flushing: write,
+            }
+        }
+    }
+
+    /// Returns a read and write mapping in memory of the content of the buffer.
+    ///
+    /// # Panic
+    ///
+    /// Panics if the `bytes_range` is not aligned to a mappable slice.
+    ///
+    /// # Unsafety
+    ///
+    /// If the buffer uses persistent mapping, the caller of this function must handle
+    /// synchronization.
+    ///
+    #[inline]
+    pub unsafe fn map<D: ?Sized>(&mut self, bytes_range: Range<usize>)
+                                 -> Mapping<D> where D: Content
+    {
+        Mapping {
+            mapping: self.map_impl(bytes_range, true, true)
+        }
+    }
+
+    /// Returns a read-only mapping in memory of the content of the buffer.
+    ///
+    /// # Panic
+    ///
+    /// Panics if the `bytes_range` is not aligned to a mappable slice.
+    ///
+    /// # Unsafety
+    ///
+    /// If the buffer uses persistent mapping, the caller of this function must handle
+    /// synchronization.
+    ///
+    #[inline]
+    pub unsafe fn map_read<D: ?Sized>(&mut self, bytes_range: Range<usize>)
+                                      -> ReadMapping<D> where D: Content
+    {
+        ReadMapping {
+            mapping: self.map_impl(bytes_range, true, false)
+        }
+    }
+
+    /// Returns a write-only mapping in memory of the content of the buffer.
+    ///
+    /// # Panic
+    ///
+    /// Panics if the `bytes_range` is not aligned to a mappable slice.
+    ///
+    /// # Unsafety
+    ///
+    /// If the buffer uses persistent mapping, the caller of this function must handle
+    /// synchronization.
+    ///
+    #[inline]
+    pub unsafe fn map_write<D: ?Sized>(&mut self, bytes_range: Range<usize>)
+                                       -> WriteMapping<D> where D: Content
+    {
+        WriteMapping {
+            mapping: self.map_impl(bytes_range, false, true)
+        }
+    }
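Call-site sketch for the mapping API above, filling a range through a write-only mapping. `buffer` is a hypothetical `DynamicBuffer<[u8]>`, and the `set` method is assumed from the existing `WriteMapping` type, which this diff doesn't define:

```rust
unsafe {
    let mut mapping = buffer.map_write::<[u8]>(0 .. 16);
    for i in 0 .. 16 {
        mapping.set(i, 0u8); // never *read* through a write-only mapping
    }
} // changes are flushed when `mapping` is dropped
```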
+
+    /// Reads the content of the buffer.
+    ///
+    /// # Panic
+    ///
+    /// Panics if out of range.
+    ///
+    /// # Unsafety
+    ///
+    /// If the buffer uses persistent mapping, the caller of this function must handle
+    /// synchronization.
+    ///
+    pub unsafe fn read<D: ?Sized>(&self, range: Range<usize>)
+                                  -> Result<D::Owned, ReadError>
+        where D: Content
+    {
+        let size_to_read = range.end - range.start;
+
+        if self.persistent_mapping.is_some() {
+            let mapping = ReadMapping { mapping: self.map_shared(range, true, false) };
+            <D as Content>::read(size_to_read, |output| {
+                ptr::copy_nonoverlapping(<D as Content>::to_void_ptr(&mapping) as *const u8,
+                                         output as *mut D as *mut u8, size_to_read);
+                Ok(())
+            })
+
+        } else {
+            let mut ctxt = self.context.make_current();
+
+            if ctxt.state.lost_context {
+                return Err(ReadError::ContextLost);
+            }
+
+            self.assert_unmapped(&mut ctxt);
+            self.barrier_for_buffer_update(&mut ctxt);
+
+            <D as Content>::read(size_to_read, |output| {
+                if ctxt.version >= &Version(Api::Gl, 4, 5) {
+                    ctxt.gl.GetNamedBufferSubData(self.id, range.start as gl::types::GLintptr,
+                                                  size_to_read as gl::types::GLsizeiptr,
+                                                  output as *mut _ as *mut _);
+
+                } else if ctxt.version >= &Version(Api::Gl, 1, 5) {
+                    let bind = bind_buffer(&mut ctxt, self.id, self.ty);
+                    ctxt.gl.GetBufferSubData(bind, range.start as gl::types::GLintptr,
+                                             size_to_read as gl::types::GLsizeiptr,
+                                             output as *mut _ as *mut _);
+
+                } else if ctxt.extensions.gl_arb_vertex_buffer_object {
+                    let bind = bind_buffer(&mut ctxt, self.id, self.ty);
+                    ctxt.gl.GetBufferSubDataARB(bind, range.start as gl::types::GLintptr,
+                                                size_to_read as gl::types::GLsizeiptr,
+                                                output as *mut _ as *mut _);
+
+                } else if ctxt.version >= &Version(Api::GlEs, 1, 0) {
+                    return Err(ReadError::NotSupported);
+
+                } else {
+                    unreachable!()
+                }
+
+                Ok(())
+            })
+        }
+    }
+
+    /// Copies data from this buffer to another one.
+    ///
+    /// With persistent-mapped buffers you must create a sync fence *after* this operation.
+    ///
+    /// # Panic
+    ///
+    /// Panics if the offsets/sizes are out of range.
+    ///
+    pub fn copy_to(&self, range: Range<usize>, target: &Alloc, dest_offset: usize)
+                   -> Result<(), CopyError>
+    {
+        // TODO: read+write manually
+        // TODO: check that the other buffer belongs to the same context
+
+        assert!(range.end >= range.start);
+        assert!(range.end <= self.size);
+        assert!(dest_offset + range.end - range.start <= target.size);
+
+        let mut ctxt = self.context.make_current();
+
+        unsafe {
+            copy_buffer(&mut ctxt, self.id, range.start, target.id, dest_offset,
+                        range.end - range.start)
+        }
+    }
+}
\ No newline at end of file
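Read-back sketch for `read` above. For a slice type, `D::Owned` is assumed to resolve to a `Vec` as in the existing `Content` trait; `buffer` is hypothetical:

```rust
// Copy the whole buffer back into client memory.
let bytes: Vec<u8> = unsafe { try!(buffer.read::<[u8]>(0 .. buffer.get_size())) };
```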
diff --git a/src/buffer/immutable.rs b/src/buffer/immutable.rs
new file mode 100644
index 00000000000..b6f057a8920
--- /dev/null
+++ b/src/buffer/immutable.rs
@@ -0,0 +1,273 @@
+use buffer::raw;
+use buffer::Content;
+use buffer::Storage;
+
+/// A buffer whose content is supplied at creation and can't be re-uploaded afterwards.
+pub struct ImmutableBuffer<T: ?Sized> where T: Content {
+    marker: PhantomData<T>,
+
+    context: Rc<Context>,
+
+    /// OpenGL identifier; can't be zero.
+    id: gl::types::GLuint,
+
+    /// Type of buffer.
+    ty: BufferType,
+
+    /// Size in bytes of the buffer.
+    size: usize,
+}
+
+impl<T: ?Sized> ImmutableBuffer<T> where T: Content {
+    pub fn new<F>(facade: &F, data: &T, ty: BufferType)
+                  -> Result<ImmutableBuffer<T>, BufferCreationError>
+        where F: Facade
+    {
+        let mut ctxt = facade.get_context().make_current();
+
+        let size = mem::size_of_val(data);
+        let id = try!(unsafe { create_buffer(&mut ctxt, size, Some(data), ty) });
+
+        Ok(ImmutableBuffer {
+            marker: PhantomData,
+            context: facade.get_context().clone(),
+            id: id,
+            ty: ty,
+            size: size,
+        })
+    }
+
+    pub fn empty<F>(facade: &F, ty: BufferType) -> Result<ImmutableBuffer<T>, BufferCreationError>
+        where F: Facade, T: Sized, T: Copy
+    {
+        let mut ctxt = facade.get_context().make_current();
+
+        let size = mem::size_of::<T>();
+        let id = try!(unsafe { create_buffer::<()>(&mut ctxt, size, None, ty) });
+
+        Ok(ImmutableBuffer {
+            marker: PhantomData,
+            context: facade.get_context().clone(),
+            id: id,
+            ty: ty,
+            size: size,
+        })
+    }
+
+    pub fn empty_array<F>(facade: &F, len: usize, ty: BufferType)
+                          -> Result<ImmutableBuffer<T>, BufferCreationError>
+        where F: Facade, T: ArrayContent
+    {
+        let mut ctxt = facade.get_context().make_current();
+
+        let size = len * <T as ArrayContent>::element_size();
+        let id = try!(unsafe { create_buffer::<()>(&mut ctxt, size, None, ty) });
+
+        Ok(ImmutableBuffer {
+            marker: PhantomData,
+            context: facade.get_context().clone(),
+            id: id,
+            ty: ty,
+            size: size,
+        })
+    }
+
+    pub fn empty_unsized<F>(facade: &F, size: usize, ty: BufferType)
+                            -> Result<ImmutableBuffer<T>, BufferCreationError>
+        where F: Facade, T: Copy
+    {
+        let mut ctxt = facade.get_context().make_current();
+        let id = try!(unsafe { create_buffer::<()>(&mut ctxt, size, None, ty) });
+
+        Ok(ImmutableBuffer {
+            marker: PhantomData,
+            context: facade.get_context().clone(),
+            id: id,
+            ty: ty,
+            size: size,
+        })
+    }
+
+    /// See the `CopyTo` trait.
+    pub fn copy_to<S>(&self, target: &S) -> Result<(), CopyError>
+        where S: Storage
+    {
+        // TODO: check that the other buffer belongs to the same context
+        // TODO: obtain the target's buffer id through `Storage` instead of
+        //       assuming a raw `id` field
+
+        assert!(self.size <= target.size());
+
+        let mut ctxt = self.context.make_current();
+
+        unsafe {
+            raw::copy_buffer(&mut ctxt, self.id, 0, target.id, 0, self.size)
+        }
+    }
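Construction sketch for the immutable flavour. Since the storage flags passed to `glBufferStorage` are `0`, data can only be supplied at creation time or copied in from another buffer; `display` is any `Facade`:

```rust
let buf: ImmutableBuffer<[u8]> =
    try!(ImmutableBuffer::new(&display, &[0u8; 64][..], BufferType::ArrayBuffer));
```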
+
+    /// See the `Invalidate` trait.
+    pub fn invalidate(&self) {
+        let mut ctxt = self.context.make_current();
+
+        if ctxt.version >= &Version(Api::Gl, 4, 3) ||
+           ctxt.extensions.gl_arb_invalidate_subdata
+        {
+            unsafe { ctxt.gl.InvalidateBufferData(self.id) };
+        }
+    }
+}
+
+impl<T: ?Sized> Drop for ImmutableBuffer<T> where T: Content {
+    fn drop(&mut self) {
+        unsafe {
+            let mut ctxt = self.context.make_current();
+            self.assert_not_transform_feedback(&mut ctxt);
+            VertexAttributesSystem::purge_buffer(&mut ctxt, self.id);
+            raw::destroy_buffer(&mut ctxt, self.id);
+        }
+    }
+}
+
+impl<T: ?Sized> Storage for ImmutableBuffer<T> where T: Content {
+    fn as_slice_any(&self) -> BufferAnySlice {
+        unimplemented!()
+    }
+
+    #[inline]
+    fn size(&self) -> usize {
+        self.size
+    }
+}
+
+impl<T: ?Sized> Invalidate for ImmutableBuffer<T> where T: Content {
+    #[inline]
+    fn invalidate(&self) {
+        ImmutableBuffer::invalidate(self)
+    }
+}
+
+impl<T: ?Sized> Create for ImmutableBuffer<T> where T: Content {
+    #[inline]
+    fn new<F>(facade: &F, data: &T, ty: BufferType)
+              -> Result<ImmutableBuffer<T>, BufferCreationError>
+        where F: Facade
+    {
+        ImmutableBuffer::new(facade, data, ty)
+    }
+
+    #[inline]
+    fn empty<F>(facade: &F, ty: BufferType)
+                -> Result<ImmutableBuffer<T>, BufferCreationError>
+        where F: Facade, T: Copy
+    {
+        ImmutableBuffer::empty(facade, ty)
+    }
+
+    #[inline]
+    fn empty_array<F>(facade: &F, len: usize, ty: BufferType)
+                      -> Result<ImmutableBuffer<T>, BufferCreationError>
+        where F: Facade, T: ArrayContent
+    {
+        ImmutableBuffer::empty_array(facade, len, ty)
+    }
+
+    #[inline]
+    fn empty_unsized<F>(facade: &F, size: usize, ty: BufferType)
+                        -> Result<ImmutableBuffer<T>, BufferCreationError>
+        where F: Facade, T: Copy
+    {
+        ImmutableBuffer::empty_unsized(facade, size, ty)
+    }
+}
+
+impl<T: ?Sized> CopyTo for ImmutableBuffer<T> where T: Content {
+    #[inline]
+    fn copy_to<S>(&self, target: &S) -> Result<(), CopyError>
+        where S: Storage
+    {
+        ImmutableBuffer::copy_to(self, target)
+    }
+}
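The trait impls above are what let generic code stay storage-agnostic. For instance, a helper that orphans any storage implementing `Invalidate` (sketch; `Invalidate` itself is defined elsewhere in this PR):

```rust
fn wipe<B: Invalidate>(buffer: &B) {
    buffer.invalidate();
}
```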
+
+/// Creates a new buffer.
+///
+/// # Panic
+///
+/// Panics if `mem::size_of_val(&data) != size`.
+unsafe fn create_buffer<D: ?Sized>(mut ctxt: &mut CommandContext, size: usize, data: Option<&D>,
                                    ty: BufferType)
+                                   -> Result<gl::types::GLuint, BufferCreationError>
+    where D: Content
+{
+    if !is_buffer_type_supported(ctxt, ty) {
+        return Err(BufferCreationError::BufferTypeNotSupported);
+    }
+
+    if let Some(data) = data {
+        assert!(mem::size_of_val(data) == size);
+    }
+
+    // creating the id of the buffer
+    let id = raw::create_buffer_name(ctxt);
+
+    // raw pointer to data
+    let data_ptr = if let Some(data) = data {
+        if size == 0 {      // if the size is `0` we pass `1` instead (see below),
+            ptr::null()     // so it's important to have `null` here
+        } else {
+            data.to_void_ptr()
+        }
+    } else {
+        ptr::null()
+    };
+
+    // if the `size` is 0 bytes then we use 1 instead, otherwise nvidia drivers complain
+    // note that according to glium the size of the buffer will remain 0
+    let size = match size {
+        0 => 1,
+        a => a
+    };
+
+    // will store the actual size of the buffer so that we can compare it with the expected size
+    let mut obtained_size: gl::types::GLint = mem::uninitialized();
+
+    if ctxt.version >= &Version(Api::Gl, 4, 5) || ctxt.extensions.gl_arb_direct_state_access {
+        ctxt.gl.NamedBufferStorage(id, size as gl::types::GLsizeiptr, data_ptr as *const _, 0);
+        ctxt.gl.GetNamedBufferParameteriv(id, gl::BUFFER_SIZE, &mut obtained_size);
+
+    } else if ctxt.extensions.gl_arb_buffer_storage &&
+              ctxt.extensions.gl_ext_direct_state_access
+    {
+        ctxt.gl.NamedBufferStorageEXT(id, size as gl::types::GLsizeiptr, data_ptr as *const _, 0);
+        ctxt.gl.GetNamedBufferParameterivEXT(id, gl::BUFFER_SIZE, &mut obtained_size);
+
+    } else if ctxt.version >= &Version(Api::Gl, 4, 4) ||
+              ctxt.extensions.gl_arb_buffer_storage
+    {
+        let bind = bind_buffer(&mut ctxt, id, ty);
+        ctxt.gl.BufferStorage(bind, size as gl::types::GLsizeiptr, data_ptr as *const _, 0);
+        ctxt.gl.GetBufferParameteriv(bind, gl::BUFFER_SIZE, &mut obtained_size);
+
+    } else if ctxt.extensions.gl_ext_buffer_storage {
+        let bind = bind_buffer(&mut ctxt, id, ty);
+        ctxt.gl.BufferStorageEXT(bind, size as gl::types::GLsizeiptr, data_ptr as *const _, 0);
+        ctxt.gl.GetBufferParameteriv(bind, gl::BUFFER_SIZE, &mut obtained_size);
+
+    } else {
+        // FIXME: return error instead
+        panic!()
+    }
+
+    if size != obtained_size as usize {
+        if ctxt.version >= &Version(Api::Gl, 1, 5) ||
+           ctxt.version >= &Version(Api::GlEs, 2, 0)
+        {
+            ctxt.gl.DeleteBuffers(1, [id].as_ptr());
+        } else if ctxt.extensions.gl_arb_vertex_buffer_object {
+            ctxt.gl.DeleteBuffersARB(1, [id].as_ptr());
+        } else {
+            unreachable!();
+        }
+
+        return Err(BufferCreationError::OutOfMemory);
+    }
+
+    Ok(id)
+}
diff --git a/src/buffer/lock.rs b/src/buffer/lock.rs
new file mode 100644
index 00000000000..5c769821fbf
--- /dev/null
+++ b/src/buffer/lock.rs
@@ -0,0 +1,16 @@
+use sync::SyncFence;
+
+pub struct GlobalFence<T> {
+    inner: T,
+    fence: Option<SyncFence>,
+}
+
+impl<T> GlobalFence<T> {
+    #[inline]
+    pub fn new(inner: T) -> GlobalFence<T> {
+        GlobalFence {
+            inner: inner,
+            fence: None,
+        }
+    }
+}
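`lock.rs` only lands the `GlobalFence` shell for now. A sketch of the access pattern it seems intended for, assuming the `wait` method of the existing `sync::SyncFence` (not part of this diff):

```rust
impl<T> GlobalFence<T> {
    /// Blocks until the GPU is done with `inner`, then hands out access.
    pub fn access(&mut self) -> &mut T {
        if let Some(fence) = self.fence.take() {
            fence.wait(); // assumed SyncFence API
        }
        &mut self.inner
    }
}
```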
diff --git a/src/buffer/mod.rs b/src/buffer/mod.rs
index 8c1d6ceda61..3465f1d6ddb 100644
--- a/src/buffer/mod.rs
+++ b/src/buffer/mod.rs
@@ -80,7 +80,11 @@ use std::slice;
 use backend::Facade;
 
 mod alloc;
+mod dynamic;
 mod fences;
+mod immutable;
+mod lock;
+mod persistent;
 mod raw;
 
 mod view;
diff --git a/src/buffer/persistent.rs b/src/buffer/persistent.rs
new file mode 100644
index 00000000000..52f2d464f5a
--- /dev/null
+++ b/src/buffer/persistent.rs
@@ -0,0 +1,171 @@
+use buffer::raw;
+use buffer::Content;
+use buffer::Storage;
+
+use vertex_array_object::VertexAttributesSystem;
+
+/// A buffer which is mapped in a persistent fashion. This means that accessing its content doesn't
+/// require any OpenGL function call and is very fast.
+///
+/// However accessing the content of the buffer is unsafe, because the access must not happen at
+/// the same time as the GPU's. Therefore you shouldn't generally use a `PersistentBuffer` alone.
+pub struct PersistentBuffer<T: ?Sized> where T: Content {
+    marker: PhantomData<T>,
+
+    context: Rc<Context>,
+
+    /// OpenGL identifier; can't be zero.
+    id: gl::types::GLuint,
+
+    /// Type of buffer.
+    ty: BufferType,
+
+    /// Size in bytes of the buffer.
+    size: usize,
+
+    mapping: *mut T,
+}
+
+impl<T: ?Sized> PersistentBuffer<T> where T: Content {
+    /// Accesses the content of the buffer.
+    #[inline]
+    pub unsafe fn access<'a>(&'a self) -> &'a mut T {
+        &mut *self.mapping
+    }
+}
+
+impl<T: ?Sized> Drop for PersistentBuffer<T> where T: Content {
+    fn drop(&mut self) {
+        unsafe {
+            let mut ctxt = self.context.make_current();
+            raw::unmap_buffer(&mut ctxt, self.id, self.ty);
+            self.assert_not_transform_feedback(&mut ctxt);
+            VertexAttributesSystem::purge_buffer(&mut ctxt, self.id);
+            raw::destroy_buffer(&mut ctxt, self.id);
+        }
+    }
+}
+
+// TODO: should this be implemented? what if `gpu_access` is called and then the buffer is passed to
+// a lock constructor?
+impl<T: ?Sized> Storage for PersistentBuffer<T> where T: Content {
+    fn as_slice_any(&self) -> BufferAnySlice {
+        unimplemented!()
+    }
+
+    #[inline]
+    fn size(&self) -> usize {
+        self.size
+    }
+}
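Access sketch for `PersistentBuffer`. The mapped pointer is always valid, but the GPU may still be reading or writing the range, so synchronisation (e.g. waiting on a fence) is entirely the caller's responsibility; `buffer` is hypothetical:

```rust
// `buffer: PersistentBuffer<[f32; 4]>`
unsafe {
    let content = buffer.access();
    content[0] = 1.0; // no GL call involved; this is a plain memory write
}
```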
+
+/// Creates a new buffer.
+///
+/// # Panic
+///
+/// Panics if `mem::size_of_val(&data) != size`.
+unsafe fn create_buffer<D: ?Sized>(mut ctxt: &mut CommandContext, size: usize, data: Option<&D>,
+                                   ty: BufferType)
+                                   -> Result<(gl::types::GLuint, *mut os::raw::c_void),
+                                             BufferCreationError>
+    where D: Content
+{
+    if !is_buffer_type_supported(ctxt, ty) {
+        return Err(BufferCreationError::BufferTypeNotSupported);
+    }
+
+    if let Some(data) = data {
+        assert!(mem::size_of_val(data) == size);
+    }
+
+    // creating the id of the buffer
+    let id = raw::create_buffer_name(ctxt);
+
+    // raw pointer to data
+    let data_ptr = if let Some(data) = data {
+        if size == 0 {      // if the size is `0` we pass `1` instead (see below),
+            ptr::null()     // so it's important to have `null` here
+        } else {
+            data.to_void_ptr()
+        }
+    } else {
+        ptr::null()
+    };
+
+    // if the `size` is 0 bytes then we use 1 instead, otherwise nvidia drivers complain
+    // note that according to glium the size of the buffer will remain 0
+    let size = match size {
+        0 => 1,
+        a => a
+    };
+
+    let flags = gl::MAP_PERSISTENT_BIT | gl::MAP_READ_BIT | gl::MAP_WRITE_BIT;
+
+    // will store the actual size of the buffer so that we can compare it with the expected size
+    let mut obtained_size: gl::types::GLint = mem::uninitialized();
+
+    if ctxt.version >= &Version(Api::Gl, 4, 5) || ctxt.extensions.gl_arb_direct_state_access {
+        ctxt.gl.NamedBufferStorage(id, size as gl::types::GLsizeiptr, data_ptr as *const _, flags);
+        ctxt.gl.GetNamedBufferParameteriv(id, gl::BUFFER_SIZE, &mut obtained_size);
+
+    } else if ctxt.extensions.gl_arb_buffer_storage &&
+              ctxt.extensions.gl_ext_direct_state_access
+    {
+        ctxt.gl.NamedBufferStorageEXT(id, size as gl::types::GLsizeiptr,
+                                      data_ptr as *const _, flags);
+        ctxt.gl.GetNamedBufferParameterivEXT(id, gl::BUFFER_SIZE, &mut obtained_size);
+
+    } else if ctxt.version >= &Version(Api::Gl, 4, 4) ||
+              ctxt.extensions.gl_arb_buffer_storage
+    {
+        let bind = bind_buffer(&mut ctxt, id, ty);
+        ctxt.gl.BufferStorage(bind, size as gl::types::GLsizeiptr, data_ptr as *const _, flags);
+        ctxt.gl.GetBufferParameteriv(bind, gl::BUFFER_SIZE, &mut obtained_size);
+
+    } else if ctxt.extensions.gl_ext_buffer_storage {
+        let bind = bind_buffer(&mut ctxt, id, ty);
+        ctxt.gl.BufferStorageEXT(bind, size as gl::types::GLsizeiptr, data_ptr as *const _, flags);
+        ctxt.gl.GetBufferParameteriv(bind, gl::BUFFER_SIZE, &mut obtained_size);
+
+    } else {
+        // FIXME: return error instead
+        panic!()
+    }
+
+    if size != obtained_size as usize {
+        if ctxt.version >= &Version(Api::Gl, 1, 5) ||
+           ctxt.version >= &Version(Api::GlEs, 2, 0)
+        {
+            ctxt.gl.DeleteBuffers(1, [id].as_ptr());
+        } else if ctxt.extensions.gl_arb_vertex_buffer_object {
+            ctxt.gl.DeleteBuffersARB(1, [id].as_ptr());
+        } else {
+            unreachable!();
+        }
+
+        return Err(BufferCreationError::OutOfMemory);
+    }
+
+    let ptr = if ctxt.version >= &Version(Api::Gl, 4, 5) {
+        ctxt.gl.MapNamedBufferRange(id, 0, size as gl::types::GLsizeiptr,
+                                    gl::MAP_READ_BIT | gl::MAP_WRITE_BIT |
+                                    gl::MAP_PERSISTENT_BIT | gl::MAP_FLUSH_EXPLICIT_BIT)
+
+    } else if ctxt.version >= &Version(Api::Gl, 3, 0) ||
+              ctxt.extensions.gl_arb_map_buffer_range
+    {
+        let bind = bind_buffer(&mut ctxt, id, ty);
+        ctxt.gl.MapBufferRange(bind, 0, size as gl::types::GLsizeiptr,
+                               gl::MAP_READ_BIT | gl::MAP_WRITE_BIT |
+                               gl::MAP_PERSISTENT_BIT | gl::MAP_FLUSH_EXPLICIT_BIT)
+    } else {
+        // already checked above
+        unreachable!();
+    };
+
+    if ptr.is_null() {
+        let error = ::get_gl_error(ctxt);
+        panic!("glMapBufferRange returned null (error: {:?})", error);
+    }
+
+    Ok((id, ptr))
+}
diff --git a/src/buffer/raw.rs b/src/buffer/raw.rs
index 191f56a0c48..95355678dfb 100644
--- a/src/buffer/raw.rs
+++ b/src/buffer/raw.rs
@@ -76,6 +76,26 @@ pub fn is_buffer_type_supported(ctxt: &mut CommandContext, ty: BufferType) -> bo
     }
 }
 
+/// Reserves a new identifier for a buffer.
+#[inline]
+pub unsafe fn create_buffer_name(ctxt: &mut CommandContext) -> gl::types::GLuint {
+    let mut id: gl::types::GLuint = mem::uninitialized();
+
+    if ctxt.version >= &Version(Api::Gl, 4, 5) || ctxt.extensions.gl_arb_direct_state_access {
+        ctxt.gl.CreateBuffers(1, &mut id);
+    } else if ctxt.version >= &Version(Api::Gl, 1, 5) ||
+              ctxt.version >= &Version(Api::GlEs, 2, 0)
+    {
+        ctxt.gl.GenBuffers(1, &mut id);
+    } else if ctxt.extensions.gl_arb_vertex_buffer_object {
+        ctxt.gl.GenBuffersARB(1, &mut id);
+    } else {
+        unreachable!();
+    }
+
+    id
+}
+
 /// Binds a buffer of the given type, and returns the GLenum of the bind point.
 /// `id` can be 0.
 ///
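`create_buffer_name` follows the same capability-dispatch ladder (core version, then extension, then legacy) as the rest of `raw.rs`. An internal call-site sketch, assuming a current `CommandContext` named `ctxt`:

```rust
unsafe {
    let id = create_buffer_name(&mut ctxt);
    // bind it so the classic BufferData-style entry points target it
    let _bind = bind_buffer(&mut ctxt, id, BufferType::ArrayBuffer);
}
```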