Skip to content
This repository has been archived by the owner on Jun 18, 2021. It is now read-only.

Sketch of the proposed buffer mapping change #13

Closed
wants to merge 1 commit into from
Closed
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
186 changes: 73 additions & 113 deletions src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -693,28 +693,11 @@ impl Device {
///
/// This returns a [`CreateBufferMapped<T>`], which exposes a `&mut [T]`. The actual [`Buffer`]
/// will not be created until calling [`CreateBufferMapped::finish`].
pub fn create_buffer_mapped<'a, T>(
pub fn create_mappable_buffer<'a>(
&self,
count: usize,
usage: BufferUsage,
) -> CreateBufferMapped<'a, T>
where
T: 'static + Copy,
{
let type_size = std::mem::size_of::<T>() as BufferAddress;
assert_ne!(type_size, 0);

let desc = BufferDescriptor {
size: (type_size * count as BufferAddress).max(1),
usage,
};
let mut ptr: *mut u8 = std::ptr::null_mut();

let id = wgn::wgpu_device_create_buffer_mapped(self.id, &desc, &mut ptr as *mut *mut u8);

let data = unsafe { std::slice::from_raw_parts_mut(ptr as *mut T, count) };

CreateBufferMapped { id, data }
desc: &BufferDescriptor
) -> WriteMappableBuffer {
unimplemented!();
}

/// Creates a new [`Texture`].
Expand Down Expand Up @@ -786,108 +769,85 @@ where
phantom: std::marker::PhantomData<T>,
}

impl Buffer {
pub fn map_read_async<T, F>(&self, start: BufferAddress, size: BufferAddress, callback: F)
pub struct WriteMappableBuffer {
// ...
}

impl WriteMappableBuffer {
/// Returns a writable slice of the mapped memory.
///
/// The first time this method is called, it maps the memory; subsequent invocations
/// only return the slice.
///
/// Note: This could be Device::map_buffer_write(&self, buffer: &mut WriteMappableBuffer) -> &[u8]
/// The important part is that this borrows both the mappable buffer and the device so that
/// the mapped memory doesn't outlive the device nor the buffer.
pub fn map(&mut self, device: &Device) -> &mut[u8] {
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

That also would prevent any submissions of command buffers, since get_queue is &mut today.

Copy link
Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I see. I suppose it is not worse than what we have today since the current callback can't access the device and do any submission either.
Do you think it would be possible for the callback to access the device mutably or would it be complicated/unsafe to do so?

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Feels complicated/unsafe to me

Copy link
Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Ok fair enough. Then I suppose passing it as non-mutable isn't worse than what we currently have, right?

unimplemented!()
}

/// Typed equivalent of `map` for convenience.
pub fn map_typed<T>(&mut self, device: &Device) -> &mut[T]
where
T: 'static + Copy,
F: FnOnce(BufferMapAsyncResult<&[T]>) + 'static,
T: 'static + Copy
{
let type_size = std::mem::size_of::<T>() as BufferAddress;
assert_ne!(type_size, 0);
assert_eq!(size % type_size, 0);

extern "C" fn buffer_map_read_callback_wrapper<T, F>(
status: wgn::BufferMapAsyncStatus,
data: *const u8,
user_data: *mut u8,
) where
F: FnOnce(BufferMapAsyncResult<&[T]>),
{
let user_data =
unsafe { Box::from_raw(user_data as *mut BufferMapReadAsyncUserData<T, F>) };
let data = unsafe {
slice::from_raw_parts(
data as *const T,
user_data.size as usize / std::mem::size_of::<T>(),
)
};
if let wgn::BufferMapAsyncStatus::Success = status {
(user_data.callback)(Ok(BufferAsyncMapping {
data,
buffer_id: user_data.buffer_id,
}));
} else {
(user_data.callback)(Err(()))
}
}
unimplemented!()
}

let user_data = Box::new(BufferMapReadAsyncUserData {
size,
callback,
buffer_id: self.id,
phantom: std::marker::PhantomData,
});
wgn::wgpu_buffer_map_read_async(
self.id,
start,
size,
buffer_map_read_callback_wrapper::<T, F>,
Box::into_raw(user_data) as *mut u8,
);
/// Unmaps the data, flushing if necessary.
/// Consumes self and returns it as a buffer.
pub fn unmap(self, device: &Device) -> Buffer {
unimplemented!()
}
}

pub struct ReadMappableBuffer {
// ...
}

impl ReadMappableBuffer {
// (See WriteMappableBuffer::map).
pub fn map(&mut self, device: &Device) -> &[u8] {
unimplemented!()
}

pub fn map_write_async<T, F>(&self, start: BufferAddress, size: BufferAddress, callback: F)
// (See WriteMappableBuffer::map_typed).
pub fn map_typed<T>(&mut self, device: &Device) -> &[T]
where
T: 'static + Copy,
F: FnOnce(BufferMapAsyncResult<&mut [T]>) + 'static,
T: 'static + Copy
{
let type_size = std::mem::size_of::<T>() as BufferAddress;
assert_ne!(type_size, 0);
assert_eq!(size % type_size, 0);

extern "C" fn buffer_map_write_callback_wrapper<T, F>(
status: wgn::BufferMapAsyncStatus,
data: *mut u8,
user_data: *mut u8,
) where
F: FnOnce(BufferMapAsyncResult<&mut [T]>),
{
let user_data =
unsafe { Box::from_raw(user_data as *mut BufferMapWriteAsyncUserData<T, F>) };
let data = unsafe {
slice::from_raw_parts_mut(
data as *mut T,
user_data.size as usize / std::mem::size_of::<T>(),
)
};
if let wgn::BufferMapAsyncStatus::Success = status {
(user_data.callback)(Ok(BufferAsyncMapping {
data,
buffer_id: user_data.buffer_id,
}));
} else {
(user_data.callback)(Err(()))
}
}
unimplemented!()
}

let user_data = Box::new(BufferMapWriteAsyncUserData {
size,
callback,
buffer_id: self.id,
phantom: std::marker::PhantomData,
});
wgn::wgpu_buffer_map_write_async(
self.id,
start,
size,
buffer_map_write_callback_wrapper::<T, F>,
Box::into_raw(user_data) as *mut u8,
);
// (See WriteMappableBuffer::unmap).
pub fn unmap(self, device: &Device) -> Buffer {
unimplemented!()
}
}

/// Flushes any pending write operations and unmaps the buffer from host memory.
pub fn unmap(&self) {
wgn::wgpu_buffer_unmap(self.id);
impl Buffer {
/// Asynchronously request a read-only mappable version of this buffer.
/// The callback will be run once the buffer can be mapped without causing stalls
///
/// self is consumed to prevent this buffer from being used until the buffer is
/// mappable. In order to get this buffer back, unmap the provided buffer
///
/// Note: If the device is dropped, or some other factor makes mapping impossible,
/// the callback is not invoked, and is dropped.
/// It could be argued that the callback should take a `Result<(&Device, ReadMappableBuffer), Error>`.
pub fn request_read_map<F>(self, range: Range<BufferAddress>, callback: F)
where
F: FnOnce(&Device, ReadMappableBuffer) + 'static,
{
unimplemented!()
}

/// (Writable version of the above).
pub fn request_write_map<F>(self, range: Range<BufferAddress>, callback: F)
where
F: FnOnce(&Device, WriteMappableBuffer) + 'static,
{
unimplemented!()
}
}

Expand Down