Skip to content

Commit

Permalink
Ensure the BufferMapAsyncCallback is always called.
Browse files Browse the repository at this point in the history
This solves two issues on the Gecko side:
 - The callback cleans up after itself (the user data is deleted at the end of the callback), so dropping the callback without calling it is a memory leak. I can't think of a better way to implement this on the C++ side, since there can be any number of callbacks alive at any time, each living for an unspecified amount of time.
 - This makes it easier to implement the error reporting of the WebGPU spec.
  • Loading branch information
nical committed Jul 4, 2022
1 parent c36eb9f commit a4e649b
Show file tree
Hide file tree
Showing 2 changed files with 105 additions and 53 deletions.
125 changes: 80 additions & 45 deletions wgpu-core/src/device/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,9 @@ use crate::{
BufferInitTracker, BufferInitTrackerAction, MemoryInitKind, TextureInitRange,
TextureInitTracker, TextureInitTrackerAction,
},
instance, pipeline, present, resource,
instance, pipeline, present,
resource::BufferMapAsyncStatus,
resource::{self, BufferMapState},
track::{BindGroupStates, TextureSelector, Tracker},
validation::{self, check_buffer_usage, check_texture_usage},
FastHashMap, Label, LabelHelpers as _, LifeGuard, MultiRefCount, RefCount, Stored,
Expand Down Expand Up @@ -127,7 +129,7 @@ impl RenderPassContext {
}
}

pub type BufferMapPendingClosure = (resource::BufferMapOperation, resource::BufferMapAsyncStatus);
pub type BufferMapPendingClosure = (resource::BufferMapOperation, BufferMapAsyncStatus);

#[derive(Default)]
pub struct UserClosures {
Expand Down Expand Up @@ -3516,39 +3518,63 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
) -> Result<(), resource::DestroyError> {
profiling::scope!("Buffer::destroy");

let hub = A::hub(self);
let mut token = Token::root();
let map_closure;
// Restrict the locks to this scope.
{
let hub = A::hub(self);
let mut token = Token::root();

//TODO: lock pending writes separately, keep the device read-only
let (mut device_guard, mut token) = hub.devices.write(&mut token);
//TODO: lock pending writes separately, keep the device read-only
let (mut device_guard, mut token) = hub.devices.write(&mut token);

log::info!("Buffer {:?} is destroyed", buffer_id);
let (mut buffer_guard, _) = hub.buffers.write(&mut token);
let buffer = buffer_guard
.get_mut(buffer_id)
.map_err(|_| resource::DestroyError::Invalid)?;
log::info!("Buffer {:?} is destroyed", buffer_id);
let (mut buffer_guard, _) = hub.buffers.write(&mut token);
let buffer = buffer_guard
.get_mut(buffer_id)
.map_err(|_| resource::DestroyError::Invalid)?;

let device = &mut device_guard[buffer.device_id.value];
let device = &mut device_guard[buffer.device_id.value];

#[cfg(feature = "trace")]
if let Some(ref trace) = device.trace {
trace.lock().add(trace::Action::FreeBuffer(buffer_id));
}
let need_unmap = match &buffer.map_state {
BufferMapState::Waiting(..) // To get the proper callback behavior.
| BufferMapState::Init { .. }
| BufferMapState::Active { .. }
=> true,
_ => false,
};

let raw = buffer
.raw
.take()
.ok_or(resource::DestroyError::AlreadyDestroyed)?;
let temp = queue::TempResource::Buffer(raw);
map_closure = if need_unmap {
self.buffer_unmap_inner(buffer_id, buffer, device)
.unwrap_or(None)
} else {
None
};

if device.pending_writes.dst_buffers.contains(&buffer_id) {
device.pending_writes.temp_resources.push(temp);
} else {
let last_submit_index = buffer.life_guard.life_count();
drop(buffer_guard);
device
.lock_life(&mut token)
.schedule_resource_destruction(temp, last_submit_index);
#[cfg(feature = "trace")]
if let Some(ref trace) = device.trace {
trace.lock().add(trace::Action::FreeBuffer(buffer_id));
}

let raw = buffer
.raw
.take()
.ok_or(resource::DestroyError::AlreadyDestroyed)?;
let temp = queue::TempResource::Buffer(raw);

if device.pending_writes.dst_buffers.contains(&buffer_id) {
device.pending_writes.temp_resources.push(temp);
} else {
let last_submit_index = buffer.life_guard.life_count();
drop(buffer_guard);
device
.lock_life(&mut token)
.schedule_resource_destruction(temp, last_submit_index);
}
}

// Note: outside the scope where locks are held when calling the callback
if let Some((operation, status)) = map_closure {
operation.callback.call(status);
}

Ok(())
Expand Down Expand Up @@ -5345,6 +5371,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
};

if range.start % wgt::MAP_ALIGNMENT != 0 || range.end % wgt::COPY_BUFFER_ALIGNMENT != 0 {
op.callback.call(BufferMapAsyncStatus::InvalidAlignment);
return Err(resource::BufferAccessError::UnalignedRange);
}

Expand All @@ -5357,10 +5384,11 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
check_buffer_usage(buffer.usage, pub_usage)?;
buffer.map_state = match buffer.map_state {
resource::BufferMapState::Init { .. } | resource::BufferMapState::Active { .. } => {
op.callback.call(BufferMapAsyncStatus::AlreadyMapped);
return Err(resource::BufferAccessError::AlreadyMapped);
}
resource::BufferMapState::Waiting(_) => {
op.callback.call_error();
op.callback.call(BufferMapAsyncStatus::MapAlreadyPending);
return Ok(());
}
resource::BufferMapState::Idle => {
Expand Down Expand Up @@ -5460,19 +5488,9 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
fn buffer_unmap_inner<A: HalApi>(
&self,
buffer_id: id::BufferId,
buffer: &mut resource::Buffer<A>,
device: &mut Device<A>,
) -> Result<Option<BufferMapPendingClosure>, resource::BufferAccessError> {
profiling::scope!("Buffer::unmap");

let hub = A::hub(self);
let mut token = Token::root();

let (mut device_guard, mut token) = hub.devices.write(&mut token);
let (mut buffer_guard, _) = hub.buffers.write(&mut token);
let buffer = buffer_guard
.get_mut(buffer_id)
.map_err(|_| resource::BufferAccessError::Invalid)?;
let device = &mut device_guard[buffer.device_id.value];

log::debug!("Buffer {:?} map state -> Idle", buffer_id);
match mem::replace(&mut buffer.map_state, resource::BufferMapState::Idle) {
resource::BufferMapState::Init {
Expand Down Expand Up @@ -5574,9 +5592,26 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
&self,
buffer_id: id::BufferId,
) -> Result<(), resource::BufferAccessError> {
//Note: outside inner function so no locks are held when calling the callback
let closure = self.buffer_unmap_inner::<A>(buffer_id)?;
if let Some((operation, status)) = closure {
profiling::scope!("unmap", "Buffer");

let closure;
{
// Restrict the locks to this scope.
let hub = A::hub(self);
let mut token = Token::root();

let (mut device_guard, mut token) = hub.devices.write(&mut token);
let (mut buffer_guard, _) = hub.buffers.write(&mut token);
let buffer = buffer_guard
.get_mut(buffer_id)
.map_err(|_| resource::BufferAccessError::Invalid)?;
let device = &mut device_guard[buffer.device_id.value];

closure = self.buffer_unmap_inner(buffer_id, buffer, device)
}

// Note: outside the scope where locks are held when calling the callback
if let Some((operation, status)) = closure? {
operation.callback.call(status);
}
Ok(())
Expand Down
33 changes: 25 additions & 8 deletions wgpu-core/src/resource.rs
Original file line number Diff line number Diff line change
Expand Up @@ -21,6 +21,9 @@ pub enum BufferMapAsyncStatus {
Aborted,
Unknown,
ContextLost,
InvalidAlignment,
AlreadyMapped,
MapAlreadyPending,
}

pub(crate) enum BufferMapState<A: hal::Api> {
Expand Down Expand Up @@ -66,6 +69,7 @@ enum BufferMapCallbackInner {
C {
inner: BufferMapCallbackC,
},
None,
}

impl BufferMapCallback {
Expand All @@ -78,26 +82,39 @@ impl BufferMapCallback {
/// # Safety
///
/// - The callback pointer must be valid to call with the provided user_data pointer.
/// - Both pointers must point to 'static data as the callback may happen at an unspecified time.
/// - Both pointers must point to valid memory until the callback is invoked, which may happen at an unspecified time.
pub unsafe fn from_c(inner: BufferMapCallbackC) -> Self {
    // Store the raw C callback/user-data pair as-is; the soundness of
    // invoking it later rests on the safety contract documented above.
    let inner = BufferMapCallbackInner::C { inner };
    Self { inner }
}

pub(crate) fn call(self, status: BufferMapAsyncStatus) {
match self.inner {
BufferMapCallbackInner::Rust { callback } => callback(status),
/// Invoke the callback with `status`, consuming `self`.
///
/// Delegates to `call_impl`, which replaces the inner callback with
/// `None`; when the consumed `self` is subsequently dropped, the `Drop`
/// impl therefore finds nothing left to invoke, so the callback fires
/// exactly once.
pub(crate) fn call(mut self, status: BufferMapAsyncStatus) {
    self.call_impl(status);
}

fn call_impl(&mut self, status: BufferMapAsyncStatus) {
    // Move the callback out, leaving `None` in its place, so any later
    // invocation (in particular the one from `Drop`) becomes a no-op:
    // each callback is guaranteed to fire at most once.
    match std::mem::replace(&mut self.inner, BufferMapCallbackInner::None) {
        BufferMapCallbackInner::Rust { callback } => callback(status),
        // SAFETY: the contract of the call to from_c says that this unsafe is sound.
        BufferMapCallbackInner::C { inner } => unsafe {
            (inner.callback)(status, inner.user_data);
        },
        // Already consumed by an earlier call — nothing to do.
        BufferMapCallbackInner::None => {}
    }
}
}

pub(crate) fn call_error(self) {
log::error!("wgpu_buffer_map_async failed: buffer mapping is pending");
self.call(BufferMapAsyncStatus::Error);
impl Drop for BufferMapCallback {
    /// Safety net ensuring the callback is always invoked, even if the
    /// owning code path drops it without calling it explicitly.
    fn drop(&mut self) {
        // We need to make sure that the callback is always called.
        // This will catch cases where we forget to call the callback (typically due to `?` error shortcuts
        // in the control flow), but we should strive to call it manually to provide the most useful
        // information. `call_impl` is a no-op if the callback was already consumed.
        self.call_impl(BufferMapAsyncStatus::Unknown);
    }
}

Expand Down

0 comments on commit a4e649b

Please sign in to comment.