diff --git a/wgpu-core/src/device/mod.rs b/wgpu-core/src/device/mod.rs
index cf95f61b8ba..7fbd5fd3389 100644
--- a/wgpu-core/src/device/mod.rs
+++ b/wgpu-core/src/device/mod.rs
@@ -7,7 +7,9 @@ use crate::{
         BufferInitTracker, BufferInitTrackerAction, MemoryInitKind, TextureInitRange,
         TextureInitTracker, TextureInitTrackerAction,
     },
-    instance, pipeline, present, resource,
+    instance, pipeline, present,
+    resource::BufferMapAsyncStatus,
+    resource::{self, BufferMapState},
     track::{BindGroupStates, TextureSelector, Tracker},
     validation::{self, check_buffer_usage, check_texture_usage},
     FastHashMap, Label, LabelHelpers as _, LifeGuard, MultiRefCount, RefCount, Stored,
@@ -127,7 +129,7 @@ impl RenderPassContext {
     }
 }
 
-pub type BufferMapPendingClosure = (resource::BufferMapOperation, resource::BufferMapAsyncStatus);
+pub type BufferMapPendingClosure = (resource::BufferMapOperation, BufferMapAsyncStatus);
 
 #[derive(Default)]
 pub struct UserClosures {
@@ -3516,39 +3518,63 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
     ) -> Result<(), resource::DestroyError> {
         profiling::scope!("Buffer::destroy");
 
-        let hub = A::hub(self);
-        let mut token = Token::root();
+        let map_closure;
+        // Restrict the locks to this scope.
+        {
+            let hub = A::hub(self);
+            let mut token = Token::root();
 
-        //TODO: lock pending writes separately, keep the device read-only
-        let (mut device_guard, mut token) = hub.devices.write(&mut token);
+            //TODO: lock pending writes separately, keep the device read-only
+            let (mut device_guard, mut token) = hub.devices.write(&mut token);
 
-        log::info!("Buffer {:?} is destroyed", buffer_id);
-        let (mut buffer_guard, _) = hub.buffers.write(&mut token);
-        let buffer = buffer_guard
-            .get_mut(buffer_id)
-            .map_err(|_| resource::DestroyError::Invalid)?;
+            log::info!("Buffer {:?} is destroyed", buffer_id);
+            let (mut buffer_guard, _) = hub.buffers.write(&mut token);
+            let buffer = buffer_guard
+                .get_mut(buffer_id)
+                .map_err(|_| resource::DestroyError::Invalid)?;
 
-        let device = &mut device_guard[buffer.device_id.value];
+            let device = &mut device_guard[buffer.device_id.value];
 
-        #[cfg(feature = "trace")]
-        if let Some(ref trace) = device.trace {
-            trace.lock().add(trace::Action::FreeBuffer(buffer_id));
-        }
+            let need_unmap = match &buffer.map_state {
+                BufferMapState::Waiting(..) // To get the proper callback behavior.
+                | BufferMapState::Init { .. }
+                | BufferMapState::Active { .. }
+                => true,
+                _ => false,
+            };
 
-        let raw = buffer
-            .raw
-            .take()
-            .ok_or(resource::DestroyError::AlreadyDestroyed)?;
-        let temp = queue::TempResource::Buffer(raw);
+            map_closure = if need_unmap {
+                self.buffer_unmap_inner(buffer_id, buffer, device)
+                    .unwrap_or(None)
+            } else {
+                None
+            };
 
-        if device.pending_writes.dst_buffers.contains(&buffer_id) {
-            device.pending_writes.temp_resources.push(temp);
-        } else {
-            let last_submit_index = buffer.life_guard.life_count();
-            drop(buffer_guard);
-            device
-                .lock_life(&mut token)
-                .schedule_resource_destruction(temp, last_submit_index);
+            #[cfg(feature = "trace")]
+            if let Some(ref trace) = device.trace {
+                trace.lock().add(trace::Action::FreeBuffer(buffer_id));
+            }
+
+            let raw = buffer
+                .raw
+                .take()
+                .ok_or(resource::DestroyError::AlreadyDestroyed)?;
+            let temp = queue::TempResource::Buffer(raw);
+
+            if device.pending_writes.dst_buffers.contains(&buffer_id) {
+                device.pending_writes.temp_resources.push(temp);
+            } else {
+                let last_submit_index = buffer.life_guard.life_count();
+                drop(buffer_guard);
+                device
+                    .lock_life(&mut token)
+                    .schedule_resource_destruction(temp, last_submit_index);
+            }
+        }
+
+        // Note: outside the scope where locks are held when calling the callback
+        if let Some((operation, status)) = map_closure {
+            operation.callback.call(status);
         }
 
         Ok(())
@@ -5345,6 +5371,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
         };
 
         if range.start % wgt::MAP_ALIGNMENT != 0 || range.end % wgt::COPY_BUFFER_ALIGNMENT != 0 {
+            op.callback.call(BufferMapAsyncStatus::InvalidAlignment);
             return Err(resource::BufferAccessError::UnalignedRange);
         }
 
@@ -5357,10 +5384,11 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
         check_buffer_usage(buffer.usage, pub_usage)?;
         buffer.map_state = match buffer.map_state {
             resource::BufferMapState::Init { .. } | resource::BufferMapState::Active { .. } => {
+                op.callback.call(BufferMapAsyncStatus::AlreadyMapped);
                 return Err(resource::BufferAccessError::AlreadyMapped);
             }
             resource::BufferMapState::Waiting(_) => {
-                op.callback.call_error();
+                op.callback.call(BufferMapAsyncStatus::MapAlreadyPending);
                 return Ok(());
             }
             resource::BufferMapState::Idle => {
@@ -5460,19 +5488,9 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
     fn buffer_unmap_inner<A: HalApi>(
         &self,
         buffer_id: id::BufferId,
+        buffer: &mut resource::Buffer<A>,
+        device: &mut Device<A>,
     ) -> Result<Option<BufferMapPendingClosure>, resource::BufferAccessError> {
-        profiling::scope!("Buffer::unmap");
-
-        let hub = A::hub(self);
-        let mut token = Token::root();
-
-        let (mut device_guard, mut token) = hub.devices.write(&mut token);
-        let (mut buffer_guard, _) = hub.buffers.write(&mut token);
-        let buffer = buffer_guard
-            .get_mut(buffer_id)
-            .map_err(|_| resource::BufferAccessError::Invalid)?;
-        let device = &mut device_guard[buffer.device_id.value];
-
         log::debug!("Buffer {:?} map state -> Idle", buffer_id);
         match mem::replace(&mut buffer.map_state, resource::BufferMapState::Idle) {
             resource::BufferMapState::Init {
@@ -5574,9 +5592,26 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
         &self,
         buffer_id: id::BufferId,
     ) -> Result<(), resource::BufferAccessError> {
-        //Note: outside inner function so no locks are held when calling the callback
-        let closure = self.buffer_unmap_inner::<A>(buffer_id)?;
-        if let Some((operation, status)) = closure {
+        profiling::scope!("unmap", "Buffer");
+
+        let closure;
+        {
+            // Restrict the locks to this scope.
+            let hub = A::hub(self);
+            let mut token = Token::root();
+
+            let (mut device_guard, mut token) = hub.devices.write(&mut token);
+            let (mut buffer_guard, _) = hub.buffers.write(&mut token);
+            let buffer = buffer_guard
+                .get_mut(buffer_id)
+                .map_err(|_| resource::BufferAccessError::Invalid)?;
+            let device = &mut device_guard[buffer.device_id.value];
+
+            closure = self.buffer_unmap_inner(buffer_id, buffer, device)
+        }
+
+        // Note: outside the scope where locks are held when calling the callback
+        if let Some((operation, status)) = closure? {
            operation.callback.call(status);
         }
         Ok(())
diff --git a/wgpu-core/src/resource.rs b/wgpu-core/src/resource.rs
index 4c123587ac8..8a682f5ff19 100644
--- a/wgpu-core/src/resource.rs
+++ b/wgpu-core/src/resource.rs
@@ -21,6 +21,9 @@ pub enum BufferMapAsyncStatus {
     Aborted,
     Unknown,
     ContextLost,
+    InvalidAlignment,
+    AlreadyMapped,
+    MapAlreadyPending,
 }
 
 pub(crate) enum BufferMapState<A: hal::Api> {
@@ -66,6 +69,7 @@ enum BufferMapCallbackInner {
     C {
         inner: BufferMapCallbackC,
     },
+    None,
 }
 
 impl BufferMapCallback {
@@ -78,26 +82,39 @@ impl BufferMapCallback {
     /// # Safety
     ///
     /// - The callback pointer must be valid to call with the provided user_data pointer.
-    /// - Both pointers must point to 'static data as the callback may happen at an unspecified time.
+    /// - Both pointers must point to valid memory until the callback is invoked, which may happen at an unspecified time.
     pub unsafe fn from_c(inner: BufferMapCallbackC) -> Self {
         Self {
             inner: BufferMapCallbackInner::C { inner },
         }
     }
 
-    pub(crate) fn call(self, status: BufferMapAsyncStatus) {
-        match self.inner {
-            BufferMapCallbackInner::Rust { callback } => callback(status),
+    pub(crate) fn call(mut self, status: BufferMapAsyncStatus) {
+        self.call_impl(status);
+    }
+
+    fn call_impl(&mut self, status: BufferMapAsyncStatus) {
+        let callback = std::mem::replace(&mut self.inner, BufferMapCallbackInner::None);
+        match callback {
+            BufferMapCallbackInner::Rust { callback } => {
+                callback(status);
+            }
             // SAFETY: the contract of the call to from_c says that this unsafe is sound.
             BufferMapCallbackInner::C { inner } => unsafe {
-                (inner.callback)(status, inner.user_data)
+                (inner.callback)(status, inner.user_data);
             },
+            BufferMapCallbackInner::None => {}
         }
     }
+}
 
-    pub(crate) fn call_error(self) {
-        log::error!("wgpu_buffer_map_async failed: buffer mapping is pending");
-        self.call(BufferMapAsyncStatus::Error);
+impl Drop for BufferMapCallback {
+    fn drop(&mut self) {
+        // We need to make sure that the callback is always called.
+        // This will catch cases where we forget to call the callback (typically due to `?` error shortcuts
+        // in the control flow), but we should strive to call it manually to provide the most useful
+        // information.
+        self.call_impl(BufferMapAsyncStatus::Unknown);
     }
 }
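
For context, the core pattern this patch introduces is a map callback wrapper that is guaranteed to fire exactly once: `call` consumes the wrapper and marks it as fired, while a `Drop` impl reports `Unknown` if the wrapper is dropped before any explicit status was delivered (for example via a `?` early return). The standalone sketch below reproduces that idea outside of wgpu-core; the `MapStatus` and `MapCallback` names are hypothetical and are not part of the patch or the wgpu-core API.

// Standalone sketch of the "callback is always called exactly once" pattern
// applied by BufferMapCallback above. All names here are illustrative only.

#[derive(Debug, Clone, Copy)]
enum MapStatus {
    Success,
    Unknown,
}

enum MapCallbackInner {
    Rust { callback: Box<dyn FnOnce(MapStatus)> },
    // Replacing the inner value with `None` marks the callback as already
    // fired, so Drop knows it has nothing left to do.
    None,
}

struct MapCallback {
    inner: MapCallbackInner,
}

impl MapCallback {
    fn from_rust(callback: Box<dyn FnOnce(MapStatus)>) -> Self {
        Self {
            inner: MapCallbackInner::Rust { callback },
        }
    }

    // Consumes the wrapper and reports the given status.
    fn call(mut self, status: MapStatus) {
        self.call_impl(status);
    }

    fn call_impl(&mut self, status: MapStatus) {
        // Take the callback out so it can run at most once.
        match std::mem::replace(&mut self.inner, MapCallbackInner::None) {
            MapCallbackInner::Rust { callback } => callback(status),
            MapCallbackInner::None => {}
        }
    }
}

impl Drop for MapCallback {
    fn drop(&mut self) {
        // Safety net: if an early return dropped the wrapper before anyone
        // called it, still deliver *some* status rather than leaving the
        // waiter hanging forever.
        self.call_impl(MapStatus::Unknown);
    }
}

fn main() {
    // Explicit call: the Drop impl then sees `None` and stays silent.
    let cb = MapCallback::from_rust(Box::new(|status| println!("mapped: {status:?}")));
    cb.call(MapStatus::Success);

    // Forgotten call: dropping the wrapper reports `Unknown` exactly once.
    let forgotten = MapCallback::from_rust(Box::new(|status| println!("mapped: {status:?}")));
    drop(forgotten);
}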