From 324de1bef620e88a2507a434b7584933104fbe4c Mon Sep 17 00:00:00 2001
From: Nicolas Silva
Date: Fri, 8 Jul 2022 08:29:05 +0200
Subject: [PATCH] Ensure the BufferMapCallback is always called. (#2848)

* Ensure the BufferMapAsyncCallback is always called.

This solves two issues on the Gecko side:
- The callback cleans up after itself (the user data is deleted at the end of
  the callback), so dropping the callback without calling it is a memory leak.
  I can't think of a better way to implement this on the C++ side, since there
  can be any number of callbacks at any time, living for an unspecified amount
  of time.
- This makes it easier to implement the error reporting of the WebGPU spec.

* Update the changelog.
---
 CHANGELOG.md                |   1 +
 wgpu-core/src/device/mod.rs | 221 ++++++++++++++++++++++++------------
 wgpu-core/src/resource.rs   |  59 +++++++---
 3 files changed, 196 insertions(+), 85 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index a02fac589a..bc40b229c5 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -47,6 +47,7 @@ Bottom level categories:
 - Allow running `get_texture_format_features` on unsupported texture formats (returning no flags) by @cwfitzgerald in [#2856](https://github.com/gfx-rs/wgpu/pull/2856)
 - Allow multi-sampled textures that are supported by the device but not WebGPU if `TEXTURE_ADAPTER_SPECIFIC_FORMAT_FEATURES` is enabled by @cwfitzgerald in [#2856](https://github.com/gfx-rs/wgpu/pull/2856)
 - `get_texture_format_features` only lists the COPY_* usages if the adapter actually supports that usage by @cwfitzgerald in [#2856](https://github.com/gfx-rs/wgpu/pull/2856)
+- Improve the validation and error reporting of buffer mappings by @nical in [#2848](https://github.com/gfx-rs/wgpu/pull/2848)
 
 #### DX12
 - `DownlevelCapabilities::default()` now returns the `ANISOTROPIC_FILTERING` flag set to true so DX12 lists `ANISOTROPIC_FILTERING` as true again by @cwfitzgerald in [#2851](https://github.com/gfx-rs/wgpu/pull/2851)
diff --git a/wgpu-core/src/device/mod.rs b/wgpu-core/src/device/mod.rs
index 68a012eced..eb96d6d7f3 100644
--- a/wgpu-core/src/device/mod.rs
+++ b/wgpu-core/src/device/mod.rs
@@ -7,7 +7,9 @@ use crate::{
         BufferInitTracker, BufferInitTrackerAction, MemoryInitKind, TextureInitRange,
         TextureInitTracker, TextureInitTrackerAction,
     },
-    instance, pipeline, present, resource,
+    instance, pipeline, present,
+    resource::{self, BufferMapState},
+    resource::{BufferAccessError, BufferMapAsyncStatus, BufferMapOperation},
     track::{BindGroupStates, TextureSelector, Tracker},
     validation::{self, check_buffer_usage, check_texture_usage},
     FastHashMap, Label, LabelHelpers as _, LifeGuard, MultiRefCount, RefCount, Stored,
@@ -127,7 +129,7 @@ impl RenderPassContext {
     }
 }
 
-pub type BufferMapPendingClosure = (resource::BufferMapOperation, resource::BufferMapAsyncStatus);
+pub type BufferMapPendingClosure = (BufferMapOperation, BufferMapAsyncStatus);
 
 #[derive(Default)]
 pub struct UserClosures {
@@ -159,7 +161,7 @@ fn map_buffer(
     offset: BufferAddress,
     size: BufferAddress,
     kind: HostMap,
-) -> Result<ptr::NonNull<u8>, resource::BufferAccessError> {
+) -> Result<ptr::NonNull<u8>, BufferAccessError> {
     let mapping = unsafe {
         raw.map_buffer(buffer.raw.as_ref().unwrap(), offset..offset + size)
             .map_err(DeviceError::from)?
@@ -3409,7 +3411,7 @@ impl Global {
         buffer_id: id::BufferId,
         offset: BufferAddress,
         data: &[u8],
-    ) -> Result<(), resource::BufferAccessError> {
+    ) -> Result<(), BufferAccessError> {
         profiling::scope!("Device::set_buffer_sub_data");
 
         let hub = A::hub(self);
@@ -3422,7 +3424,7 @@ impl Global {
             .map_err(|_| DeviceError::Invalid)?;
         let buffer = buffer_guard
             .get_mut(buffer_id)
-            .map_err(|_| resource::BufferAccessError::Invalid)?;
+            .map_err(|_| BufferAccessError::Invalid)?;
         check_buffer_usage(buffer.usage, wgt::BufferUsages::MAP_WRITE)?;
         //assert!(buffer isn't used by the GPU);
 
@@ -3466,7 +3468,7 @@ impl Global {
         buffer_id: id::BufferId,
         offset: BufferAddress,
         data: &mut [u8],
-    ) -> Result<(), resource::BufferAccessError> {
+    ) -> Result<(), BufferAccessError> {
         profiling::scope!("Device::get_buffer_sub_data");
 
         let hub = A::hub(self);
@@ -3479,7 +3481,7 @@ impl Global {
             .map_err(|_| DeviceError::Invalid)?;
         let buffer = buffer_guard
             .get_mut(buffer_id)
-            .map_err(|_| resource::BufferAccessError::Invalid)?;
+            .map_err(|_| BufferAccessError::Invalid)?;
         check_buffer_usage(buffer.usage, wgt::BufferUsages::MAP_READ)?;
         //assert!(buffer isn't used by the GPU);
 
@@ -3515,39 +3517,59 @@ impl Global {
     ) -> Result<(), resource::DestroyError> {
         profiling::scope!("Buffer::destroy");
 
-        let hub = A::hub(self);
-        let mut token = Token::root();
+        let map_closure;
+        // Restrict the locks to this scope.
+        {
+            let hub = A::hub(self);
+            let mut token = Token::root();
 
-        //TODO: lock pending writes separately, keep the device read-only
-        let (mut device_guard, mut token) = hub.devices.write(&mut token);
+            //TODO: lock pending writes separately, keep the device read-only
+            let (mut device_guard, mut token) = hub.devices.write(&mut token);
 
-        log::info!("Buffer {:?} is destroyed", buffer_id);
-        let (mut buffer_guard, _) = hub.buffers.write(&mut token);
-        let buffer = buffer_guard
-            .get_mut(buffer_id)
-            .map_err(|_| resource::DestroyError::Invalid)?;
+            log::info!("Buffer {:?} is destroyed", buffer_id);
+            let (mut buffer_guard, _) = hub.buffers.write(&mut token);
+            let buffer = buffer_guard
+                .get_mut(buffer_id)
+                .map_err(|_| resource::DestroyError::Invalid)?;
 
-        let device = &mut device_guard[buffer.device_id.value];
+            let device = &mut device_guard[buffer.device_id.value];
 
-        #[cfg(feature = "trace")]
-        if let Some(ref trace) = device.trace {
-            trace.lock().add(trace::Action::FreeBuffer(buffer_id));
-        }
+            map_closure = match &buffer.map_state {
+                &BufferMapState::Waiting(..) // To get the proper callback behavior.
+                | &BufferMapState::Init { .. }
+                | &BufferMapState::Active { .. }
+                => {
+                    self.buffer_unmap_inner(buffer_id, buffer, device)
+                        .unwrap_or(None)
+                }
+                _ => None,
+            };
 
-        let raw = buffer
-            .raw
-            .take()
-            .ok_or(resource::DestroyError::AlreadyDestroyed)?;
-        let temp = queue::TempResource::Buffer(raw);
+            #[cfg(feature = "trace")]
+            if let Some(ref trace) = device.trace {
+                trace.lock().add(trace::Action::FreeBuffer(buffer_id));
+            }
 
-        if device.pending_writes.dst_buffers.contains(&buffer_id) {
-            device.pending_writes.temp_resources.push(temp);
-        } else {
-            let last_submit_index = buffer.life_guard.life_count();
-            drop(buffer_guard);
-            device
-                .lock_life(&mut token)
-                .schedule_resource_destruction(temp, last_submit_index);
+            let raw = buffer
+                .raw
+                .take()
+                .ok_or(resource::DestroyError::AlreadyDestroyed)?;
+            let temp = queue::TempResource::Buffer(raw);
+
+            if device.pending_writes.dst_buffers.contains(&buffer_id) {
+                device.pending_writes.temp_resources.push(temp);
+            } else {
+                let last_submit_index = buffer.life_guard.life_count();
+                drop(buffer_guard);
+                device
+                    .lock_life(&mut token)
+                    .schedule_resource_destruction(temp, last_submit_index);
+            }
+        }
+
+        // Note: outside the scope where locks are held when calling the callback
+        if let Some((operation, status)) = map_closure {
+            operation.callback.call(status);
         }
 
         Ok(())
@@ -5331,8 +5353,50 @@ impl Global {
         &self,
         buffer_id: id::BufferId,
         range: Range<BufferAddress>,
-        op: resource::BufferMapOperation,
-    ) -> Result<(), resource::BufferAccessError> {
+        op: BufferMapOperation,
+    ) -> Result<(), BufferAccessError> {
+        // User callbacks must not be called while holding buffer_map_async_inner's locks, so we
+        // defer the error callback if it needs to be called immediately (typically when running
+        // into errors).
+        if let Err((op, err)) = self.buffer_map_async_inner::<A>(buffer_id, range, op) {
+            let status = match &err {
+                &BufferAccessError::Device(_) => BufferMapAsyncStatus::ContextLost,
+                &BufferAccessError::Invalid | &BufferAccessError::Destroyed => {
+                    BufferMapAsyncStatus::Invalid
+                }
+                &BufferAccessError::AlreadyMapped => BufferMapAsyncStatus::AlreadyMapped,
+                &BufferAccessError::MapAlreadyPending => BufferMapAsyncStatus::MapAlreadyPending,
+                &BufferAccessError::MissingBufferUsage(_) => {
+                    BufferMapAsyncStatus::InvalidUsageFlags
+                }
+                &BufferAccessError::UnalignedRange
+                | &BufferAccessError::UnalignedRangeSize { .. }
+                | &BufferAccessError::UnalignedOffset { .. } => {
+                    BufferMapAsyncStatus::InvalidAlignment
+                }
+                &BufferAccessError::OutOfBoundsUnderrun { .. }
+                | &BufferAccessError::OutOfBoundsOverrun { .. } => {
+                    BufferMapAsyncStatus::InvalidRange
+                }
+                _ => BufferMapAsyncStatus::Error,
+            };
+
+            op.callback.call(status);
+
+            return Err(err);
+        }
+
+        Ok(())
+    }
+
+    // Returns the mapping callback in case of error so that the callback can be fired outside
+    // of the locks that are held in this function.
+    fn buffer_map_async_inner<A: HalApi>(
+        &self,
+        buffer_id: id::BufferId,
+        range: Range<BufferAddress>,
+        op: BufferMapOperation,
+    ) -> Result<(), (BufferMapOperation, BufferAccessError)> {
         profiling::scope!("Buffer::map_async");
 
         let hub = A::hub(self);
@@ -5344,23 +5408,32 @@ impl Global {
         };
 
         if range.start % wgt::MAP_ALIGNMENT != 0 || range.end % wgt::COPY_BUFFER_ALIGNMENT != 0 {
-            return Err(resource::BufferAccessError::UnalignedRange);
+            return Err((op, BufferAccessError::UnalignedRange));
         }
 
         let (device_id, ref_count) = {
             let (mut buffer_guard, _) = hub.buffers.write(&mut token);
             let buffer = buffer_guard
                 .get_mut(buffer_id)
-                .map_err(|_| resource::BufferAccessError::Invalid)?;
+                .map_err(|_| BufferAccessError::Invalid);
+
+            let buffer = match buffer {
+                Ok(b) => b,
+                Err(e) => {
+                    return Err((op, e));
+                }
+            };
+
+            if let Err(e) = check_buffer_usage(buffer.usage, pub_usage) {
+                return Err((op, e.into()));
+            }
 
-            check_buffer_usage(buffer.usage, pub_usage)?;
             buffer.map_state = match buffer.map_state {
                 resource::BufferMapState::Init { .. } | resource::BufferMapState::Active { .. } => {
-                    return Err(resource::BufferAccessError::AlreadyMapped);
+                    return Err((op, BufferAccessError::AlreadyMapped));
                 }
                 resource::BufferMapState::Waiting(_) => {
-                    op.callback.call_error();
-                    return Ok(());
+                    return Err((op, BufferAccessError::MapAlreadyPending));
                 }
                 resource::BufferMapState::Idle => {
                     resource::BufferMapState::Waiting(resource::BufferPendingMapping {
@@ -5399,7 +5472,7 @@ impl Global {
         buffer_id: id::BufferId,
         offset: BufferAddress,
         size: Option<BufferAddress>,
-    ) -> Result<(*mut u8, u64), resource::BufferAccessError> {
+    ) -> Result<(*mut u8, u64), BufferAccessError> {
         profiling::scope!("Buffer::get_mapped_range");
 
         let hub = A::hub(self);
@@ -5407,7 +5480,7 @@ impl Global {
         let (buffer_guard, _) = hub.buffers.read(&mut token);
         let buffer = buffer_guard
             .get(buffer_id)
-            .map_err(|_| resource::BufferAccessError::Invalid)?;
+            .map_err(|_| BufferAccessError::Invalid)?;
 
         let range_size = if let Some(size) = size {
             size
@@ -5418,17 +5491,17 @@ impl Global {
         };
 
         if offset % wgt::MAP_ALIGNMENT != 0 {
-            return Err(resource::BufferAccessError::UnalignedOffset { offset });
+            return Err(BufferAccessError::UnalignedOffset { offset });
         }
         if range_size % wgt::COPY_BUFFER_ALIGNMENT != 0 {
-            return Err(resource::BufferAccessError::UnalignedRangeSize { range_size });
+            return Err(BufferAccessError::UnalignedRangeSize { range_size });
         }
 
         match buffer.map_state {
             resource::BufferMapState::Init { ptr, .. } => {
                 // offset (u64) can not be < 0, so no need to validate the lower bound
                 if offset + range_size > buffer.size {
-                    return Err(resource::BufferAccessError::OutOfBoundsOverrun {
+                    return Err(BufferAccessError::OutOfBoundsOverrun {
                         index: offset + range_size - 1,
                         max: buffer.size,
                     });
@@ -5437,13 +5510,13 @@ impl Global {
             }
             resource::BufferMapState::Active { ptr, ref range, .. } => {
                 if offset < range.start {
-                    return Err(resource::BufferAccessError::OutOfBoundsUnderrun {
+                    return Err(BufferAccessError::OutOfBoundsUnderrun {
                         index: offset,
                         min: range.start,
                     });
                 }
                 if offset + range_size > range.end {
-                    return Err(resource::BufferAccessError::OutOfBoundsOverrun {
+                    return Err(BufferAccessError::OutOfBoundsOverrun {
                         index: offset + range_size - 1,
                         max: range.end,
                     });
@@ -5451,7 +5524,7 @@ impl Global {
                 unsafe { Ok((ptr.as_ptr().offset(offset as isize), range_size)) }
             }
             resource::BufferMapState::Idle | resource::BufferMapState::Waiting(_) => {
-                Err(resource::BufferAccessError::NotMapped)
+                Err(BufferAccessError::NotMapped)
             }
         }
     }
@@ -5459,19 +5532,9 @@ impl Global {
     fn buffer_unmap_inner<A: HalApi>(
         &self,
         buffer_id: id::BufferId,
-    ) -> Result<Option<BufferMapPendingClosure>, resource::BufferAccessError> {
-        profiling::scope!("Buffer::unmap");
-
-        let hub = A::hub(self);
-        let mut token = Token::root();
-
-        let (mut device_guard, mut token) = hub.devices.write(&mut token);
-        let (mut buffer_guard, _) = hub.buffers.write(&mut token);
-        let buffer = buffer_guard
-            .get_mut(buffer_id)
-            .map_err(|_| resource::BufferAccessError::Invalid)?;
-        let device = &mut device_guard[buffer.device_id.value];
-
+        buffer: &mut resource::Buffer<A>,
+        device: &mut Device<A>,
+    ) -> Result<Option<BufferMapPendingClosure>, BufferAccessError> {
         log::debug!("Buffer {:?} map state -> Idle", buffer_id);
         match mem::replace(&mut buffer.map_state, resource::BufferMapState::Idle) {
             resource::BufferMapState::Init {
@@ -5501,10 +5564,7 @@ impl Global {
             }
         }
 
-        let raw_buf = buffer
-            .raw
-            .as_ref()
-            .ok_or(resource::BufferAccessError::Destroyed)?;
+        let raw_buf = buffer.raw.as_ref().ok_or(BufferAccessError::Destroyed)?;
 
         buffer.life_guard.use_at(device.active_submission_index + 1);
 
         let region = wgt::BufferSize::new(buffer.size).map(|size| hal::BufferCopy {
@@ -5535,7 +5595,7 @@ impl Global {
                 device.pending_writes.dst_buffers.insert(buffer_id);
             }
             resource::BufferMapState::Idle => {
-                return Err(resource::BufferAccessError::NotMapped);
+                return Err(BufferAccessError::NotMapped);
             }
             resource::BufferMapState::Waiting(pending) => {
                 return Ok(Some((pending.op, resource::BufferMapAsyncStatus::Aborted)));
@@ -5572,10 +5632,27 @@ impl Global {
     pub fn buffer_unmap<A: HalApi>(
         &self,
         buffer_id: id::BufferId,
-    ) -> Result<(), resource::BufferAccessError> {
-        //Note: outside inner function so no locks are held when calling the callback
-        let closure = self.buffer_unmap_inner::<A>(buffer_id)?;
-        if let Some((operation, status)) = closure {
+    ) -> Result<(), BufferAccessError> {
+        profiling::scope!("unmap", "Buffer");
+
+        let closure;
+        {
+            // Restrict the locks to this scope.
+            let hub = A::hub(self);
+            let mut token = Token::root();
+
+            let (mut device_guard, mut token) = hub.devices.write(&mut token);
+            let (mut buffer_guard, _) = hub.buffers.write(&mut token);
+            let buffer = buffer_guard
+                .get_mut(buffer_id)
+                .map_err(|_| BufferAccessError::Invalid)?;
+            let device = &mut device_guard[buffer.device_id.value];
+
+            closure = self.buffer_unmap_inner(buffer_id, buffer, device)
+        }
+
+        // Note: outside the scope where locks are held when calling the callback
+        if let Some((operation, status)) = closure? {
            operation.callback.call(status);
         }
         Ok(())
diff --git a/wgpu-core/src/resource.rs b/wgpu-core/src/resource.rs
index 4c123587ac..ed08d20f23 100644
--- a/wgpu-core/src/resource.rs
+++ b/wgpu-core/src/resource.rs
@@ -13,14 +13,37 @@ use thiserror::Error;
 
 use std::{borrow::Borrow, num::NonZeroU8, ops::Range, ptr::NonNull};
 
+/// The status code provided to the buffer mapping callback.
+///
+/// This is very similar to `Result<(), BufferAccessError>`, except that this is FFI-friendly.
 #[repr(C)]
 #[derive(Debug)]
 pub enum BufferMapAsyncStatus {
+    /// The buffer is successfully mapped, `get_mapped_range` can be called.
+    ///
+    /// All other variants of this enum represent failures to map the buffer.
     Success,
+    /// The buffer is already mapped.
+    ///
+    /// While this is treated as an error, it does not prevent the mapped range from being accessed.
+    AlreadyMapped,
+    /// Mapping was already requested.
+    MapAlreadyPending,
+    /// An unknown error.
     Error,
+    /// Mapping was aborted (by unmapping or destroying the buffer before mapping
+    /// happened).
     Aborted,
-    Unknown,
+    /// The context is lost.
     ContextLost,
+    /// The buffer is in an invalid state.
+    Invalid,
+    /// The range isn't fully contained in the buffer.
+    InvalidRange,
+    /// The range isn't properly aligned.
+    InvalidAlignment,
+    /// Incompatible usage flags.
+    InvalidUsageFlags,
 }
 
 pub(crate) enum BufferMapState {
@@ -56,7 +79,7 @@ unsafe impl Send for BufferMapCallbackC {}
 pub struct BufferMapCallback {
     // We wrap this so creating the enum in the C variant can be unsafe,
     // allowing our call function to be safe.
-    inner: BufferMapCallbackInner,
+    inner: Option<BufferMapCallbackInner>,
 }
 
 enum BufferMapCallbackInner {
@@ -71,33 +94,41 @@ impl BufferMapCallback {
     pub fn from_rust(callback: Box<dyn FnOnce(BufferMapAsyncStatus) + Send + 'static>) -> Self {
         Self {
-            inner: BufferMapCallbackInner::Rust { callback },
+            inner: Some(BufferMapCallbackInner::Rust { callback }),
         }
     }
 
     /// # Safety
     ///
     /// - The callback pointer must be valid to call with the provided user_data pointer.
-    /// - Both pointers must point to 'static data as the callback may happen at an unspecified time.
+    /// - Both pointers must point to valid memory until the callback is invoked, which may happen at an unspecified time.
     pub unsafe fn from_c(inner: BufferMapCallbackC) -> Self {
         Self {
-            inner: BufferMapCallbackInner::C { inner },
+            inner: Some(BufferMapCallbackInner::C { inner }),
         }
     }
 
-    pub(crate) fn call(self, status: BufferMapAsyncStatus) {
-        match self.inner {
-            BufferMapCallbackInner::Rust { callback } => callback(status),
+    pub(crate) fn call(mut self, status: BufferMapAsyncStatus) {
+        match self.inner.take() {
+            Some(BufferMapCallbackInner::Rust { callback }) => {
+                callback(status);
+            }
             // SAFETY: the contract of the call to from_c says that this unsafe is sound.
-            BufferMapCallbackInner::C { inner } => unsafe {
-                (inner.callback)(status, inner.user_data)
+            Some(BufferMapCallbackInner::C { inner }) => unsafe {
+                (inner.callback)(status, inner.user_data);
             },
+            None => {
+                panic!("Map callback invoked twice");
+            }
         }
     }
+}
 
-    pub(crate) fn call_error(self) {
-        log::error!("wgpu_buffer_map_async failed: buffer mapping is pending");
-        self.call(BufferMapAsyncStatus::Error);
+impl Drop for BufferMapCallback {
+    fn drop(&mut self) {
+        if self.inner.is_some() {
+            panic!("Map callback was leaked");
+        }
     }
 }
 
@@ -116,6 +147,8 @@ pub enum BufferAccessError {
     Destroyed,
     #[error("buffer is already mapped")]
     AlreadyMapped,
+    #[error("buffer map is pending")]
+    MapAlreadyPending,
     #[error(transparent)]
     MissingBufferUsage(#[from] MissingBufferUsageError),
     #[error("buffer is not mapped")]
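
A usage sketch of the behaviour this patch guarantees (not part of the diff): a Rust-side caller hands `buffer_map_async` a boxed closure through `BufferMapCallback::from_rust` and can now rely on that closure running exactly once, on success as well as on every validation or device error. The module paths and the `host`/`callback` fields of `BufferMapOperation` below follow wgpu-core as it stood around this change and should be read as assumptions, since they are not shown in the patch itself.

    use std::sync::mpsc::Sender;

    use wgpu_core::device::HostMap;
    use wgpu_core::resource::{BufferMapAsyncStatus, BufferMapCallback, BufferMapOperation};

    /// Builds a read-mapping operation whose callback reports the outcome on a channel.
    fn read_mapping_op(on_done: Sender<BufferMapAsyncStatus>) -> BufferMapOperation {
        let callback = BufferMapCallback::from_rust(Box::new(move |status| {
            // With this patch the closure runs exactly once on every path:
            // success, validation error, device error, or the buffer being
            // unmapped/destroyed before the mapping happened. State captured
            // here (the sender) is therefore always released.
            let _ = on_done.send(status);
        }));

        BufferMapOperation {
            host: HostMap::Read,
            callback,
        }
    }

The same guarantee is what makes the C-side pattern from the commit message safe: the user data can be freed unconditionally at the end of the callback, because the callback is never silently dropped.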
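
The `Drop` implementation added to `BufferMapCallback` turns the leak described in the commit message into a loud failure. A minimal sketch of that invariant, written as a hypothetical test against wgpu-core rather than code from the patch:

    use wgpu_core::resource::BufferMapCallback;

    #[test]
    #[should_panic(expected = "Map callback was leaked")]
    fn dropping_an_uncalled_callback_panics() {
        let callback = BufferMapCallback::from_rust(Box::new(|_status| {}));
        // Dropping the callback without calling it now panics instead of
        // silently leaking whatever the closure (or C user_data) owns.
        drop(callback);
    }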