diff --git a/wgpu-core/src/device/global.rs b/wgpu-core/src/device/global.rs
index ded8c41da8..54627cd650 100644
--- a/wgpu-core/src/device/global.rs
+++ b/wgpu-core/src/device/global.rs
@@ -486,10 +486,15 @@ impl Global {
 
         let hub = A::hub(self);
 
-        let mut buffer_guard = hub.buffers.write();
-        let buffer = buffer_guard
-            .get_and_mark_destroyed(buffer_id)
-            .map_err(|_| resource::DestroyError::Invalid)?;
+        let buffer = {
+            let mut buffer_guard = hub.buffers.write();
+            buffer_guard
+                .get_and_mark_destroyed(buffer_id)
+                .map_err(|_| resource::DestroyError::Invalid)?
+        };
+
+        let _ = buffer.unmap();
+
         buffer.destroy()
     }
 
@@ -499,37 +504,40 @@ impl Global {
 
         let hub = A::hub(self);
 
-        if let Some(buffer) = hub.buffers.unregister(buffer_id) {
-            if buffer.ref_count() == 1 {
-                buffer.destroy().ok();
+        let buffer = match hub.buffers.unregister(buffer_id) {
+            Some(buffer) => buffer,
+            None => {
+                return;
             }
+        };
 
-            let last_submit_index = buffer.info.submission_index();
+        let _ = buffer.unmap();
 
-            let device = buffer.device.clone();
+        let last_submit_index = buffer.info.submission_index();
 
-            if device
-                .pending_writes
-                .lock()
-                .as_ref()
-                .unwrap()
-                .dst_buffers
-                .contains_key(&buffer_id)
-            {
-                device.lock_life().future_suspected_buffers.push(buffer);
-            } else {
-                device
-                    .lock_life()
-                    .suspected_resources
-                    .buffers
-                    .insert(buffer_id, buffer);
-            }
+        let device = buffer.device.clone();
 
-            if wait {
-                match device.wait_for_submit(last_submit_index) {
-                    Ok(()) => (),
-                    Err(e) => log::error!("Failed to wait for buffer {:?}: {}", buffer_id, e),
-                }
+        if device
+            .pending_writes
+            .lock()
+            .as_ref()
+            .unwrap()
+            .dst_buffers
+            .contains_key(&buffer_id)
+        {
+            device.lock_life().future_suspected_buffers.push(buffer);
+        } else {
+            device
+                .lock_life()
+                .suspected_resources
+                .buffers
+                .insert(buffer_id, buffer);
+        }
+
+        if wait {
+            match device.wait_for_submit(last_submit_index) {
+                Ok(()) => (),
+                Err(e) => log::error!("Failed to wait for buffer {:?}: {}", buffer_id, e),
             }
         }
     }
@@ -2503,27 +2511,17 @@ impl Global {
         profiling::scope!("unmap", "Buffer");
         api_log!("Buffer::unmap {buffer_id:?}");
 
-        let closure;
-        {
-            // Restrict the locks to this scope.
-            let hub = A::hub(self);
+        let hub = A::hub(self);
 
-            let buffer = hub
-                .buffers
-                .get(buffer_id)
-                .map_err(|_| BufferAccessError::Invalid)?;
-            if !buffer.device.is_valid() {
-                return Err(DeviceError::Lost.into());
-            }
-            closure = buffer.buffer_unmap_inner()
-        }
+        let buffer = hub
+            .buffers
+            .get(buffer_id)
+            .map_err(|_| BufferAccessError::Invalid)?;
 
-        // Note: outside the scope where locks are held when calling the callback
-        if let Some((mut operation, status)) = closure? {
-            if let Some(callback) = operation.callback.take() {
-                callback.call(status);
-            }
+        if !buffer.device.is_valid() {
+            return Err(DeviceError::Lost.into());
         }
-        Ok(())
+
+        buffer.unmap()
     }
 }
diff --git a/wgpu-core/src/resource.rs b/wgpu-core/src/resource.rs
index 3e0f790d1e..31f14eeeef 100644
--- a/wgpu-core/src/resource.rs
+++ b/wgpu-core/src/resource.rs
@@ -423,9 +423,18 @@ impl<A: HalApi> Buffer<A> {
         self.raw.get(guard).unwrap()
     }
 
-    pub(crate) fn buffer_unmap_inner(
-        self: &Arc<Self>,
-    ) -> Result<Option<BufferMapPendingClosure>, BufferAccessError> {
+    // Note: This must not be called while holding a lock.
+    pub(crate) fn unmap(self: &Arc<Self>) -> Result<(), BufferAccessError> {
+        if let Some((mut operation, status)) = self.unmap_inner()? {
+            if let Some(callback) = operation.callback.take() {
+                callback.call(status);
+            }
+        }
+
+        Ok(())
+    }
+
+    fn unmap_inner(self: &Arc<Self>) -> Result<Option<BufferMapPendingClosure>, BufferAccessError> {
         use hal::Device;
 
         let device = &self.device;
@@ -533,43 +542,30 @@ impl<A: HalApi> Buffer<A> {
     }
 
     pub(crate) fn destroy(self: &Arc<Self>) -> Result<(), DestroyError> {
-        let map_closure;
-        // Restrict the locks to this scope.
-        {
-            let device = &self.device;
-            let buffer_id = self.info.id();
-
-            map_closure = self.buffer_unmap_inner().unwrap_or(None);
-
-            #[cfg(feature = "trace")]
-            if let Some(ref mut trace) = *device.trace.lock() {
-                trace.add(trace::Action::FreeBuffer(buffer_id));
-            }
-            // Note: a future commit will replace this with a read guard
-            // and actually snatch the buffer.
-            let snatch_guard = device.snatchable_lock.read();
-            if self.raw.get(&snatch_guard).is_none() {
-                return Err(resource::DestroyError::AlreadyDestroyed);
-            }
+        let device = &self.device;
+        let buffer_id = self.info.id();
 
-            let temp = queue::TempResource::Buffer(self.clone());
-            let mut pending_writes = device.pending_writes.lock();
-            let pending_writes = pending_writes.as_mut().unwrap();
-            if pending_writes.dst_buffers.contains_key(&buffer_id) {
-                pending_writes.temp_resources.push(temp);
-            } else {
-                let last_submit_index = self.info.submission_index();
-                device
-                    .lock_life()
-                    .schedule_resource_destruction(temp, last_submit_index);
-            }
+        #[cfg(feature = "trace")]
+        if let Some(ref mut trace) = *device.trace.lock() {
+            trace.add(trace::Action::FreeBuffer(buffer_id));
+        }
+        // Note: a future commit will replace this with a read guard
+        // and actually snatch the buffer.
+        let snatch_guard = device.snatchable_lock.read();
+        if self.raw.get(&snatch_guard).is_none() {
+            return Err(resource::DestroyError::AlreadyDestroyed);
        }
 
-        // Note: outside the scope where locks are held when calling the callback
-        if let Some((mut operation, status)) = map_closure {
-            if let Some(callback) = operation.callback.take() {
-                callback.call(status);
-            }
+        let temp = queue::TempResource::Buffer(self.clone());
+        let mut pending_writes = device.pending_writes.lock();
+        let pending_writes = pending_writes.as_mut().unwrap();
+        if pending_writes.dst_buffers.contains_key(&buffer_id) {
+            pending_writes.temp_resources.push(temp);
+        } else {
+            let last_submit_index = self.info.submission_index();
+            device
+                .lock_life()
+                .schedule_resource_destruction(temp, last_submit_index);
         }
 
         Ok(())
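
Reviewer note: the pattern this patch applies in both unmap paths is to do the locked bookkeeping in an inner helper that returns the pending mapping callback, drop every guard, and only then invoke the callback, since user callbacks may re-enter the API and deadlock on the same locks. Below is a minimal standalone sketch of that pattern, not part of the patch; the names (MapCallback, BufferState, the &'static str status type) are invented for illustration and do not match wgpu-core's real types.

    use std::sync::Mutex;

    // Hypothetical stand-in for wgpu-core's buffer-map callback type.
    type MapCallback = Box<dyn FnOnce(Result<(), &'static str>) + Send>;

    struct BufferState {
        pending_callback: Option<MapCallback>,
    }

    struct Buffer {
        state: Mutex<BufferState>,
    }

    impl Buffer {
        // Analogue of `unmap_inner`: all work happens under the lock, but the
        // callback is returned to the caller instead of being invoked here.
        fn unmap_inner(&self) -> Option<MapCallback> {
            let mut state = self.state.lock().unwrap();
            state.pending_callback.take()
            // The MutexGuard is dropped when this function returns.
        }

        // Analogue of `unmap`: must not be called while holding a lock,
        // because the callback may call back into this API.
        fn unmap(&self) {
            if let Some(callback) = self.unmap_inner() {
                // No locks are held here, so a re-entrant call cannot deadlock.
                callback(Ok(()));
            }
        }
    }

    fn main() {
        let buffer = Buffer {
            state: Mutex::new(BufferState {
                pending_callback: Some(Box::new(|status| {
                    println!("map callback fired: {status:?}");
                })),
            }),
        };
        buffer.unmap(); // prints: map callback fired: Ok(())
    }

This is also why the new `Buffer::unmap` carries the "must not be called while holding a lock" note: the split into `unmap`/`unmap_inner` keeps the lock scope inside the helper, so callers like `buffer_destroy` and `buffer_drop` can fire the callback safely before taking their own locks.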