From 43ae1763c773b5121b0b2d69a7326a7ec27567af Mon Sep 17 00:00:00 2001
From: Nicolas Silva
Date: Wed, 7 Feb 2024 14:54:32 +0100
Subject: [PATCH 1/4] Prevent a deadlock when destroying resources

It's risky to get write access through the snatch lock from a drop
implementation since the snatch lock is typically held for large scopes.
This commit makes it so we defer snatching some resources to when the
device is polled and we know the snatch lock is not held.
---
 wgpu-core/src/device/global.rs   | 15 ++++--
 wgpu-core/src/device/resource.rs | 78 ++++++++++++++++++++++++++++----
 wgpu-core/src/resource.rs        | 56 ++++++-----------------
 3 files changed, 92 insertions(+), 57 deletions(-)

diff --git a/wgpu-core/src/device/global.rs b/wgpu-core/src/device/global.rs index cf3645711a..e5e046e0f1 100644 --- a/wgpu-core/src/device/global.rs +++ b/wgpu-core/src/device/global.rs @@ -2107,6 +2107,12 @@ impl Global { ) -> Result { api_log!("Device::poll"); + let hub = A::hub(self); + let device = hub + .devices + .get(device_id) + .map_err(|_| DeviceError::Invalid)?; + let (closures, queue_empty) = { if let wgt::Maintain::WaitForSubmissionIndex(submission_index) = maintain { if submission_index.queue_id != device_id.transmute() { @@ -2117,16 +2123,15 @@ impl Global { } }
- let hub = A::hub(self); - let device = hub - .devices - .get(device_id) - .map_err(|_| DeviceError::Invalid)?; let fence = device.fence.read(); let fence = fence.as_ref().unwrap(); device.maintain(fence, maintain)? }; + // Some deferred destroys are scheduled in maintain so run this right after + // to avoid holding on to them until next the device is polled. + device.deferred_resource_destruction(); + closures.fire(); Ok(queue_empty)
diff --git a/wgpu-core/src/device/resource.rs b/wgpu-core/src/device/resource.rs index ade64bcb00..ecaae868ce 100644 --- a/wgpu-core/src/device/resource.rs +++ b/wgpu-core/src/device/resource.rs @@ -1,12 +1,13 @@ #[cfg(feature = "trace")] use crate::device::trace; use crate::{ - binding_model::{self, BindGroupLayout, BindGroupLayoutEntryError}, + binding_model::{self, BindGroup, BindGroupLayout, BindGroupLayoutEntryError}, command, conv, - device::life::{LifetimeTracker, WaitIdleError}, - device::queue::PendingWrites, device::{ - bgl, AttachmentData, CommandAllocator, DeviceLostInvocation, MissingDownlevelFlags, + bgl, + life::{LifetimeTracker, WaitIdleError}, + queue::PendingWrites, + AttachmentData, CommandAllocator, DeviceLostInvocation, MissingDownlevelFlags, MissingFeatures, RenderPassContext, CLEANUP_WAIT_MS, }, hal_api::HalApi, @@ -21,10 +22,9 @@ use crate::{ pipeline, pool::ResourcePool, registry::Registry, - resource::ResourceInfo, resource::{ - self, Buffer, QuerySet, Resource, ResourceType, Sampler, Texture, TextureView, - TextureViewNotRenderableReason, + self, Buffer, QuerySet, Resource, ResourceInfo, ResourceType, Sampler, Texture, + TextureView, TextureViewNotRenderableReason, }, resource_log, snatch::{SnatchGuard, SnatchLock, Snatchable}, @@ -48,7 +48,7 @@ use std::{ num::NonZeroU32, sync::{ atomic::{AtomicBool, AtomicU64, Ordering}, - Arc, + Arc, Weak, }, }; @@ -129,10 +129,16 @@ pub struct Device { pub(crate) downlevel: wgt::DownlevelCapabilities, pub(crate) instance_flags: wgt::InstanceFlags, pub(crate) pending_writes: Mutex>>, + pub(crate) deferred_destroy: Mutex>>, #[cfg(feature = "trace")] pub(crate) trace: Mutex>, } +pub(crate) enum DeferredDestroy { + TextureView(Weak>), + BindGroup(Weak>), +} + impl std::fmt::Debug for Device { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> 
std::fmt::Result { f.debug_struct("Device") @@ -285,6 +291,7 @@ impl Device { downlevel, instance_flags, pending_writes: Mutex::new(Some(pending_writes)), + deferred_destroy: Mutex::new(Vec::new()), }) } @@ -300,6 +307,56 @@ impl Device { self.life_tracker.lock() } + /// Run some destroy operations that were deferred. + /// + /// Destroying the resources requires taking a write lock on the device's snatch lock, + /// so a good reason for deferring resource destruction is when we dont' know for sure + /// how risky it is to take the lock (typically, it shouldn't be taken from the drop + /// impementation of a reference-counted strucure). + /// The snatch lock must not be held while this function is called. + pub(crate) fn deferred_resource_destruction(&self) { + while let Some(item) = self.deferred_destroy.lock().pop() { + match item { + DeferredDestroy::TextureView(view) => { + let Some(view) = view.upgrade() else { + continue; + }; + let Some(raw_view) = view.raw.snatch(self.snatchable_lock.write()) else { + continue; + }; + + resource_log!("Destroy raw TextureView (destroyed) {:?}", view.label()); + #[cfg(feature = "trace")] + if let Some(t) = self.trace.lock().as_mut() { + t.add(trace::Action::DestroyTextureView(view.info.id())); + } + unsafe { + use hal::Device; + self.raw().destroy_texture_view(raw_view); + } + } + DeferredDestroy::BindGroup(bind_group) => { + let Some(bind_group) = bind_group.upgrade() else { + continue; + }; + let Some(raw_bind_group) = bind_group.raw.snatch(self.snatchable_lock.write()) else { + continue; + }; + + resource_log!("Destroy raw BindGroup (destroyed) {:?}", bind_group.label()); + #[cfg(feature = "trace")] + if let Some(t) = self.trace.lock().as_mut() { + t.add(trace::Action::DestroyBindGroup(bind_group.info.id())); + } + unsafe { + use hal::Device; + self.raw().destroy_bind_group(raw_bind_group); + } + } + } + } + } + /// Check this device for completed commands. /// /// The `maintain` argument tells how the maintence function should behave, either @@ -319,6 +376,7 @@ impl Device { maintain: wgt::Maintain, ) -> Result<(UserClosures, bool), WaitIdleError> { profiling::scope!("Device::maintain"); + let last_done_index = if maintain.is_wait() { let index_to_wait_for = match maintain { wgt::Maintain::WaitForSubmissionIndex(submission_index) => { @@ -1972,7 +2030,7 @@ impl Device { layout: &Arc>, desc: &binding_model::BindGroupDescriptor, hub: &Hub, - ) -> Result, binding_model::CreateBindGroupError> { + ) -> Result, binding_model::CreateBindGroupError> { use crate::binding_model::{BindingResource as Br, CreateBindGroupError as Error}; { // Check that the number of entries in the descriptor matches @@ -2212,7 +2270,7 @@ impl Device { .map_err(DeviceError::from)? 
}; - Ok(binding_model::BindGroup { + Ok(BindGroup { raw: Snatchable::new(raw), device: self.clone(), layout: layout.clone(), diff --git a/wgpu-core/src/resource.rs b/wgpu-core/src/resource.rs index 3c631d6a5d..de5d1868a3 100644 --- a/wgpu-core/src/resource.rs +++ b/wgpu-core/src/resource.rs @@ -3,8 +3,8 @@ use crate::device::trace; use crate::{ binding_model::BindGroup, device::{ - queue, BufferMapPendingClosure, Device, DeviceError, HostMap, MissingDownlevelFlags, - MissingFeatures, + queue, resource::DeferredDestroy, BufferMapPendingClosure, Device, DeviceError, HostMap, + MissingDownlevelFlags, MissingFeatures, }, global::Global, hal_api::HalApi, @@ -604,29 +604,6 @@ impl Resource for Buffer { } } -fn snatch_and_destroy_bind_groups( - device: &Device, - bind_groups: &[Weak>], -) { - for bind_group in bind_groups { - if let Some(bind_group) = bind_group.upgrade() { - if let Some(raw_bind_group) = bind_group.raw.snatch(device.snatchable_lock.write()) { - resource_log!("Destroy raw BindGroup (destroyed) {:?}", bind_group.label()); - - #[cfg(feature = "trace")] - if let Some(t) = device.trace.lock().as_mut() { - t.add(trace::Action::DestroyBindGroup(bind_group.info.id())); - } - - unsafe { - use hal::Device; - device.raw().destroy_bind_group(raw_bind_group); - } - } - } - } -} - /// A buffer that has been marked as destroyed and is staged for actual deletion soon. #[derive(Debug)] pub struct DestroyedBuffer { @@ -650,7 +627,11 @@ impl DestroyedBuffer { impl Drop for DestroyedBuffer { fn drop(&mut self) { - snatch_and_destroy_bind_groups(&self.device, &self.bind_groups); + let mut deferred = self.device.deferred_destroy.lock(); + for bind_group in self.bind_groups.drain(..) { + deferred.push(DeferredDestroy::BindGroup(bind_group)); + } + drop(deferred); if let Some(raw) = self.raw.take() { resource_log!("Destroy raw Buffer (destroyed) {:?}", self.label()); @@ -1038,25 +1019,16 @@ impl DestroyedTexture { impl Drop for DestroyedTexture { fn drop(&mut self) { let device = &self.device; - snatch_and_destroy_bind_groups(device, &self.bind_groups); + let mut deferred = device.deferred_destroy.lock(); for view in self.views.drain(..) { - if let Some(view) = view.upgrade() { - if let Some(raw_view) = view.raw.snatch(device.snatchable_lock.write()) { - resource_log!("Destroy raw TextureView (destroyed) {:?}", view.label()); - - #[cfg(feature = "trace")] - if let Some(t) = self.device.trace.lock().as_mut() { - t.add(trace::Action::DestroyTextureView(view.info.id())); - } - - unsafe { - use hal::Device; - self.device.raw().destroy_texture_view(raw_view); - } - } - } + deferred.push(DeferredDestroy::TextureView(view)); } + for bind_group in self.bind_groups.drain(..) 
{ + deferred.push(DeferredDestroy::BindGroup(bind_group)); + } + drop(deferred); + if let Some(raw) = self.raw.take() { resource_log!("Destroy raw Texture (destroyed) {:?}", self.label()); From 71255e0f3dc23df8556defb7ac29f2c8714c94fd Mon Sep 17 00:00:00 2001 From: Nicolas Silva Date: Wed, 7 Feb 2024 15:11:53 +0100 Subject: [PATCH 2/4] typo --- wgpu-core/src/device/resource.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/wgpu-core/src/device/resource.rs b/wgpu-core/src/device/resource.rs index ecaae868ce..b5728f016d 100644 --- a/wgpu-core/src/device/resource.rs +++ b/wgpu-core/src/device/resource.rs @@ -312,7 +312,7 @@ impl Device { /// Destroying the resources requires taking a write lock on the device's snatch lock, /// so a good reason for deferring resource destruction is when we dont' know for sure /// how risky it is to take the lock (typically, it shouldn't be taken from the drop - /// impementation of a reference-counted strucure). + /// implementation of a reference-counted structure). /// The snatch lock must not be held while this function is called. pub(crate) fn deferred_resource_destruction(&self) { while let Some(item) = self.deferred_destroy.lock().pop() { From 5fcce0c86b563599de00e7cfa63bb529ef7c3b1c Mon Sep 17 00:00:00 2001 From: Nicolas Silva Date: Wed, 7 Feb 2024 15:55:32 +0100 Subject: [PATCH 3/4] fixup --- wgpu-core/src/device/global.rs | 2 +- wgpu-core/src/device/resource.rs | 1 - 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/wgpu-core/src/device/global.rs b/wgpu-core/src/device/global.rs index e5e046e0f1..64fd6d4de7 100644 --- a/wgpu-core/src/device/global.rs +++ b/wgpu-core/src/device/global.rs @@ -2129,7 +2129,7 @@ impl Global { }; // Some deferred destroys are scheduled in maintain so run this right after - // to avoid holding on to them until next the device is polled. + // to avoid holding on to them until the next device poll. device.deferred_resource_destruction(); closures.fire(); diff --git a/wgpu-core/src/device/resource.rs b/wgpu-core/src/device/resource.rs index b5728f016d..84bdc61ad1 100644 --- a/wgpu-core/src/device/resource.rs +++ b/wgpu-core/src/device/resource.rs @@ -376,7 +376,6 @@ impl Device { maintain: wgt::Maintain, ) -> Result<(UserClosures, bool), WaitIdleError> { profiling::scope!("Device::maintain"); - let last_done_index = if maintain.is_wait() { let index_to_wait_for = match maintain { wgt::Maintain::WaitForSubmissionIndex(submission_index) => { From 870645d51893e900e0ffa3094a2a077c473294b9 Mon Sep 17 00:00:00 2001 From: Erich Gubler Date: Wed, 7 Feb 2024 10:56:29 -0500 Subject: [PATCH 4/4] fixup! Prevent a deadlock when destroying resources --- wgpu-core/src/device/resource.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/wgpu-core/src/device/resource.rs b/wgpu-core/src/device/resource.rs index 84bdc61ad1..b2c85a056a 100644 --- a/wgpu-core/src/device/resource.rs +++ b/wgpu-core/src/device/resource.rs @@ -310,7 +310,7 @@ impl Device { /// Run some destroy operations that were deferred. /// /// Destroying the resources requires taking a write lock on the device's snatch lock, - /// so a good reason for deferring resource destruction is when we dont' know for sure + /// so a good reason for deferring resource destruction is when we don't know for sure /// how risky it is to take the lock (typically, it shouldn't be taken from the drop /// implementation of a reference-counted structure). /// The snatch lock must not be held while this function is called.
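
The pattern these patches introduce is small but easy to get wrong: a `Drop` implementation must never take the device's snatch lock for writing, because the thread running the drop may already be inside a scope that holds a read guard on that lock, so the write acquisition would deadlock. Instead, the drop only records what needs to be destroyed in `Device::deferred_destroy`, and `deferred_resource_destruction()` drains that list from `Device::poll`, a point where no snatch guard can be held. The following is a minimal, self-contained sketch of that idea built on plain `std::sync` primitives; the names here (`Device`, `TextureView`, `DeferredDestroy`, `schedule_destroy`) are simplified stand-ins for illustration, not the actual wgpu-core definitions.

use std::sync::{Arc, Mutex, RwLock, Weak};

// Stands in for a GPU object whose backing handle can be "snatched"
// (taken exactly once) under a device-wide write lock.
struct TextureView {
    raw: RwLock<Option<String>>, // plays the role of the snatchable hal view
}

enum DeferredDestroy {
    TextureView(Weak<TextureView>),
}

struct Device {
    // Plays the role of the snatch lock: read guards are held over large
    // scopes (command recording, submission); writes only happen to snatch.
    snatch_lock: RwLock<()>,
    // Destruction work queued by Drop impls, processed at poll time.
    deferred_destroy: Mutex<Vec<DeferredDestroy>>,
}

impl Device {
    // Called from Drop impls: cheap, never touches the snatch lock.
    fn schedule_destroy(&self, item: DeferredDestroy) {
        self.deferred_destroy.lock().unwrap().push(item);
    }

    // Called from poll()/maintain(), where no snatch guard is held on this
    // thread, so taking the write lock cannot deadlock against ourselves.
    fn deferred_resource_destruction(&self) {
        loop {
            // Pop one item at a time so the list mutex is not held while
            // the (potentially slow) destruction work runs.
            let item = self.deferred_destroy.lock().unwrap().pop();
            let Some(item) = item else { break };
            match item {
                DeferredDestroy::TextureView(view) => {
                    // The view may have been fully dropped already.
                    let Some(view) = view.upgrade() else { continue };
                    let _snatch_guard = self.snatch_lock.write().unwrap();
                    if let Some(raw) = view.raw.write().unwrap().take() {
                        // Real code would hand `raw` back to the HAL here.
                        println!("destroying {raw}");
                    }
                }
            }
        }
    }
}

// Stands in for DestroyedTexture: dropping it must not snatch directly.
struct DestroyedTexture {
    device: Arc<Device>,
    views: Vec<Weak<TextureView>>,
}

impl Drop for DestroyedTexture {
    fn drop(&mut self) {
        // Whoever triggered this drop may hold a snatch read guard, so the
        // only safe move is to defer the destruction to the next poll.
        for view in self.views.drain(..) {
            self.device.schedule_destroy(DeferredDestroy::TextureView(view));
        }
    }
}

fn main() {
    let device = Arc::new(Device {
        snatch_lock: RwLock::new(()),
        deferred_destroy: Mutex::new(Vec::new()),
    });
    let view = Arc::new(TextureView {
        raw: RwLock::new(Some("raw texture view".to_string())),
    });

    // Dropping only schedules work; nothing is destroyed yet.
    drop(DestroyedTexture {
        device: device.clone(),
        views: vec![Arc::downgrade(&view)],
    });

    // At poll time the deferred work runs and the write lock is safe to take.
    device.deferred_resource_destruction();
}

In the series itself the drain happens in `Global::device_poll`, immediately after `device.maintain(...)`, so anything scheduled during maintain is reclaimed in the same poll rather than lingering until the next one.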