From 2d366428cc50d1f20c139ffc854a603de1c1470c Mon Sep 17 00:00:00 2001
From: Simonas Kazlauskas
Date: Wed, 17 Aug 2016 04:13:43 +0300
Subject: [PATCH 1/2] Properly invalidate the early exit cache

Fixes #35737
---
 src/librustc_mir/build/scope.rs             | 43 +++++++++++----------
 src/test/run-pass/mir_early_return_scope.rs | 37 ++++++++++++++++++
 2 files changed, 60 insertions(+), 20 deletions(-)
 create mode 100644 src/test/run-pass/mir_early_return_scope.rs

diff --git a/src/librustc_mir/build/scope.rs b/src/librustc_mir/build/scope.rs
index cae9e8379897c..ca9e108bb415a 100644
--- a/src/librustc_mir/build/scope.rs
+++ b/src/librustc_mir/build/scope.rs
@@ -198,8 +198,11 @@ impl<'tcx> Scope<'tcx> {
     ///
     /// Should always be run for all inner scopes when a drop is pushed into some scope enclosing a
     /// larger extent of code.
-    fn invalidate_cache(&mut self) {
-        self.cached_exits = FnvHashMap();
+    ///
+    /// `unwind` controls whether caches for the unwind branch are also invalidated.
+    fn invalidate_cache(&mut self, unwind: bool) {
+        self.cached_exits.clear();
+        if !unwind { return; }
         for dropdata in &mut self.drops {
             if let DropKind::Value { ref mut cached_block } = dropdata.kind {
                 *cached_block = None;
@@ -455,25 +458,29 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
         };
 
         for scope in self.scopes.iter_mut().rev() {
-            if scope.extent == extent {
+            let this_scope = scope.extent == extent;
+            // We must invalidate all the caches leading up to the scope we’re looking for, because
+            // the cached blocks will branch into build of scope not containing the new drop. If we
+            // add stuff to the currently inspected scope, then in some cases the non-unwind caches
+            // may become invalid, therefore we should invalidate these as well. The unwind caches
+            // will stay correct, because the already generated unwind blocks cannot be influenced
+            // by just added drop.
+            //
+            // If we’re scheduling cleanup for non-droppable type (i.e. DropKind::Storage), then we
+            // do not need to invalidate unwind branch, because DropKind::Storage does not end up
+            // built in the unwind branch currently.
+            let invalidate_unwind = needs_drop && !this_scope;
+            scope.invalidate_cache(invalidate_unwind);
+            if this_scope {
                 if let DropKind::Value { .. } = drop_kind {
                     scope.needs_cleanup = true;
                 }
-
-                // No need to invalidate any caches here. The just-scheduled drop will branch into
-                // the drop that comes before it in the vector.
                 scope.drops.push(DropData {
                     span: span,
                     location: lvalue.clone(),
                     kind: drop_kind
                 });
                 return;
-            } else {
-                // We must invalidate all the cached_blocks leading up to the scope we’re
-                // looking for, because all of the blocks in the chain will become incorrect.
-                if let DropKind::Value { .. } = drop_kind {
-                    scope.invalidate_cache()
-                }
             }
         }
         span_bug!(span, "extent {:?} not in scope to drop {:?}", extent, lvalue);
@@ -490,11 +497,12 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
                              value: &Lvalue<'tcx>,
                              item_ty: Ty<'tcx>) {
         for scope in self.scopes.iter_mut().rev() {
+            // We must invalidate all the caches leading up to and including the scope we’re
+            // looking for, because otherwise some of the blocks in the chain will become
+            // incorrect and must be rebuilt.
+            scope.invalidate_cache(true);
             if scope.extent == extent {
                 assert!(scope.free.is_none(), "scope already has a scheduled free!");
-                // We also must invalidate the caches in the scope for which the free is scheduled
-                // because the drops must branch into the free we schedule here.
-                scope.invalidate_cache();
                 scope.needs_cleanup = true;
                 scope.free = Some(FreeData {
                     span: span,
@@ -503,11 +511,6 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
                     cached_block: None
                 });
                 return;
-            } else {
-                // We must invalidate all the cached_blocks leading up to the scope we’re looking
-                // for, because otherwise some/most of the blocks in the chain will become
-                // incorrect.
-                scope.invalidate_cache();
             }
         }
         span_bug!(span, "extent {:?} not in scope to free {:?}", extent, value);
diff --git a/src/test/run-pass/mir_early_return_scope.rs b/src/test/run-pass/mir_early_return_scope.rs
new file mode 100644
index 0000000000000..c27e57358b09b
--- /dev/null
+++ b/src/test/run-pass/mir_early_return_scope.rs
@@ -0,0 +1,37 @@
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+static mut DROP: bool = false;
+
+struct ConnWrap(Conn);
+impl ::std::ops::Deref for ConnWrap {
+    type Target=Conn;
+    fn deref(&self) -> &Conn { &self.0 }
+}
+
+struct Conn;
+impl Drop for Conn {
+    fn drop(&mut self) { unsafe { DROP = true; } }
+}
+
+fn inner() {
+    let conn = &*match Some(ConnWrap(Conn)) {
+        Some(val) => val,
+        None => return,
+    };
+    return;
+}
+
+fn main() {
+    inner();
+    unsafe {
+        assert_eq!(DROP, true);
+    }
+}

From 2c3250adfa4676467f9ab31201a26187a66cbe0b Mon Sep 17 00:00:00 2001
From: Simonas Kazlauskas
Date: Thu, 18 Aug 2016 00:38:30 +0300
Subject: [PATCH 2/2] Nice graphs

---
 src/librustc_mir/build/scope.rs | 60 ++++++++++++++++++++++++++------
 1 file changed, 48 insertions(+), 12 deletions(-)

diff --git a/src/librustc_mir/build/scope.rs b/src/librustc_mir/build/scope.rs
index ca9e108bb415a..dc1d63a291118 100644
--- a/src/librustc_mir/build/scope.rs
+++ b/src/librustc_mir/build/scope.rs
@@ -459,16 +459,52 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
 
         for scope in self.scopes.iter_mut().rev() {
             let this_scope = scope.extent == extent;
-            // We must invalidate all the caches leading up to the scope we’re looking for, because
-            // the cached blocks will branch into build of scope not containing the new drop. If we
-            // add stuff to the currently inspected scope, then in some cases the non-unwind caches
-            // may become invalid, therefore we should invalidate these as well. The unwind caches
-            // will stay correct, because the already generated unwind blocks cannot be influenced
-            // by just added drop.
+            // When building drops, we try to cache chains of drops in such a way that these drops
+            // can be reused by the drops which would branch into the cached (already built)
+            // blocks. This, however, means that whenever we add a drop into a scope which already
+            // had some blocks built (and thus, cached) for it, we must invalidate all caches which
+            // might branch into the scope which had a drop just added to it. This is necessary,
+            // because otherwise some other code might use the cache to branch into an already
+            // built chain of drops, essentially ignoring the newly added drop.
             //
-            // If we’re scheduling cleanup for non-droppable type (i.e. DropKind::Storage), then we
-            // do not need to invalidate unwind branch, because DropKind::Storage does not end up
-            // built in the unwind branch currently.
+            // For example, consider two scopes with a drop in each. These are built and
+            // thus the caches are filled:
+            //
+            //    +-------------------------------------------------------+
+            //    | +----------------------------------+                  |
+            //    | | +--------+     +-------------+   | +--------------+ |
+            //    | | | return | <-+ | drop(outer) | <-+ | drop(middle) | |
+            //    | | +--------+     +-------------+   | +--------------+ |
+            //    | +-------------|outer_scope cache|--+                  |
+            //    +------------------------------|middle_scope cache|-----+
+            //
+            // Now a new, inner-most scope is added, along with a new drop into both the
+            // inner-most and the outer-most scope:
+            //
+            //    +----------------------------------------------------------+
+            //    | +----------------------------------+                     |
+            //    | | +--------+    +-------------+    | +--------------+    |  +-------------+
+            //    | | | return | <+ | drop(new)   | <-+ | drop(middle) |  <--+  | drop(inner) |
+            //    | | +--------+  | | drop(outer) |    | +--------------+    |  +-------------+
+            //    | |            +-+-------------+    |                      |
+            //    | +----|invalid outer_scope cache|--+                      |
+            //    +----------------|invalid middle_scope cache|--------------+
+            //
+            // If, when adding `drop(new)`, we do not invalidate the cached blocks for both
+            // outer_scope and middle_scope, then, when building drops for the inner (right-most)
+            // scope, the old cached blocks, without `drop(new)`, will get used, producing the
+            // wrong results.
+            //
+            // The cache and its invalidation for the unwind branch are somewhat special. The
+            // cache is per-drop rather than per-scope, which has several different implications.
+            // Adding a new drop into a scope will not invalidate the cached blocks of the prior
+            // drops in the scope. That is because none of the already existing drops will have an
+            // edge into a block with the newly added drop.
+            //
+            // Note that this code iterates the scopes from the inner-most to the outer-most,
+            // invalidating the caches of each scope visited. This way only the bare minimum of
+            // caches gets invalidated, i.e. if a new drop is added into the middle scope, the
+            // cache of the outer scope stays intact.
             let invalidate_unwind = needs_drop && !this_scope;
             scope.invalidate_cache(invalidate_unwind);
             if this_scope {
@@ -497,9 +533,9 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
                              value: &Lvalue<'tcx>,
                              item_ty: Ty<'tcx>) {
         for scope in self.scopes.iter_mut().rev() {
-            // We must invalidate all the caches leading up to and including the scope we’re
-            // looking for, because otherwise some of the blocks in the chain will become
-            // incorrect and must be rebuilt.
+            // See the comment in schedule_drop above. The primary difference is that we
+            // invalidate the unwind blocks unconditionally. That’s because the box free may be
+            // considered the outer-most cleanup within the scope.
             scope.invalidate_cache(true);
             if scope.extent == extent {
                 assert!(scope.free.is_none(), "scope already has a scheduled free!");
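
The invalidation rule these two patches implement can be seen in isolation in the minimal,
standalone sketch below. The `Scope`, `DropKind` and `BasicBlock` types here are simplified
stand-ins rather than rustc's real ones: early-exit caches are always cleared, while the
per-drop unwind caches are cleared only when `unwind` is true, which the patch requests only
when a value drop is scheduled into an enclosing scope.

// Simplified model of the cache-invalidation rule from the patches above; not rustc code.
use std::collections::HashMap;

type BasicBlock = usize;

enum DropKind {
    // A value drop caches the block built for the unwind branch.
    Value { cached_block: Option<BasicBlock> },
    // A storage-only drop is never built into the unwind branch.
    Storage,
}

struct Scope {
    // Cached blocks for early (non-unwind) exits out of this scope, keyed by an arbitrary exit id.
    cached_exits: HashMap<usize, BasicBlock>,
    drops: Vec<DropKind>,
}

impl Scope {
    // `unwind` controls whether the per-drop unwind caches are also invalidated.
    fn invalidate_cache(&mut self, unwind: bool) {
        self.cached_exits.clear();
        if !unwind {
            return;
        }
        for dropdata in &mut self.drops {
            if let DropKind::Value { ref mut cached_block } = *dropdata {
                *cached_block = None;
            }
        }
    }
}

fn main() {
    let mut scope = Scope {
        cached_exits: HashMap::new(),
        drops: vec![DropKind::Value { cached_block: Some(3) }, DropKind::Storage],
    };
    scope.cached_exits.insert(0, 5);

    // Scheduling a storage-only drop: exit caches are cleared, unwind caches survive.
    scope.invalidate_cache(false);
    assert!(scope.cached_exits.is_empty());
    if let DropKind::Value { cached_block } = &scope.drops[0] {
        assert_eq!(*cached_block, Some(3));
    }

    // Scheduling a value drop into an enclosing scope: unwind caches are cleared as well.
    scope.invalidate_cache(true);
    if let DropKind::Value { cached_block } = &scope.drops[0] {
        assert_eq!(*cached_block, None);
    }
}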