@@ -198,8 +198,11 @@ impl<'tcx> Scope<'tcx> {
     ///
     /// Should always be run for all inner scopes when a drop is pushed into some scope enclosing a
     /// larger extent of code.
-    fn invalidate_cache(&mut self) {
-        self.cached_exits = FnvHashMap();
+    ///
+    /// `unwind` controls whether caches for the unwind branch are also invalidated.
+    fn invalidate_cache(&mut self, unwind: bool) {
+        self.cached_exits.clear();
+        if !unwind { return; }
         for dropdata in &mut self.drops {
             if let DropKind::Value { ref mut cached_block } = dropdata.kind {
                 *cached_block = None;
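
For orientation, here is a minimal sketch of the caching machinery this method maintains. The type shapes below are simplified assumptions for illustration only (std's HashMap stands in for FnvHashMap, BasicBlock and CodeExtent are reduced to plain indices, and FreeData is pared down to its cache field); they are not the real compiler definitions.

use std::collections::HashMap;

// Simplified stand-ins for the rustc types touched by this diff.
type BasicBlock = usize;
type CodeExtent = usize;

enum DropKind {
    // Value drops cache the unwind block generated for them.
    Value { cached_block: Option<BasicBlock> },
    // Storage-only cleanup is never built into the unwind branch.
    Storage,
}

struct DropData { kind: DropKind }

// Stand-in for the scheduled-free bookkeeping (used by the last sketch).
struct FreeData { cached_block: Option<BasicBlock> }

struct Scope {
    extent: CodeExtent,
    cached_exits: HashMap<(BasicBlock, CodeExtent), BasicBlock>,
    drops: Vec<DropData>,
    free: Option<FreeData>,
}

impl Scope {
    // Mirrors the new signature: exit caches are always cleared, while the
    // per-drop unwind caches are cleared only when `unwind` is true.
    fn invalidate_cache(&mut self, unwind: bool) {
        self.cached_exits.clear();
        if !unwind { return; }
        for dropdata in &mut self.drops {
            if let DropKind::Value { ref mut cached_block } = dropdata.kind {
                *cached_block = None;
            }
        }
    }
}
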
@@ -455,25 +458,29 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
         };

         for scope in self.scopes.iter_mut().rev() {
-            if scope.extent == extent {
+            let this_scope = scope.extent == extent;
+            // We must invalidate all the caches leading up to the scope we're looking for,
+            // because the cached blocks would branch into an already built version of the
+            // drop chain that does not contain the new drop. Adding a drop to the scope
+            // currently being inspected can also invalidate its non-unwind caches in some
+            // cases, so we invalidate those as well. The unwind caches stay correct, because
+            // the already generated unwind blocks cannot be influenced by the just-added drop.
+            //
+            // If we're scheduling cleanup for a non-droppable type (i.e. DropKind::Storage),
+            // we do not need to invalidate the unwind branch, because DropKind::Storage is
+            // currently never built into the unwind branch.
+            let invalidate_unwind = needs_drop && !this_scope;
+            scope.invalidate_cache(invalidate_unwind);
+            if this_scope {
                 if let DropKind::Value { .. } = drop_kind {
                     scope.needs_cleanup = true;
                 }
-
-                // No need to invalidate any caches here. The just-scheduled drop will branch into
-                // the drop that comes before it in the vector.
                 scope.drops.push(DropData {
                     span: span,
                     location: lvalue.clone(),
                     kind: drop_kind
                 });
                 return;
-            } else {
-                // We must invalidate all the cached_blocks leading up to the scope we're
-                // looking for, because all of the blocks in the chain will become incorrect.
-                if let DropKind::Value { .. } = drop_kind {
-                    scope.invalidate_cache()
-                }
             }
         }
         span_bug!(span, "extent {:?} not in scope to drop {:?}", extent, lvalue);
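
A sketch of the scheduling loop above, reusing the simplified types from the first sketch; the free-standing function, its signature, and the plain panic! are illustrative assumptions standing in for the builder method and span_bug!.

fn schedule_drop(scopes: &mut [Scope], extent: CodeExtent, needs_drop: bool) {
    for scope in scopes.iter_mut().rev() {
        let this_scope = scope.extent == extent;
        // Inner scopes on the way in: both their exit caches and their unwind
        // caches point at drop chains that will no longer include the new
        // drop, so both are stale. Target scope: only the exit caches can be
        // affected; already built unwind blocks cannot branch into a drop
        // that did not exist when they were generated. Storage-only cleanup
        // never reaches the unwind path, so it never invalidates it.
        let invalidate_unwind = needs_drop && !this_scope;
        scope.invalidate_cache(invalidate_unwind);
        if this_scope {
            scope.drops.push(DropData {
                kind: if needs_drop {
                    DropKind::Value { cached_block: None }
                } else {
                    DropKind::Storage
                },
            });
            return;
        }
    }
    panic!("extent {:?} not in any scope", extent);
}

The design choice worth noting is that invalidation now happens unconditionally on the way in, and only the more expensive unwind invalidation is gated on needs_drop && !this_scope.
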
@@ -490,11 +497,12 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
                         value: &Lvalue<'tcx>,
                         item_ty: Ty<'tcx>) {
         for scope in self.scopes.iter_mut().rev() {
+            // We must invalidate all the caches leading up to and including the scope we're
+            // looking for, because otherwise some of the blocks in the chain will become
+            // incorrect and must be rebuilt.
+            scope.invalidate_cache(true);
             if scope.extent == extent {
                 assert!(scope.free.is_none(), "scope already has a scheduled free!");
-                // We also must invalidate the caches in the scope for which the free is scheduled
-                // because the drops must branch into the free we schedule here.
-                scope.invalidate_cache();
                 scope.needs_cleanup = true;
                 scope.free = Some(FreeData {
                     span: span,
@@ -503,11 +511,6 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
                     cached_block: None
                 });
                 return;
-            } else {
-                // We must invalidate all the cached_blocks leading up to the scope we're looking
-                // for, because otherwise some/most of the blocks in the chain will become
-                // incorrect.
-                scope.invalidate_cache();
             }
         }
         span_bug!(span, "extent {:?} not in scope to free {:?}", extent, value);
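
The free-scheduling loop follows the same pattern, again with the simplified types from the first sketch (schedule_box_free and the panic! are assumed stand-ins). The difference is that unwind is unconditionally true, target scope included, because every cached drop chain must be rebuilt to branch into the newly scheduled free.

fn schedule_box_free(scopes: &mut [Scope], extent: CodeExtent) {
    for scope in scopes.iter_mut().rev() {
        // Every cache on the way in is stale, unwind blocks included: the
        // drops they chain into must now branch to the scheduled free.
        scope.invalidate_cache(true);
        if scope.extent == extent {
            assert!(scope.free.is_none(), "scope already has a scheduled free!");
            scope.free = Some(FreeData { cached_block: None });
            return;
        }
    }
    panic!("extent {:?} not in any scope", extent);
}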