@@ -198,8 +198,11 @@ impl<'tcx> Scope<'tcx> {
    ///
    /// Should always be run for all inner scopes when a drop is pushed into some scope enclosing a
    /// larger extent of code.
-    fn invalidate_cache(&mut self) {
-        self.cached_exits = FnvHashMap();
+    ///
+    /// `unwind` controls whether caches for the unwind branch are also invalidated.
+    fn invalidate_cache(&mut self, unwind: bool) {
+        self.cached_exits.clear();
+        if !unwind { return; }
        for dropdata in &mut self.drops {
            if let DropKind::Value { ref mut cached_block } = dropdata.kind {
                *cached_block = None;
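
For reference, here is a minimal, self-contained sketch of the behaviour this hunk gives `invalidate_cache`: exit caches are always cleared, while the per-drop unwind caches are cleared only when `unwind` is true. The types below are hypothetical simplified stand-ins (a plain `HashMap` instead of `FnvHashMap`, a toy `BasicBlock`), not the real rustc definitions.

```rust
use std::collections::HashMap;

// Hypothetical, simplified stand-ins for the rustc types touched above.
#[derive(Clone, Copy)]
struct BasicBlock(usize);

#[allow(dead_code)]
enum DropKind {
    Value { cached_block: Option<BasicBlock> },
    Storage,
}

struct DropData {
    kind: DropKind,
}

struct Scope {
    // Cached exit blocks; the real code uses an FnvHashMap keyed differently.
    cached_exits: HashMap<usize, BasicBlock>,
    drops: Vec<DropData>,
}

impl Scope {
    /// `unwind` controls whether the per-drop unwind caches are cleared as well.
    fn invalidate_cache(&mut self, unwind: bool) {
        self.cached_exits.clear();
        if !unwind { return; }
        for dropdata in &mut self.drops {
            if let DropKind::Value { ref mut cached_block } = dropdata.kind {
                *cached_block = None;
            }
        }
    }
}

fn main() {
    let mut scope = Scope {
        cached_exits: HashMap::new(),
        drops: vec![DropData { kind: DropKind::Value { cached_block: Some(BasicBlock(7)) } }],
    };
    scope.invalidate_cache(false);
    // The exit caches are gone, but the drop's cached unwind block survives.
    assert!(matches!(scope.drops[0].kind, DropKind::Value { cached_block: Some(_) }));
    scope.invalidate_cache(true);
    // Now the unwind cache is cleared as well.
    assert!(matches!(scope.drops[0].kind, DropKind::Value { cached_block: None }));
}
```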
@@ -455,25 +458,65 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
        };

        for scope in self.scopes.iter_mut().rev() {
-            if scope.extent == extent {
+            let this_scope = scope.extent == extent;
+            // When building drops, we try to cache chains of drops in such a way that these
+            // drops can be reused by the drops which would branch into the cached (already
+            // built) blocks. This, however, means that whenever we add a drop into a scope which
+            // already had some blocks built (and thus, cached) for it, we must invalidate all
+            // caches which might branch into the scope which had a drop just added to it. This is
+            // necessary, because otherwise some other code might use the cache to branch into an
+            // already built chain of drops, essentially ignoring the newly added drop.
+            //
+            // For example, consider two scopes with a drop in each. These are built and thus the
+            // caches are filled:
+            //
+            // +--------------------------------------------------------+
+            // | +----------------------------------+                   |
+            // | | +--------+     +-------------+   | +---------------+ |
+            // | | | return | <-+ | drop(outer) | <-+ |  drop(middle) | |
+            // | | +--------+     +-------------+   | +---------------+ |
+            // | +------------|outer_scope cache|---+                   |
+            // +------------------------------|middle_scope cache|------+
+            //
+            // Now, a new, inner-most scope is added along with a new drop into both the
+            // inner-most and the outer-most scope:
+            //
+            // +------------------------------------------------------------+
+            // | +-----------------------------------+                      |
+            // | | +--------+      +-------------+   |  +---------------+   | +-------------+
+            // | | | return | <+   | drop(new)   | <-+  |  drop(middle) | <--+| drop(inner) |
+            // | | +--------+  |   | drop(outer) |   |  +---------------+   | +-------------+
+            // | |             +-+ +-------------+   |                      |
+            // | +----|invalid outer_scope cache|----+                      |
+            // +----=----------------|invalid middle_scope cache|-----------+
+            //
+            // If, when adding `drop(new)`, we do not invalidate the cached blocks for both
+            // outer_scope and middle_scope, then, when building drops for the inner (right-most)
+            // scope, the old cached blocks, without `drop(new)`, will get used, producing the
+            // wrong results.
+            //
+            // The cache and its invalidation for the unwind branch are somewhat special. The
+            // cache is per-drop rather than per-scope, which has several implications. Adding a
+            // new drop into a scope will not invalidate the cached blocks of the prior drops in
+            // that scope, because none of the already existing drops will have an edge into a
+            // block with the newly added drop.
+            //
+            // Note that this code iterates scopes from the inner-most to the outer-most,
+            // invalidating the caches of each scope visited. This way only the bare minimum of
+            // caches gets invalidated: if a new drop is added into the middle scope, the cache
+            // of the outer scope stays intact.
+            let invalidate_unwind = needs_drop && !this_scope;
+            scope.invalidate_cache(invalidate_unwind);
+            if this_scope {
                if let DropKind::Value { .. } = drop_kind {
                    scope.needs_cleanup = true;
                }
-
-                // No need to invalidate any caches here. The just-scheduled drop will branch into
-                // the drop that comes before it in the vector.
                scope.drops.push(DropData {
                    span: span,
                    location: lvalue.clone(),
                    kind: drop_kind
                });
                return;
-            } else {
-                // We must invalidate all the cached_blocks leading up to the scope we’re
-                // looking for, because all of the blocks in the chain will become incorrect.
-                if let DropKind::Value { .. } = drop_kind {
-                    scope.invalidate_cache()
-                }
            }
        }
        span_bug!(span, "extent {:?} not in scope to drop {:?}", extent, lvalue);
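
To make the scheduling policy above concrete, the following is a minimal sketch, with hypothetical simplified types rather than the real `Builder`, `CodeExtent`, or `DropData`: scopes are walked from the inner-most outwards, every scope crossed has its exit caches cleared, and the unwind caches are cleared only for scopes inside the one receiving the drop, and only when the drop actually generates code (`needs_drop` stands in for the `DropKind::Value` check).

```rust
// Hypothetical simplified types; the real code lives in rustc's MIR builder.
#[derive(PartialEq, Clone, Copy, Debug)]
struct CodeExtent(u32);

struct Scope {
    extent: CodeExtent,
    needs_cleanup: bool,
    unwind_invalidated: bool, // stand-in for clearing the per-drop unwind caches
}

impl Scope {
    fn invalidate_cache(&mut self, unwind: bool) {
        // The exit caches would be cleared here unconditionally;
        // the per-drop unwind caches only when `unwind` is true.
        if unwind {
            self.unwind_invalidated = true;
        }
    }
}

/// Walks scopes from the inner-most to the outer-most. Every scope crossed gets its
/// exit caches cleared; unwind caches are cleared only for scopes inside the one
/// receiving the drop, and only when the drop actually needs code (`needs_drop`).
fn schedule_drop(scopes: &mut [Scope], extent: CodeExtent, needs_drop: bool) {
    for scope in scopes.iter_mut().rev() {
        let this_scope = scope.extent == extent;
        let invalidate_unwind = needs_drop && !this_scope;
        scope.invalidate_cache(invalidate_unwind);
        if this_scope {
            if needs_drop {
                scope.needs_cleanup = true;
            }
            // The real code pushes a DropData onto scope.drops here.
            return;
        }
    }
    panic!("extent {:?} not in scope to drop", extent);
}

fn main() {
    let mut scopes = vec![
        Scope { extent: CodeExtent(0), needs_cleanup: false, unwind_invalidated: false }, // outer
        Scope { extent: CodeExtent(1), needs_cleanup: false, unwind_invalidated: false }, // inner
    ];
    // Schedule a value drop in the outer scope: the inner scope's unwind caches are
    // invalidated on the way out, while the outer scope's own unwind caches are kept.
    schedule_drop(&mut scopes, CodeExtent(0), true);
    assert!(scopes[0].needs_cleanup);
    assert!(scopes[1].unwind_invalidated && !scopes[0].unwind_invalidated);
}
```

Because the loop returns at the target scope, scopes enclosing it are never touched, which is exactly the "bare minimum" invalidation described in the comment: adding a drop to the middle scope leaves the outer scope's caches intact.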
@@ -490,11 +533,12 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
                             value: &Lvalue<'tcx>,
                             item_ty: Ty<'tcx>) {
        for scope in self.scopes.iter_mut().rev() {
+            // See the comment in schedule_drop above. The primary difference is that we invalidate
+            // the unwind blocks unconditionally. That’s because the box free may be considered
+            // the outer-most cleanup within the scope.
+            scope.invalidate_cache(true);
            if scope.extent == extent {
                assert!(scope.free.is_none(), "scope already has a scheduled free!");
-                // We also must invalidate the caches in the scope for which the free is scheduled
-                // because the drops must branch into the free we schedule here.
-                scope.invalidate_cache();
                scope.needs_cleanup = true;
                scope.free = Some(FreeData {
                    span: span,
@@ -503,11 +547,6 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
                    cached_block: None
                });
                return;
-            } else {
-                // We must invalidate all the cached_blocks leading up to the scope we’re looking
-                // for, because otherwise some/most of the blocks in the chain will become
-                // incorrect.
-                scope.invalidate_cache();
            }
        }
        span_bug!(span, "extent {:?} not in scope to free {:?}", extent, value);
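
For contrast with `schedule_drop`, here is a minimal sketch (hypothetical simplified types again) of the box-free scheduling loop after this change: every scope visited has its unwind caches invalidated unconditionally, since the scheduled free becomes the outer-most cleanup of its scope and the existing drops must branch into it.

```rust
// Hypothetical simplified types illustrating the box-free scheduling policy.
#[derive(PartialEq, Clone, Copy, Debug)]
struct CodeExtent(u32);

struct Scope {
    extent: CodeExtent,
    needs_cleanup: bool,
    has_free: bool,
    unwind_invalidations: u32, // counts invalidate_cache(true) calls, for this sketch only
}

impl Scope {
    fn invalidate_cache(&mut self, unwind: bool) {
        // The exit caches would be cleared here; we only record the unwind part.
        if unwind {
            self.unwind_invalidations += 1;
        }
    }
}

fn schedule_box_free(scopes: &mut [Scope], extent: CodeExtent) {
    for scope in scopes.iter_mut().rev() {
        // Unlike schedule_drop, unwind caches are invalidated unconditionally,
        // including for the scope that receives the free itself.
        scope.invalidate_cache(true);
        if scope.extent == extent {
            assert!(!scope.has_free, "scope already has a scheduled free!");
            scope.needs_cleanup = true;
            scope.has_free = true;
            return;
        }
    }
    panic!("extent {:?} not in scope to free", extent);
}

fn main() {
    let mut scopes = vec![
        Scope { extent: CodeExtent(0), needs_cleanup: false, has_free: false, unwind_invalidations: 0 }, // outer
        Scope { extent: CodeExtent(1), needs_cleanup: false, has_free: false, unwind_invalidations: 0 }, // inner
    ];
    // Schedule the free in the outer scope: both scopes have their unwind caches
    // invalidated, including the target scope.
    schedule_box_free(&mut scopes, CodeExtent(0));
    assert!(scopes[0].has_free && scopes[0].needs_cleanup);
    assert_eq!(scopes[1].unwind_invalidations, 1);
    assert_eq!(scopes[0].unwind_invalidations, 1);
}
```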