Avoid quadratic growth of functions due to cleanups #31390

Merged
merged 1 commit on Feb 5, 2016

Avoid quadratic growth of functions due to cleanups
If a new cleanup is added to a cleanup scope, the cached exits for that
scope are cleared, so all previous cleanups have to be translated
again. In the worst case this means that we get N distinct landing pads
where the last one has N cleanups, then N-1 and so on, i.e. roughly
N^2/2 translated cleanups in total.

Since new cleanups are executed before older ones, we can instead cache
the number of already translated cleanups in addition to the block that
contains them, translate only the new ones, if any, and then jump to
the cached block, getting away with linear growth instead.
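
A minimal, self-contained sketch of that caching scheme (not the actual
trans code: `Scope`, `Builder` and `emit_exit` are hypothetical stand-ins,
and "blocks" are just lists of strings). Each exit translates only the
cleanups added since the last cached exit and then branches to the cached
block:

    // A toy translator: each call to `emit_exit` emits one exit block for
    // the current scope, reusing the previously emitted block if possible.
    type BlockId = usize;

    struct Scope {
        cleanups: Vec<&'static str>,
        // The block that already runs some cleanups, and how many it covers.
        cached_exit: Option<(BlockId, usize)>,
    }

    struct Builder {
        // Each "block" is just a list of pseudo-instructions.
        blocks: Vec<Vec<String>>,
    }

    impl Builder {
        fn emit_exit(&mut self, scope: &mut Scope) -> BlockId {
            let len = scope.cleanups.len();
            let (chain_to, skip) = match scope.cached_exit {
                Some((block, covered)) => (Some(block), covered),
                None => (None, 0),
            };
            // Translate only the cleanups added since the cached exit,
            // newest first ...
            let mut block: Vec<String> = scope.cleanups[skip..]
                .iter()
                .rev()
                .map(|c| format!("call drop({})", c))
                .collect();
            // ... then chain to the cached block, which handles the rest.
            if let Some(target) = chain_to {
                block.push(format!("br block{}", target));
            }
            self.blocks.push(block);
            let id = self.blocks.len() - 1;
            scope.cached_exit = Some((id, len));
            id
        }
    }

    fn main() {
        let mut b = Builder { blocks: Vec::new() };
        let mut scope = Scope { cleanups: Vec::new(), cached_exit: None };
        for name in ["a", "b", "c"] {
            scope.cleanups.push(name);
            b.emit_exit(&mut scope);
        }
        // Each exit block contains one new drop call plus a branch, so the
        // emitted code grows linearly instead of quadratically.
        for (i, block) in b.blocks.iter().enumerate() {
            println!("block{}: {:?}", i, block);
        }
    }

Running the sketch prints three blocks, each containing a single new drop
call and, after the first one, a branch to the previously emitted block.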

For the crate in #31381 this reduces the compile time for an optimized
build from >20 minutes (I cancelled the build at that point) to about 11
seconds. Testing a few crates that ship with rustc shows compile time
improvements somewhere between 1% and 8%, the "big" winner being
rustc_platform_intrinsics, which features code similar to that in #31381.

Fixes #31381
dotdash committed Feb 3, 2016
commit 8c0f4f5d3aa6deac4ef8b371bd043f5c1df2c254
37 changes: 24 additions & 13 deletions src/librustc_trans/trans/cleanup.rs
@@ -200,6 +200,7 @@ pub enum UnwindKind {
 pub struct CachedEarlyExit {
     label: EarlyExitLabel,
     cleanup_block: BasicBlockRef,
+    last_cleanup: usize,
 }

 pub trait Cleanup<'tcx> {
@@ -560,7 +561,7 @@ impl<'blk, 'tcx> CleanupMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx> {
         for scope in self.scopes.borrow_mut().iter_mut().rev() {
             if scope.kind.is_ast_with_id(cleanup_scope) {
                 scope.cleanups.push(cleanup);
-                scope.clear_cached_exits();
+                scope.cached_landing_pad = None;
                 return;
             } else {
                 // will be adding a cleanup to some enclosing scope
@@ -585,7 +586,7 @@ impl<'blk, 'tcx> CleanupMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx> {
         let mut scopes = self.scopes.borrow_mut();
         let scope = &mut (*scopes)[custom_scope.index];
         scope.cleanups.push(cleanup);
-        scope.clear_cached_exits();
+        scope.cached_landing_pad = None;
     }

     /// Returns true if there are pending cleanups that should execute on panic.
@@ -723,6 +724,7 @@ impl<'blk, 'tcx> CleanupHelperMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx
         let orig_scopes_len = self.scopes_len();
         let mut prev_llbb;
         let mut popped_scopes = vec!();
+        let mut skip = 0;

         // First we pop off all the cleanup stacks that are
         // traversed until the exit is reached, pushing them
@@ -769,20 +771,25 @@ impl<'blk, 'tcx> CleanupHelperMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx
                 }
             }

+            // Pop off the scope, since we may be generating
+            // unwinding code for it.
+            let top_scope = self.pop_scope();
+            let cached_exit = top_scope.cached_early_exit(label);
+            popped_scopes.push(top_scope);
+
             // Check if we have already cached the unwinding of this
             // scope for this label. If so, we can stop popping scopes
             // and branch to the cached label, since it contains the
             // cleanups for any subsequent scopes.
-            if let Some(exit) = self.top_scope(|s| s.cached_early_exit(label)) {
+            if let Some((exit, last_cleanup)) = cached_exit {
                 prev_llbb = exit;
+                skip = last_cleanup;
                 break;
             }

-            // Pop off the scope, since we will be generating
-            // unwinding code for it. If we are searching for a loop exit,
+            // If we are searching for a loop exit,
             // and this scope is that loop, then stop popping and set
             // `prev_llbb` to the appropriate exit block from the loop.
-            popped_scopes.push(self.pop_scope());
             let scope = popped_scopes.last().unwrap();
             match label {
                 UnwindExit(..) | ReturnExit => { }
@@ -826,13 +833,15 @@ impl<'blk, 'tcx> CleanupHelperMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx
                 let bcx_in = self.new_block(&name[..], None);
                 let exit_label = label.start(bcx_in);
                 let mut bcx_out = bcx_in;
-                for cleanup in scope.cleanups.iter().rev() {
+                let len = scope.cleanups.len();
+                for cleanup in scope.cleanups.iter().rev().take(len - skip) {
                     bcx_out = cleanup.trans(bcx_out, scope.debug_loc);
                 }
+                skip = 0;
                 exit_label.branch(bcx_out, prev_llbb);
                 prev_llbb = bcx_in.llbb;

-                scope.add_cached_early_exit(exit_label, prev_llbb);
+                scope.add_cached_early_exit(exit_label, prev_llbb, len);
             }
             self.push_scope(scope);
         }
@@ -938,18 +947,20 @@ impl<'blk, 'tcx> CleanupScope<'blk, 'tcx> {

     fn cached_early_exit(&self,
                          label: EarlyExitLabel)
-                         -> Option<BasicBlockRef> {
-        self.cached_early_exits.iter().
+                         -> Option<(BasicBlockRef, usize)> {
+        self.cached_early_exits.iter().rev().
             find(|e| e.label == label).
-            map(|e| e.cleanup_block)
+            map(|e| (e.cleanup_block, e.last_cleanup))
     }

     fn add_cached_early_exit(&mut self,
                              label: EarlyExitLabel,
-                             blk: BasicBlockRef) {
+                             blk: BasicBlockRef,
+                             last_cleanup: usize) {
         self.cached_early_exits.push(
             CachedEarlyExit { label: label,
-                              cleanup_block: blk });
+                              cleanup_block: blk,
+                              last_cleanup: last_cleanup});
     }

     /// True if this scope has cleanups that need unwinding
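
Since adding a cleanup now only clears `cached_landing_pad`, a scope can
accumulate several cached early exits for the same label, each covering a
different number of cleanups; the lookup above therefore switches from
`iter()` to `iter().rev()` so it finds the most recent, most-covering
entry. A small standalone illustration of that lookup, using hypothetical
`Label` and `CachedExit` types in place of `EarlyExitLabel` and the real
`CachedEarlyExit`:

    #[derive(PartialEq, Clone, Copy, Debug)]
    enum Label { Unwind, Return }

    struct CachedExit { label: Label, block: usize, last_cleanup: usize }

    // Newer entries are pushed last, so scanning from the back returns the
    // cache entry that already covers the most cleanups for this label.
    fn cached_early_exit(exits: &[CachedExit], label: Label) -> Option<(usize, usize)> {
        exits.iter().rev()
             .find(|e| e.label == label)
             .map(|e| (e.block, e.last_cleanup))
    }

    fn main() {
        let exits = vec![
            CachedExit { label: Label::Unwind, block: 7, last_cleanup: 1 },
            CachedExit { label: Label::Unwind, block: 9, last_cleanup: 3 },
        ];
        // Picks block 9, which already covers 3 cleanups; only cleanups added
        // after that point still need to be translated.
        assert_eq!(cached_early_exit(&exits, Label::Unwind), Some((9, 3)));
        assert_eq!(cached_early_exit(&exits, Label::Return), None);
    }
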
47 changes: 47 additions & 0 deletions src/test/codegen/drop.rs
@@ -0,0 +1,47 @@
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// compile-flags: -C no-prepopulate-passes
+
+#![crate_type = "lib"]
+
+struct SomeUniqueName;
+
+impl Drop for SomeUniqueName {
+    fn drop(&mut self) {
+    }
+}
+
+pub fn possibly_unwinding() {
+}
+
+// CHECK-LABEL: @droppy
+#[no_mangle]
+pub fn droppy() {
+// Check that there are exactly 6 drop calls. The cleanups for the unwinding should be reused, so
+// that's one new drop call per call to possibly_unwinding(), and finally 3 drop calls for the
+// regular function exit. We used to have problems with quadratic growth of drop calls in such
+// functions.
+// CHECK: call{{.*}}SomeUniqueName{{.*}}drop
+// CHECK: call{{.*}}SomeUniqueName{{.*}}drop
+// CHECK: call{{.*}}SomeUniqueName{{.*}}drop
+// CHECK: call{{.*}}SomeUniqueName{{.*}}drop
+// CHECK: call{{.*}}SomeUniqueName{{.*}}drop
+// CHECK: call{{.*}}SomeUniqueName{{.*}}drop
+// CHECK-NOT: call{{.*}}SomeUniqueName{{.*}}drop
+// The next line checks for the } that ends the function definition
+// CHECK-LABEL: {{^[}]}}
+    let _s = SomeUniqueName;
+    possibly_unwinding();
+    let _s = SomeUniqueName;
+    possibly_unwinding();
+    let _s = SomeUniqueName;
+    possibly_unwinding();
+}