From d11d586473cb8ef8c24a618c8d4dd6d52420606a Mon Sep 17 00:00:00 2001 From: Matthew Jasper Date: Sat, 16 Nov 2019 11:35:37 +0000 Subject: [PATCH 01/16] Temp: Initial test update --- src/test/mir-opt/basic_assignment.rs | 8 +- src/test/mir-opt/box_expr.rs | 41 ++--- src/test/mir-opt/const_prop/boxes.rs | 20 +- .../mir-opt/generator-storage-dead-unwind.rs | 69 +++---- src/test/mir-opt/graphviz.rs | 13 +- src/test/mir-opt/issue-38669.rs | 20 +- src/test/mir-opt/issue-49232.rs | 55 +++--- src/test/mir-opt/issue-62289.rs | 76 ++++---- src/test/mir-opt/loop_test.rs | 19 +- src/test/mir-opt/match-arm-scopes.rs | 135 +++++++------- src/test/mir-opt/match_false_edges.rs | 173 +++++++++--------- .../mir-opt/nll/region-subtyping-basic.rs | 6 +- .../mir-opt/no-spurious-drop-after-call.rs | 6 +- .../mir-opt/packed-struct-drop-aligned.rs | 12 +- src/test/mir-opt/retag.rs | 24 +-- src/test/mir-opt/simple-match.rs | 20 +- src/test/mir-opt/simplify_cfg.rs | 10 +- src/test/mir-opt/unusual-item-types.rs | 6 - 18 files changed, 341 insertions(+), 372 deletions(-) diff --git a/src/test/mir-opt/basic_assignment.rs b/src/test/mir-opt/basic_assignment.rs index ca0e9fa811a26..8fcf4d7613f88 100644 --- a/src/test/mir-opt/basic_assignment.rs +++ b/src/test/mir-opt/basic_assignment.rs @@ -39,14 +39,14 @@ fn main() { // StorageLive(_5); // StorageLive(_6); // _6 = move _4; -// replace(_5 <- move _6) -> [return: bb2, unwind: bb5]; +// replace(_5 <- move _6) -> [return: bb1, unwind: bb5]; // } // ... -// bb2: { -// drop(_6) -> [return: bb6, unwind: bb4]; +// bb1: { +// drop(_6) -> [return: bb2, unwind: bb6]; // } // ... // bb5 (cleanup): { -// drop(_6) -> bb4; +// drop(_6) -> bb6; // } // END rustc.main.SimplifyCfg-initial.after.mir diff --git a/src/test/mir-opt/box_expr.rs b/src/test/mir-opt/box_expr.rs index fa1a291858bec..197a46f0eb429 100644 --- a/src/test/mir-opt/box_expr.rs +++ b/src/test/mir-opt/box_expr.rs @@ -33,45 +33,40 @@ impl Drop for S { // StorageLive(_1); // StorageLive(_2); // _2 = Box(S); -// (*_2) = const S::new() -> [return: bb2, unwind: bb3]; +// (*_2) = const S::new() -> [return: bb1, unwind: bb7]; // } -// -// bb1 (cleanup): { -// resume; -// } -// -// bb2: { +// bb1: { // _1 = move _2; -// drop(_2) -> bb4; +// drop(_2) -> bb2; // } -// -// bb3 (cleanup): { -// drop(_2) -> bb1; -// } -// -// bb4: { +// bb2: { // StorageDead(_2); // StorageLive(_3); // StorageLive(_4); // _4 = move _1; -// _3 = const std::mem::drop::>(move _4) -> [return: bb5, unwind: bb7]; +// _3 = const std::mem::drop::>(move _4) -> [return: bb3, unwind: bb5]; // } -// -// bb5: { +// bb3: { // StorageDead(_4); // StorageDead(_3); // _0 = (); -// drop(_1) -> bb8; +// drop(_1) -> bb4; +// } +// bb4: { +// StorageDead(_1); +// return; +// } +// bb5 (cleanup): { +// drop(_4) -> bb6; // } // bb6 (cleanup): { -// drop(_1) -> bb1; +// drop(_1) -> bb8; // } // bb7 (cleanup): { -// drop(_4) -> bb6; +// drop(_2) -> bb8; // } -// bb8: { -// StorageDead(_1); -// return; +// bb8 (cleanup): { +// resume; // } // } // END rustc.main.ElaborateDrops.before.mir diff --git a/src/test/mir-opt/const_prop/boxes.rs b/src/test/mir-opt/const_prop/boxes.rs index cf134dadf2789..fe25d6a0a2d13 100644 --- a/src/test/mir-opt/const_prop/boxes.rs +++ b/src/test/mir-opt/const_prop/boxes.rs @@ -22,16 +22,16 @@ fn main() { // _2 = (*_3); // _1 = Add(move _2, const 0i32); // ... -// drop(_3) -> [return: bb2, unwind: bb1]; +// drop(_3) -> [return: bb1, unwind: bb2]; // } -// bb1 (cleanup): { -// resume; -// } -// bb2: { +// bb1: { // ... // _0 = (); // ... 
// } +// bb2 (cleanup): { +// resume; +// } // END rustc.main.ConstProp.before.mir // START rustc.main.ConstProp.after.mir // bb0: { @@ -43,14 +43,14 @@ fn main() { // _2 = (*_3); // _1 = Add(move _2, const 0i32); // ... -// drop(_3) -> [return: bb2, unwind: bb1]; +// drop(_3) -> [return: bb1, unwind: bb2]; // } -// bb1 (cleanup): { -// resume; -// } -// bb2: { +// bb1: { // ... // _0 = (); // ... // } +// bb2 (cleanup): { +// resume; +// } // END rustc.main.ConstProp.after.mir diff --git a/src/test/mir-opt/generator-storage-dead-unwind.rs b/src/test/mir-opt/generator-storage-dead-unwind.rs index 82b216a99cf55..6d0600100b280 100644 --- a/src/test/mir-opt/generator-storage-dead-unwind.rs +++ b/src/test/mir-opt/generator-storage-dead-unwind.rs @@ -49,66 +49,67 @@ fn main() { // StorageLive(_4); // _4 = Bar(const 6i32,); // ... -// _5 = yield(move _6) -> [resume: bb2, drop: bb4]; +// _5 = yield(move _6) -> [resume: bb1, drop: bb5]; // } -// bb1 (cleanup): { -// resume; -// } -// bb2: { +// bb1: { // ... // StorageLive(_7); // StorageLive(_8); // _8 = move _3; -// _7 = const take::(move _8) -> [return: bb7, unwind: bb9]; +// _7 = const take::(move _8) -> [return: bb2, unwind: bb11]; // } -// bb3 (cleanup): { -// StorageDead(_3); -// drop(_1) -> bb1; +// bb2: { +// StorageDead(_8); +// StorageDead(_7); +// StorageLive(_9); +// StorageLive(_10); +// _10 = move _4; +// _9 = const take::(move _10) -> [return: bb3, unwind: bb10]; // } -// bb4: { +// bb3: { +// StorageDead(_10); +// StorageDead(_9); // ... // StorageDead(_4); -// drop(_3) -> [return: bb5, unwind: bb3]; +// StorageDead(_3); +// drop(_1) -> [return: bb4, unwind: bb9]; +// } +// bb4: { +// return; // } // bb5: { +// ... // StorageDead(_3); -// drop(_1) -> [return: bb6, unwind: bb1]; +// drop(_1) -> [return: bb6, unwind: bb8]; // } // bb6: { -// generator_drop; +// StorageDead(_3); +// drop(_1) -> [return: bb7, unwind: bb9]; // } // bb7: { -// StorageDead(_8); -// StorageDead(_7); -// StorageLive(_9); -// StorageLive(_10); -// _10 = move _4; -// _9 = const take::(move _10) -> [return: bb10, unwind: bb11]; +// generator_drop; // } // bb8 (cleanup): { -// StorageDead(_4); // StorageDead(_3); -// drop(_1) -> bb1; +// drop(_2) -> bb9; // } // bb9 (cleanup): { -// StorageDead(_8); -// StorageDead(_7); -// goto -> bb8; +// resume; // } -// bb10: { +// bb10 (cleanup): { // StorageDead(_10); // StorageDead(_9); -// ... -// StorageDead(_4); -// StorageDead(_3); -// drop(_1) -> [return: bb12, unwind: bb1]; +// goto -> bb12; // } // bb11 (cleanup): { -// StorageDead(_10); -// StorageDead(_9); -// goto -> bb8; +// StorageDead(_8); +// StorageDead(_7); +// goto -> bb12; // } -// bb12: { -// return; +// bb12 (cleanup): { +// StorageDead(_4); +// StorageDead(_3); +// drop(_1) -> bb9; // } + // END rustc.main-{{closure}}.StateTransform.before.mir diff --git a/src/test/mir-opt/graphviz.rs b/src/test/mir-opt/graphviz.rs index fcbb189c1117b..f253388261a78 100644 --- a/src/test/mir-opt/graphviz.rs +++ b/src/test/mir-opt/graphviz.rs @@ -8,13 +8,10 @@ fn main() {} // END RUST SOURCE // START rustc.main.mir_map.0.dot // digraph Mir_0_3 { // The name here MUST be an ASCII identifier. -// graph [fontname="monospace"]; -// node [fontname="monospace"]; -// edge [fontname="monospace"]; -// label=>; -// bb0__0_3 [shape="none", label=<
<table border="0" cellborder="1" cellspacing="0"><tr><td bgcolor="gray" align="center" colspan="1">0</td></tr><tr><td align="left" balign="left">_0 = ()<br/></td></tr><tr><td align="left">goto</td></tr></table>>];
-// bb1__0_3 [shape="none", label=<<table border="0" cellborder="1" cellspacing="0"><tr><td bgcolor="gray" align="center" colspan="1">1</td></tr><tr><td align="left">resume</td></tr></table>>];
-// bb2__0_3 [shape="none", label=<<table border="0" cellborder="1" cellspacing="0"><tr><td bgcolor="gray" align="center" colspan="1">2</td></tr><tr><td align="left">return</td></tr></table>>];
-// bb0__0_3 -> bb2__0_3 [label=""];
+// graph [fontname="monospace"];
+// node [fontname="monospace"];
+// edge [fontname="monospace"];
+// label=<fn main() -&gt; ()<br align="left"/>>;
+// bb0__0_3 [shape="none", label=<<table border="0" cellborder="1" cellspacing="0"><tr><td bgcolor="gray" align="center" colspan="1">0</td></tr><tr><td align="left" balign="left">_0 = ()<br/></td></tr><tr><td align="left">return</td></tr></table>
>]; // } // END rustc.main.mir_map.0.dot diff --git a/src/test/mir-opt/issue-38669.rs b/src/test/mir-opt/issue-38669.rs index d980cc891dc40..6621707c74b78 100644 --- a/src/test/mir-opt/issue-38669.rs +++ b/src/test/mir-opt/issue-38669.rs @@ -16,35 +16,35 @@ fn main() { // StorageLive(_1); // _1 = const false; // FakeRead(ForLet, _1); -// goto -> bb2; +// goto -> bb1; // } -// bb1 (cleanup): { -// resume; +// bb1: { +// falseUnwind -> [real: bb2, cleanup: bb6]; // } // bb2: { -// falseUnwind -> [real: bb3, cleanup: bb1]; -// } -// bb3: { // StorageLive(_3); // StorageLive(_4); // _4 = _1; // FakeRead(ForMatchedPlace, _4); -// switchInt(_4) -> [false: bb5, otherwise: bb4]; +// switchInt(_4) -> [false: bb4, otherwise: bb3]; // } // ... -// bb5: { +// bb4: { // _3 = (); // StorageDead(_4); // StorageDead(_3); // _1 = const true; // _2 = (); -// goto -> bb2; +// goto -> bb1; // } -// bb6: { +// bb5: { // _0 = (); // StorageDead(_4); // StorageDead(_3); // StorageDead(_1); // return; // } +// bb6 (cleanup): { +// resume; +// } // END rustc.main.SimplifyCfg-initial.after.mir diff --git a/src/test/mir-opt/issue-49232.rs b/src/test/mir-opt/issue-49232.rs index 54c89b85f42d7..50b9971cf9aea 100644 --- a/src/test/mir-opt/issue-49232.rs +++ b/src/test/mir-opt/issue-49232.rs @@ -30,64 +30,55 @@ fn main() { // goto -> bb1; // } // bb1: { -// falseUnwind -> [real: bb3, cleanup: bb4]; +// falseUnwind -> [real: bb2, cleanup: bb11]; // } // bb2: { -// goto -> bb14; -// } -// bb3: { // StorageLive(_2); // StorageLive(_3); // _3 = const true; // FakeRead(ForMatchedPlace, _3); -// switchInt(_3) -> [false: bb5, otherwise: bb6]; -// } -// bb4 (cleanup): { -// resume; +// switchInt(_3) -> [false: bb3, otherwise: bb4]; // } -// bb5: { -// falseEdges -> [real: bb7, imaginary: bb6]; +// bb3: { +// falseEdges -> [real: bb5, imaginary: bb4]; // } -// bb6: { +// bb4: { // _0 = (); -// goto -> bb8; +// goto -> bb10; // } -// bb7: { +// bb5: { // _2 = const 4i32; -// goto -> bb12; -// } -// bb8: { -// StorageDead(_3); -// goto -> bb9; +// goto -> bb8; // } -// bb9: { -// StorageDead(_2); -// goto -> bb2; -// } -// bb10: { +// bb6: { // _4 = (); // unreachable; -// } -// bb11: { -// goto -> bb12; -// } -// bb12: { +// } +// bb7: { +// goto -> bb8; +// } +// bb8: { // FakeRead(ForLet, _2); // StorageDead(_3); // StorageLive(_5); -// StorageLive(_6); +// StorageLive(_6); // _6 = &_2; -// _5 = const std::mem::drop::<&i32>(move _6) -> [return: bb13, unwind: bb4]; +// _5 = const std::mem::drop::<&i32>(move _6) -> [return: bb9, unwind: bb11]; // } -// bb13: { +// bb9: { // StorageDead(_6); // StorageDead(_5); // _1 = (); // StorageDead(_2); // goto -> bb1; // } -// bb14: { +// bb10: { +// StorageDead(_3); +// StorageDead(_2); // return; // } +// bb11 (cleanup): { +// resume; +// } // } // END rustc.main.mir_map.0.mir diff --git a/src/test/mir-opt/issue-62289.rs b/src/test/mir-opt/issue-62289.rs index 8e619ffdf8b96..b2b1a71e10291 100644 --- a/src/test/mir-opt/issue-62289.rs +++ b/src/test/mir-opt/issue-62289.rs @@ -24,68 +24,68 @@ fn main() { // StorageLive(_3); // StorageLive(_4); // _4 = std::option::Option::::None; -// _3 = const as std::ops::Try>::into_result(move _4) -> [return: bb2, unwind: bb3]; +// _3 = const as std::ops::Try>::into_result(move _4) -> [return: bb1, unwind: bb12]; // } -// bb1 (cleanup): { -// resume; -// } -// bb2: { +// bb1: { // StorageDead(_4); // _5 = discriminant(_3); -// switchInt(move _5) -> [0isize: bb4, 1isize: bb6, otherwise: bb5]; -// } -// bb3 (cleanup): { -// drop(_2) -> bb1; -// } -// bb4: { 
-// StorageLive(_10); -// _10 = ((_3 as Ok).0: u32); -// (*_2) = _10; -// StorageDead(_10); -// _1 = move _2; -// drop(_2) -> [return: bb12, unwind: bb11]; +// switchInt(move _5) -> [0isize: bb6, 1isize: bb3, otherwise: bb2]; // } -// bb5: { +// bb2: { // unreachable; // } -// bb6: { +// bb3: { // StorageLive(_6); // _6 = ((_3 as Err).0: std::option::NoneError); // StorageLive(_8); // StorageLive(_9); // _9 = _6; -// _8 = const >::from(move _9) -> [return: bb8, unwind: bb3]; -// } -// bb7: { -// return; +// _8 = const >::from(move _9) -> [return: bb4, unwind: bb12]; // } -// bb8: { +// bb4: { // StorageDead(_9); -// _0 = const > as std::ops::Try>::from_error(move _8) -> [return: bb9, unwind: bb3]; +// _0 = const > as std::ops::Try>::from_error(move _8) -> [return: bb5, unwind: bb12]; // } -// bb9: { +// bb5: { // StorageDead(_8); // StorageDead(_6); -// drop(_2) -> bb10; +// drop(_2) -> bb9; // } -// bb10: { +// bb6: { +// StorageLive(_10); +// _10 = ((_3 as Ok).0: u32); +// (*_2) = _10; +// StorageDead(_10); +// _1 = move _2; +// drop(_2) -> [return: bb7, unwind: bb11]; +// } +// bb7: { // StorageDead(_2); +// _0 = std::option::Option::>::Some(move _1,); +// drop(_1) -> bb8; +// } +// bb8: { // StorageDead(_1); // StorageDead(_3); -// goto -> bb7; +// goto -> bb10; // } -// bb11 (cleanup): { -// drop(_1) -> bb1; -// } -// bb12: { +// bb9: { // StorageDead(_2); -// _0 = std::option::Option::>::Some(move _1,); -// drop(_1) -> bb13; -// } -// bb13: { // StorageDead(_1); // StorageDead(_3); -// goto -> bb7; +// goto -> bb10; +// } +// bb10: { +// return; +// } +// bb11 (cleanup): { +// drop(_1) -> bb13; +// } +// bb12 (cleanup): { +// drop(_2) -> bb13; +// } +// bb13 (cleanup): { +// resume; // } // } // END rustc.test.ElaborateDrops.before.mir diff --git a/src/test/mir-opt/loop_test.rs b/src/test/mir-opt/loop_test.rs index 418febbdc01eb..f0bd001cf8374 100644 --- a/src/test/mir-opt/loop_test.rs +++ b/src/test/mir-opt/loop_test.rs @@ -18,27 +18,26 @@ fn main() { // END RUST SOURCE // START rustc.main.SimplifyCfg-qualify-consts.after.mir // ... -// bb1 (cleanup): { -// resume; -// } -// ... -// bb3: { // Entry into the loop +// bb2: { // Entry into the loop // _1 = (); // StorageDead(_2); // StorageDead(_1); // StorageLive(_4); -// goto -> bb5; +// goto -> bb4; // } // ... -// bb5: { // The loop_block -// falseUnwind -> [real: bb6, cleanup: bb1]; +// bb4: { // The loop_block +// falseUnwind -> [real: bb5, cleanup: bb6]; // } -// bb6: { // The loop body (body_block) +// bb5: { // The loop body (body_block) // StorageLive(_6); // _6 = const 1i32; // FakeRead(ForLet, _6); // StorageDead(_6); -// goto -> bb5; +// goto -> bb4; +// } +// bb6 (cleanup): { +// resume; // } // ... 
// END rustc.main.SimplifyCfg-qualify-consts.after.mir diff --git a/src/test/mir-opt/match-arm-scopes.rs b/src/test/mir-opt/match-arm-scopes.rs index 7afc3bbd6fae8..27feb4ffb354a 100644 --- a/src/test/mir-opt/match-arm-scopes.rs +++ b/src/test/mir-opt/match-arm-scopes.rs @@ -61,38 +61,28 @@ fn main() { // } // bb0: { // FakeRead(ForMatchedPlace, _2); -// switchInt((_2.0: bool)) -> [false: bb2, otherwise: bb3]; +// switchInt((_2.0: bool)) -> [false: bb1, otherwise: bb4]; // } -// bb1 (cleanup): { -// resume; +// bb1: { +// falseEdges -> [real: bb7, imaginary: bb2]; // } -// bb2: { // pre-binding for arm 1 first pattern -// falseEdges -> [real: bb9, imaginary: bb4]; +// bb2: { +// falseEdges -> [real: bb13, imaginary: bb3]; // } // bb3: { -// switchInt((_2.1: bool)) -> [false: bb4, otherwise: bb5]; +// falseEdges -> [real: bb21, imaginary: bb22]; // } -// bb4: { // pre-binding for arm 1 second pattern -// falseEdges -> [real: bb18, imaginary: bb6]; +// bb4: { +// switchInt((_2.1: bool)) -> [false: bb2, otherwise: bb5]; // } // bb5: { -// switchInt((_2.0: bool)) -> [false: bb7, otherwise: bb6]; -// } -// bb6: { // pre-binding for arm 2 first pattern -// falseEdges -> [real: bb26, imaginary: bb7]; +// switchInt((_2.0: bool)) -> [false: bb22, otherwise: bb3]; // } -// bb7: { // bindings for arm 2 - second pattern -// StorageLive(_15); -// _15 = (_2.1: bool); -// StorageLive(_16); -// _16 = move (_2.2: std::string::String); -// goto -> bb25; -// } -// bb8: { // arm 1 +// bb6: { // arm 1 // _0 = const 1i32; -// drop(_7) -> [return: bb24, unwind: bb14]; +// drop(_7) -> [return: bb19, unwind: bb27]; // } -// bb9: { // guard - first time +// bb7: { // guard - first time // StorageLive(_6); // _6 = &(_2.1: bool); // StorageLive(_8); @@ -103,34 +93,23 @@ fn main() { // StorageLive(_10); // _10 = _1; // FakeRead(ForMatchedPlace, _10); -// switchInt(_10) -> [false: bb11, otherwise: bb10]; +// switchInt(_10) -> [false: bb9, otherwise: bb8]; // } -// bb10: { -// falseEdges -> [real: bb12, imaginary: bb11]; +// bb8: { +// falseEdges -> [real: bb10, imaginary: bb9]; // } -// bb11: { // `else` block - first time +// bb9: { // `else` block - first time // _9 = (*_6); // StorageDead(_10); -// switchInt(move _9) -> [false: bb17, otherwise: bb16]; +// switchInt(move _9) -> [false: bb12, otherwise: bb11]; // } -// bb12: { // `return 3` - first time +// bb10: { // `return 3` - first time // _0 = const 3i32; // StorageDead(_10); // StorageDead(_9); -// StorageDead(_8); -// StorageDead(_6); -// goto -> bb15; -// } -// bb13: { -// return; -// } -// bb14 (cleanup): { -// drop(_2) -> bb1; -// } -// bb15: { -// drop(_2) -> [return: bb13, unwind: bb1]; +// goto -> bb25; // } -// bb16: { +// bb11: { // StorageDead(_9); // FakeRead(ForMatchGuard, _3); // FakeRead(ForMatchGuard, _4); @@ -140,15 +119,15 @@ fn main() { // _5 = (_2.1: bool); // StorageLive(_7); // _7 = move (_2.2: std::string::String); -// goto -> bb8; +// goto -> bb6; // } -// bb17: { // guard otherwise case - first time +// bb12: { // guard otherwise case - first time // StorageDead(_9); // StorageDead(_8); // StorageDead(_6); -// falseEdges -> [real: bb3, imaginary: bb4]; +// falseEdges -> [real: bb4, imaginary: bb2]; // } -// bb18: { // guard - second time +// bb13: { // guard - second time // StorageLive(_6); // _6 = &(_2.0: bool); // StorageLive(_8); @@ -159,25 +138,23 @@ fn main() { // StorageLive(_13); // _13 = _1; // FakeRead(ForMatchedPlace, _13); -// switchInt(_13) -> [false: bb20, otherwise: bb19]; +// switchInt(_13) -> [false: bb15, otherwise: 
bb14]; // } -// bb19: { -// falseEdges -> [real: bb21, imaginary: bb20]; +// bb14: { +// falseEdges -> [real: bb16, imaginary: bb15]; // } -// bb20: { // `else` block - second time +// bb15: { // `else` block - second time // _12 = (*_6); // StorageDead(_13); -// switchInt(move _12) -> [false: bb23, otherwise: bb22]; +// switchInt(move _12) -> [false: bb18, otherwise: bb17]; // } -// bb21: { +// bb16: { // `return 3` - second time // _0 = const 3i32; // StorageDead(_13); // StorageDead(_12); -// StorageDead(_8); -// StorageDead(_6); -// goto -> bb15; +// goto -> bb25; // } -// bb22: { // bindings for arm 1 +// bb17: { // bindings for arm 1 // StorageDead(_12); // FakeRead(ForMatchGuard, _3); // FakeRead(ForMatchGuard, _4); @@ -187,40 +164,60 @@ fn main() { // _5 = (_2.0: bool); // StorageLive(_7); // _7 = move (_2.2: std::string::String); -// goto -> bb8; +// goto -> bb6; // } -// bb23: { // Guard otherwise case - second time +// bb18: { // Guard otherwise case - second time // StorageDead(_12); // StorageDead(_8); // StorageDead(_6); -// falseEdges -> [real: bb5, imaginary: bb6]; +// falseEdges -> [real: bb5, imaginary: bb3]; // } -// bb24: { // rest of arm 1 +// bb19: { // rest of arm 1 // StorageDead(_7); // StorageDead(_5); // StorageDead(_8); // StorageDead(_6); -// goto -> bb28; +// goto -> bb24; // } -// bb25: { // arm 2 +// bb20: { // arm 2 // _0 = const 2i32; -// drop(_16) -> [return: bb27, unwind: bb14]; +// drop(_16) -> [return: bb23, unwind: bb27]; // } -// bb26: { // bindings for arm 2 - first pattern +// bb21: { // bindings for arm 2 - first pattern // StorageLive(_15); // _15 = (_2.1: bool); // StorageLive(_16); // _16 = move (_2.2: std::string::String); -// goto -> bb25; +// goto -> bb20; // } - -// bb27: { // rest of arm 2 +// bb22: { // bindings for arm 2 - second pattern +// StorageLive(_15); +// _15 = (_2.1: bool); +// StorageLive(_16); +// _16 = move (_2.2: std::string::String); +// goto -> bb20; +// } +// bb23: { // rest of arm 2 // StorageDead(_16); // StorageDead(_15); -// goto -> bb28; +// goto -> bb24; // } -// bb28: { -// drop(_2) -> [return: bb13, unwind: bb1]; +// bb24: { +// drop(_2) -> [return: bb26, unwind: bb28]; +// } +// bb25: { +// StorageDead(_8); +// StorageDead(_6); +// drop(_2) -> [return: bb26, unwind: bb28]; +// } +// bb26: { +// return; +// } +// bb27 (cleanup): { +// drop(_2) -> bb28; +// } +// bb28 (cleanup): { +// resume; // } // END rustc.complicated_match.SimplifyCfg-initial.after.mir // START rustc.complicated_match.ElaborateDrops.after.mir diff --git a/src/test/mir-opt/match_false_edges.rs b/src/test/mir-opt/match_false_edges.rs index 237828d9020db..afc49c48a7f05 100644 --- a/src/test/mir-opt/match_false_edges.rs +++ b/src/test/mir-opt/match_false_edges.rs @@ -4,7 +4,7 @@ fn guard() -> bool { false } -fn guard2(_: i32) -> bool { +fn guard2(_:i32) -> bool { true } @@ -45,36 +45,32 @@ fn main() { // _2 = std::option::Option::::Some(const 42i32,); // FakeRead(ForMatchedPlace, _2); // _3 = discriminant(_2); -// switchInt(move _3) -> [0isize: bb2, 1isize: bb3, otherwise: bb5]; +// switchInt(move _3) -> [0isize: bb3, 1isize: bb1, otherwise: bb4]; // } -// bb1 (cleanup): { -// resume; +// bb1: { +// falseEdges -> [real: bb5, imaginary: bb2]; //pre_binding1 // } -// bb2: { // pre_binding3 and arm3 -// _1 = (const 3i32, const 3i32); -// goto -> bb11; +// bb2: { +// falseEdges -> [real: bb9, imaginary: bb3]; //pre_binding2 // } -// bb3: { -// falseEdges -> [real: bb6, imaginary: bb4]; //pre_binding1 +// bb3: { // pre_binding3 and arm3 +// _1 = (const 
3i32, const 3i32); +// goto -> bb10; // } // bb4: { -// falseEdges -> [real: bb10, imaginary: bb2]; //pre_binding2 -// } -// bb5: { // unreachable; // } -// bb6: { // binding1 and guard +// bb5: { // binding1 and guard // StorageLive(_6); -// _11 = const full_tested_match::promoted[0]; -// _6 = &(((*_11) as Some).0: i32); +// _6 = &(((promoted[0]: std::option::Option) as Some).0: i32); // _4 = &shallow _2; // StorageLive(_7); -// _7 = const guard() -> [return: bb7, unwind: bb1]; +// _7 = const guard() -> [return: bb6, unwind: bb11]; // } -// bb7: { // end of guard -// switchInt(move _7) -> [false: bb9, otherwise: bb8]; +// bb6: { // end of guard +// switchInt(move _7) -> [false: bb8, otherwise: bb7]; // } -// bb8: { // arm1 +// bb7: { // arm1 // StorageDead(_7); // FakeRead(ForMatchGuard, _4); // FakeRead(ForGuardBinding, _6); @@ -86,14 +82,14 @@ fn main() { // StorageDead(_8); // StorageDead(_5); // StorageDead(_6); -// goto -> bb11; +// goto -> bb10; // } -// bb9: { // to pre_binding2 +// bb8: { // to pre_binding2 // StorageDead(_7); // StorageDead(_6); -// goto -> bb4; +// goto -> bb2; // } -// bb10: { // arm2 +// bb9: { // arm2 // StorageLive(_9); // _9 = ((_2 as Some).0: i32); // StorageLive(_10); @@ -101,14 +97,17 @@ fn main() { // _1 = (const 2i32, move _10); // StorageDead(_10); // StorageDead(_9); -// goto -> bb11; +// goto -> bb10; // } -// bb11: { +// bb10: { // match exit // StorageDead(_2); // StorageDead(_1); // _0 = (); // return; // } +// bb11 (cleanup): { +// resume; +// } // END rustc.full_tested_match.PromoteTemps.after.mir // // START rustc.full_tested_match2.PromoteTemps.before.mir @@ -117,41 +116,28 @@ fn main() { // _2 = std::option::Option::::Some(const 42i32,); // FakeRead(ForMatchedPlace, _2); // _3 = discriminant(_2); -// switchInt(move _3) -> [0isize: bb2, 1isize: bb3, otherwise: bb5]; -// } -// bb1 (cleanup): { -// resume; -// } -// bb2: { // pre_binding2 -// falseEdges -> [real: bb10, imaginary: bb4]; +// switchInt(move _3) -> [0isize: bb2, 1isize: bb1, otherwise: bb3]; // } -// bb3: { // pre_binding1 -// falseEdges -> [real: bb6, imaginary: bb2]; +// bb1: { +// falseEdges -> [real: bb4, imaginary: bb2]; // } -// bb4: { // binding3 and arm3 -// StorageLive(_9); -// _9 = ((_2 as Some).0: i32); -// StorageLive(_10); -// _10 = _9; -// _1 = (const 2i32, move _10); -// StorageDead(_10); -// StorageDead(_9); -// goto -> bb11; +// bb2: { +// falseEdges -> [real: bb8, imaginary: bb9]; // } -// bb5: { +// bb3: { // unreachable; // } -// bb6: { +// bb4: { // binding1 and guard // StorageLive(_6); // _6 = &((_2 as Some).0: i32); // _4 = &shallow _2; // StorageLive(_7); -// _7 = const guard() -> [return: bb7, unwind: bb1]; +// _7 = const guard() -> [return: bb5, unwind: bb11]; // } -// bb7: { // end of guard -// switchInt(move _7) -> [false: bb9, otherwise: bb8]; +// bb5: { // end of guard +// switchInt(move _7) -> [false: bb7, otherwise: bb6]; // } -// bb8: { +// bb6: { // StorageDead(_7); // FakeRead(ForMatchGuard, _4); // FakeRead(ForGuardBinding, _6); @@ -163,23 +149,36 @@ fn main() { // StorageDead(_8); // StorageDead(_5); // StorageDead(_6); -// goto -> bb11; +// goto -> bb10; // } -// bb9: { // to pre_binding3 (can skip 2 since this is `Some`) +// bb7: { // to pre_binding3 (can skip 2 since this is `Some`) // StorageDead(_7); // StorageDead(_6); -// falseEdges -> [real: bb4, imaginary: bb2]; +// falseEdges -> [real: bb9, imaginary: bb2]; // } -// bb10: { // arm2 +// bb8: { // arm2 // _1 = (const 3i32, const 3i32); -// goto -> bb11; +// goto -> bb10; // } -// bb11: 
{ +// bb9: { // binding3 and arm3 +// StorageLive(_9); +// _9 = ((_2 as Some).0: i32); +// StorageLive(_10); +// _10 = _9; +// _1 = (const 2i32, move _10); +// StorageDead(_10); +// StorageDead(_9); +// goto -> bb10; +// } +// bb10: { // StorageDead(_2); // StorageDead(_1); // _0 = (); // return; // } +// bb11 (cleanup): { +// resume; +// } // END rustc.full_tested_match2.PromoteTemps.before.mir // // START rustc.main.PromoteTemps.before.mir @@ -188,38 +187,28 @@ fn main() { // _2 = std::option::Option::::Some(const 1i32,); // FakeRead(ForMatchedPlace, _2); // _4 = discriminant(_2); -// switchInt(move _4) -> [1isize: bb3, otherwise: bb2]; +// switchInt(move _4) -> [1isize: bb1, otherwise: bb2]; // } -// bb1 (cleanup): { -// resume; +// bb1: { +// falseEdges -> [real: bb4, imaginary: bb2]; // } // bb2: { -// falseEdges -> [real: bb10, imaginary: bb5]; +// falseEdges -> [real: bb8, imaginary: bb3]; // } // bb3: { -// falseEdges -> [real: bb6, imaginary: bb2]; +// falseEdges -> [real: bb9, imaginary: bb13]; // } // bb4: { -// StorageLive(_14); -// _14 = _2; -// _1 = const 4i32; -// StorageDead(_14); -// goto -> bb15; -// } -// bb5: { -// falseEdges -> [real: bb11, imaginary: bb4]; -// } -// bb6: { //end of guard1 // StorageLive(_7); // _7 = &((_2 as Some).0: i32); // _5 = &shallow _2; // StorageLive(_8); -// _8 = const guard() -> [return: bb7, unwind: bb1]; +// _8 = const guard() -> [return: bb5, unwind: bb15]; // } -// bb7: { -// switchInt(move _8) -> [false: bb9, otherwise: bb8]; +// bb5: { //end of guard1 +// switchInt(move _8) -> [false: bb7, otherwise: bb6]; // } -// bb8: { +// bb6: { // StorageDead(_8); // FakeRead(ForMatchGuard, _5); // FakeRead(ForGuardBinding, _7); @@ -228,34 +217,34 @@ fn main() { // _1 = const 1i32; // StorageDead(_6); // StorageDead(_7); -// goto -> bb15; +// goto -> bb14; // } -// bb9: { +// bb7: { // StorageDead(_8); // StorageDead(_7); // falseEdges -> [real: bb2, imaginary: bb2]; // } -// bb10: { // binding2 & arm2 +// bb8: { // binding2 & arm2 // StorageLive(_9); // _9 = _2; // _1 = const 2i32; // StorageDead(_9); -// goto -> bb15; +// goto -> bb14; // } -// bb11: { // binding3: Some(y) if guard2(y) +// bb9: { // binding3: Some(y) if guard2(y) // StorageLive(_11); // _11 = &((_2 as Some).0: i32); // _5 = &shallow _2; // StorageLive(_12); // StorageLive(_13); // _13 = (*_11); -// _12 = const guard2(move _13) -> [return: bb12, unwind: bb1]; +// _12 = const guard2(move _13) -> [return: bb10, unwind: bb15]; // } -// bb12: { // end of guard2 +// bb10: { // end of guard2 // StorageDead(_13); -// switchInt(move _12) -> [false: bb14, otherwise: bb13]; +// switchInt(move _12) -> [false: bb12, otherwise: bb11]; // } -// bb13: { // binding4 & arm4 +// bb11: { // binding4 & arm4 // StorageDead(_12); // FakeRead(ForMatchGuard, _5); // FakeRead(ForGuardBinding, _11); @@ -264,17 +253,27 @@ fn main() { // _1 = const 3i32; // StorageDead(_10); // StorageDead(_11); -// goto -> bb15; +// goto -> bb14; // } -// bb14: { +// bb12: { // StorageDead(_12); // StorageDead(_11); -// falseEdges -> [real: bb4, imaginary: bb4]; +// falseEdges -> [real: bb13, imaginary: bb13]; // } -// bb15: { +// bb13: { +// StorageLive(_14); +// _14 = _2; +// _1 = const 4i32; +// StorageDead(_14); +// goto -> bb14; +// } +// bb14: { // StorageDead(_2); // StorageDead(_1); // _0 = (); // return; // } +// bb15 (cleanup): { +// resume; +// } // END rustc.main.PromoteTemps.before.mir diff --git a/src/test/mir-opt/nll/region-subtyping-basic.rs b/src/test/mir-opt/nll/region-subtyping-basic.rs index 
16e357fc16255..2dca483a37963 100644 --- a/src/test/mir-opt/nll/region-subtyping-basic.rs +++ b/src/test/mir-opt/nll/region-subtyping-basic.rs @@ -22,9 +22,9 @@ fn main() { // END RUST SOURCE // START rustc.main.nll.0.mir -// | '_#2r | U0 | {bb2[0..=8], bb3[0], bb5[0..=2]} -// | '_#3r | U0 | {bb2[1..=8], bb3[0], bb5[0..=2]} -// | '_#4r | U0 | {bb2[4..=8], bb3[0], bb5[0..=2]} +// | '_#2r | U0 | {bb1[0..=8], bb2[0], bb4[0..=2]} +// | '_#3r | U0 | {bb1[1..=8], bb2[0], bb4[0..=2]} +// | '_#4r | U0 | {bb1[4..=8], bb2[0], bb4[0..=2]} // END rustc.main.nll.0.mir // START rustc.main.nll.0.mir // let _2: &'_#3r usize; diff --git a/src/test/mir-opt/no-spurious-drop-after-call.rs b/src/test/mir-opt/no-spurious-drop-after-call.rs index 782bc31186ca5..e4e906edd195a 100644 --- a/src/test/mir-opt/no-spurious-drop-after-call.rs +++ b/src/test/mir-opt/no-spurious-drop-after-call.rs @@ -10,11 +10,11 @@ fn main() { // END RUST SOURCE // START rustc.main.ElaborateDrops.before.mir -// bb2: { +// bb1: { // StorageDead(_3); -// _1 = const std::mem::drop::(move _2) -> [return: bb3, unwind: bb4]; +// _1 = const std::mem::drop::(move _2) -> [return: bb2, unwind: bb3]; // } -// bb3: { +// bb2: { // StorageDead(_2); // StorageDead(_4); // StorageDead(_1); diff --git a/src/test/mir-opt/packed-struct-drop-aligned.rs b/src/test/mir-opt/packed-struct-drop-aligned.rs index 113f81c441f7c..fa6479cdc2316 100644 --- a/src/test/mir-opt/packed-struct-drop-aligned.rs +++ b/src/test/mir-opt/packed-struct-drop-aligned.rs @@ -37,23 +37,23 @@ impl Drop for Droppy { // _6 = move (_1.0: Aligned); // drop(_6) -> [return: bb4, unwind: bb3]; // } -// bb1 (cleanup): { -// resume; -// } -// bb2: { +// bb1: { // StorageDead(_1); // return; // } +// bb2 (cleanup): { +// resume; +// } // bb3 (cleanup): { // (_1.0: Aligned) = move _4; -// drop(_1) -> bb1; +// drop(_1) -> bb2; // } // bb4: { // StorageDead(_6); // (_1.0: Aligned) = move _4; // StorageDead(_4); // _0 = (); -// drop(_1) -> [return: bb2, unwind: bb1]; +// drop(_1) -> [return: bb1, unwind: bb2]; // } // } // END rustc.main.EraseRegions.before.mir diff --git a/src/test/mir-opt/retag.rs b/src/test/mir-opt/retag.rs index 1c88a9e4d5a32..00795dc56df4b 100644 --- a/src/test/mir-opt/retag.rs +++ b/src/test/mir-opt/retag.rs @@ -65,12 +65,12 @@ fn main() { // ... // bb0: { // ... -// _3 = const Test::foo(move _4, move _6) -> [return: bb2, unwind: bb3]; +// _3 = const Test::foo(move _4, move _6) -> [return: bb1, unwind: bb7]; // } // // ... // -// bb2: { +// bb1: { // Retag(_3); // ... // _9 = move _3; @@ -82,23 +82,25 @@ fn main() { // _10 = move _8; // Retag(_10); // ... -// _12 = &raw mut (*_10); +// _13 = &mut (*_10); +// Retag(_13); +// _12 = move _13 as *mut i32 (Misc); // Retag([raw] _12); // ... -// _15 = move _16(move _17) -> bb5; +// _16 = move _17(move _18) -> bb3; // } // -// bb5: { -// Retag(_15); +// bb3: { +// Retag(_16); // ... -// _19 = const Test::foo_shr(move _20, move _22) -> [return: bb6, unwind: bb7]; +// _20 = const Test::foo_shr(move _21, move _23) -> [return: bb4, unwind: bb6]; // } // // ... // } // END rustc.main.EraseRegions.after.mir // START rustc.main-{{closure}}.EraseRegions.after.mir -// fn main::{{closure}}#0(_1: &[closure@main::{{closure}}#0], _2: &i32) -> &i32 { +// fn main::{{closure}}#0(_1: &[closure@HirId { owner: DefIndex(13), local_id: 72 }], _2: &i32) -> &i32 { // ... 
// bb0: { // Retag([fn entry] _1); @@ -113,8 +115,8 @@ fn main() { // } // } // END rustc.main-{{closure}}.EraseRegions.after.mir -// START rustc.ptr-drop_in_place.Test.SimplifyCfg-make_shim.after.mir -// fn std::intrinsics::drop_in_place(_1: *mut Test) -> () { +// START rustc.ptr-real_drop_in_place.Test.SimplifyCfg-make_shim.after.mir +// fn std::ptr::real_drop_in_place(_1: &mut Test) -> () { // ... // bb0: { // Retag([raw] _1); @@ -126,4 +128,4 @@ fn main() { // return; // } // } -// END rustc.ptr-drop_in_place.Test.SimplifyCfg-make_shim.after.mir +// END rustc.ptr-real_drop_in_place.Test.SimplifyCfg-make_shim.after.mir diff --git a/src/test/mir-opt/simple-match.rs b/src/test/mir-opt/simple-match.rs index fc1a3bb1bf453..9e5709e9d5b06 100644 --- a/src/test/mir-opt/simple-match.rs +++ b/src/test/mir-opt/simple-match.rs @@ -14,26 +14,20 @@ fn main() {} // START rustc.match_bool.mir_map.0.mir // bb0: { // FakeRead(ForMatchedPlace, _1); -// switchInt(_1) -> [false: bb3, otherwise: bb2]; +// switchInt(_1) -> [false: bb2, otherwise: bb1]; // } -// bb1 (cleanup): { -// resume; +// bb1: { +// falseEdges -> [real: bb3, imaginary: bb2]; // } // bb2: { -// falseEdges -> [real: bb4, imaginary: bb3]; -// } -// bb3: { // _0 = const 20usize; -// goto -> bb5; +// goto -> bb4; // } -// bb4: { +// bb3: { // _0 = const 10usize; -// goto -> bb5; -// } -// bb5: { -// goto -> bb6; +// goto -> bb4; // } -// bb6: { +// bb4: { // return; // } // END rustc.match_bool.mir_map.0.mir diff --git a/src/test/mir-opt/simplify_cfg.rs b/src/test/mir-opt/simplify_cfg.rs index ef843f7158130..adecf80411736 100644 --- a/src/test/mir-opt/simplify_cfg.rs +++ b/src/test/mir-opt/simplify_cfg.rs @@ -19,20 +19,20 @@ fn bar() -> bool { // goto -> bb1; // } // bb1: { -// falseUnwind -> [real: bb3, cleanup: bb4]; +// falseUnwind -> [real: bb2, cleanup: bb11]; // } // ... -// bb11: { +// bb9: { // ... // goto -> bb1; // } // END rustc.main.SimplifyCfg-initial.before.mir // START rustc.main.SimplifyCfg-initial.after.mir // bb0: { -// falseUnwind -> [real: bb1, cleanup: bb2]; +// falseUnwind -> [real: bb1, cleanup: bb6]; // } // ... -// bb5: { +// bb4: { // ... 
// goto -> bb0; // } @@ -43,7 +43,7 @@ fn bar() -> bool { // } // bb1: { // StorageLive(_2); -// _2 = const bar() -> bb3; +// _2 = const bar() -> bb2; // } // END rustc.main.SimplifyCfg-early-opt.before.mir // START rustc.main.SimplifyCfg-early-opt.after.mir diff --git a/src/test/mir-opt/unusual-item-types.rs b/src/test/mir-opt/unusual-item-types.rs index 88cfb62a0d094..3590bd1045bf8 100644 --- a/src/test/mir-opt/unusual-item-types.rs +++ b/src/test/mir-opt/unusual-item-types.rs @@ -30,9 +30,6 @@ fn main() { // _0 = const 2i32; // return; // } -// bb1 (cleanup): { -// resume; -// } // END rustc.{{impl}}-ASSOCIATED_CONSTANT.mir_map.0.mir // START rustc.E-V-{{constant}}.mir_map.0.mir @@ -40,9 +37,6 @@ fn main() { // _0 = const 5isize; // return; // } -// bb1 (cleanup): { -// resume; -// } // END rustc.E-V-{{constant}}.mir_map.0.mir // START rustc.ptr-drop_in_place.std__vec__Vec_i32_.AddMovesForPackedDrops.before.mir From 82c4fa1eb2ff895a2495bde231284e26249acc19 Mon Sep 17 00:00:00 2001 From: Matthew Jasper Date: Sat, 16 Nov 2019 13:23:31 +0000 Subject: [PATCH 02/16] Temp: Initial impl --- src/librustc_mir_build/build/block.rs | 18 +- src/librustc_mir_build/build/expr/into.rs | 27 +- src/librustc_mir_build/build/matches/mod.rs | 12 +- src/librustc_mir_build/build/matches/test.rs | 6 +- src/librustc_mir_build/build/mod.rs | 91 +- src/librustc_mir_build/build/scope.rs | 1097 ++++++++---------- 6 files changed, 563 insertions(+), 688 deletions(-) diff --git a/src/librustc_mir_build/build/block.rs b/src/librustc_mir_build/build/block.rs index df5526ad76281..64cca99b2ebd5 100644 --- a/src/librustc_mir_build/build/block.rs +++ b/src/librustc_mir_build/build/block.rs @@ -26,14 +26,16 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { self.in_opt_scope(opt_destruction_scope.map(|de| (de, source_info)), move |this| { this.in_scope((region_scope, source_info), LintLevel::Inherited, move |this| { if targeted_by_break { - // This is a `break`-able block - let exit_block = this.cfg.start_new_block(); - let block_exit = - this.in_breakable_scope(None, exit_block, destination.clone(), |this| { - this.ast_block_stmts(destination, block, span, stmts, expr, safety_mode) - }); - this.cfg.goto(unpack!(block_exit), source_info, exit_block); - exit_block.unit() + this.in_breakable_scope(None, destination.clone(), span, |this| { + Some(this.ast_block_stmts( + destination, + block, + span, + stmts, + expr, + safety_mode, + )) + }) } else { this.ast_block_stmts(destination, block, span, stmts, expr, safety_mode) } diff --git a/src/librustc_mir_build/build/expr/into.rs b/src/librustc_mir_build/build/expr/into.rs index 4583e244f493d..4bea0f4801405 100644 --- a/src/librustc_mir_build/build/expr/into.rs +++ b/src/librustc_mir_build/build/expr/into.rs @@ -134,26 +134,22 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { // body, even when the exact code in the body cannot unwind let loop_block = this.cfg.start_new_block(); - let exit_block = this.cfg.start_new_block(); // Start the loop. 
this.cfg.goto(block, source_info, loop_block); this.in_breakable_scope( Some(loop_block), - exit_block, destination.clone(), + expr_span, move |this| { // conduct the test, if necessary let body_block = this.cfg.start_new_block(); - let diverge_cleanup = this.diverge_cleanup(); + this.diverge_from(loop_block); this.cfg.terminate( loop_block, source_info, - TerminatorKind::FalseUnwind { - real_target: body_block, - unwind: Some(diverge_cleanup), - }, + TerminatorKind::FalseUnwind { real_target: body_block, unwind: None }, ); // The “return” value of the loop body must always be an unit. We therefore @@ -162,9 +158,9 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { // Execute the body, branching back to the test. let body_block_end = unpack!(this.into(&tmp, body_block, body)); this.cfg.goto(body_block_end, source_info, loop_block); + None }, - ); - exit_block.unit() + ) } ExprKind::Call { ty, fun, args, from_hir_call } => { let intrinsic = match ty.kind { @@ -211,17 +207,17 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { .collect(); let success = this.cfg.start_new_block(); - let cleanup = this.diverge_cleanup(); this.record_operands_moved(&args); + this.diverge_from(block); this.cfg.terminate( block, source_info, TerminatorKind::Call { func: fun, args, - cleanup: Some(cleanup), + cleanup: None, // FIXME(varkor): replace this with an uninhabitedness-based check. // This requires getting access to the current module to call // `tcx.is_ty_uninhabited_from`, which is currently tricky to do. @@ -369,16 +365,11 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { let scope = this.local_scope(); let value = unpack!(block = this.as_operand(block, scope, value)); let resume = this.cfg.start_new_block(); - let cleanup = this.generator_drop_cleanup(); + this.generator_drop_cleanup(block); this.cfg.terminate( block, source_info, - TerminatorKind::Yield { - value, - resume, - resume_arg: *destination, - drop: cleanup, - }, + TerminatorKind::Yield { value, resume, resume_arg: *destination, drop: None }, ); resume.unit() } diff --git a/src/librustc_mir_build/build/matches/mod.rs b/src/librustc_mir_build/build/matches/mod.rs index 7995125524314..6f6eb71a0c275 100644 --- a/src/librustc_mir_build/build/matches/mod.rs +++ b/src/librustc_mir_build/build/matches/mod.rs @@ -12,7 +12,6 @@ use crate::build::{GuardFrame, GuardFrameLocal, LocalsForNode}; use crate::hair::{self, *}; use rustc::middle::region; use rustc::mir::*; -use rustc::ty::layout::VariantIdx; use rustc::ty::{self, CanonicalUserTypeAnnotation, Ty}; use rustc_data_structures::fx::{FxHashMap, FxHashSet}; use rustc_hir::HirId; @@ -250,7 +249,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { arm.guard.as_ref().map(|g| (g, match_scope)), &fake_borrow_temps, scrutinee_span, - Some(arm.scope), + // Some(arm.scope), ); if let Some(source_scope) = scope { @@ -1564,7 +1563,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { guard: Option<(&Guard<'tcx>, region::Scope)>, fake_borrows: &Vec<(Place<'tcx>, Local)>, scrutinee_span: Span, - schedule_drops: bool, + //region_scope: region::Scope, ) -> BasicBlock { debug!("bind_and_guard_matched_candidate(candidate={:?})", candidate); @@ -1717,11 +1716,10 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { unreachable }); let outside_scope = self.cfg.start_new_block(); - self.exit_scope( - source_info.span, - region_scope, + self.exit_top_scope( otherwise_post_guard_block, - outside_scope, + candidate.otherwise_block.unwrap(), + source_info, ); self.false_edges( outside_scope, diff --git a/src/librustc_mir_build/build/matches/test.rs b/src/librustc_mir_build/build/matches/test.rs index 
1acfa7dddbe1f..6b47937a89e2a 100644 --- a/src/librustc_mir_build/build/matches/test.rs +++ b/src/librustc_mir_build/build/matches/test.rs @@ -426,7 +426,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { let bool_ty = self.hir.bool_ty(); let eq_result = self.temp(bool_ty, source_info.span); let eq_block = self.cfg.start_new_block(); - let cleanup = self.diverge_cleanup(); + self.diverge_from(block); self.cfg.terminate( block, source_info, @@ -443,8 +443,8 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { literal: method, }), args: vec![val, expect], - destination: Some((eq_result, eq_block)), - cleanup: Some(cleanup), + destination: Some((eq_result.clone(), eq_block)), + cleanup: None, from_hir_call: false, }, ); diff --git a/src/librustc_mir_build/build/mod.rs b/src/librustc_mir_build/build/mod.rs index 8b7d0637c0343..3b6aa7bac8afb 100644 --- a/src/librustc_mir_build/build/mod.rs +++ b/src/librustc_mir_build/build/mod.rs @@ -317,11 +317,6 @@ struct Builder<'a, 'tcx> { var_debug_info: Vec>, - /// Cached block with the `RESUME` terminator; this is created - /// when first set of cleanups are built. - cached_resume_block: Option, - /// Cached block with the `RETURN` terminator. - cached_return_block: Option, /// Cached block with the `UNREACHABLE` terminator. cached_unreachable_block: Option, } @@ -583,50 +578,34 @@ where region::Scope { id: body.value.hir_id.local_id, data: region::ScopeData::CallSite }; let arg_scope = region::Scope { id: body.value.hir_id.local_id, data: region::ScopeData::Arguments }; - let mut block = START_BLOCK; let source_info = builder.source_info(span); let call_site_s = (call_site_scope, source_info); - unpack!( - block = builder.in_scope(call_site_s, LintLevel::Inherited, |builder| { - if should_abort_on_panic(tcx, fn_def_id, abi) { - builder.schedule_abort(); - } - - let arg_scope_s = (arg_scope, source_info); - // `return_block` is called when we evaluate a `return` expression, so - // we just use `START_BLOCK` here. 
- unpack!( - block = builder.in_breakable_scope( - None, - START_BLOCK, - Place::return_place(), - |builder| { - builder.in_scope(arg_scope_s, LintLevel::Inherited, |builder| { - builder.args_and_body( - block, - fn_def_id, - &arguments, - arg_scope, - &body.value, - ) - }) - }, - ) - ); - // Attribute epilogue to function's closing brace - let fn_end = span.shrink_to_hi(); - let source_info = builder.source_info(fn_end); - let return_block = builder.return_block(); - builder.cfg.goto(block, source_info, return_block); - builder.cfg.terminate(return_block, source_info, TerminatorKind::Return); - // Attribute any unreachable codepaths to the function's closing brace - if let Some(unreachable_block) = builder.cached_unreachable_block { - builder.cfg.terminate(unreachable_block, source_info, TerminatorKind::Unreachable); - } - return_block.unit() - }) - ); - assert_eq!(block, builder.return_block()); + unpack!(builder.in_scope(call_site_s, LintLevel::Inherited, |builder| { + let arg_scope_s = (arg_scope, source_info); + // Attribute epilogue to function's closing brace + let fn_end = span.shrink_to_hi(); + let return_block = + unpack!(builder.in_breakable_scope(None, Place::return_place(), fn_end, |builder| { + Some(builder.in_scope(arg_scope_s, LintLevel::Inherited, |builder| { + builder.args_and_body( + START_BLOCK, + fn_def_id, + &arguments, + arg_scope, + &body.value, + ) + })) + },)); + let source_info = builder.source_info(fn_end); + builder.cfg.terminate(return_block, source_info, TerminatorKind::Return); + let should_abort = should_abort_on_panic(tcx, fn_def_id, abi); + builder.build_drop_trees(should_abort); + // Attribute any unreachable codepaths to the function's closing brace + if let Some(unreachable_block) = builder.cached_unreachable_block { + builder.cfg.terminate(unreachable_block, source_info, TerminatorKind::Unreachable); + } + return_block.unit() + })); let mut spread_arg = None; if abi == Abi::RustCall { @@ -659,9 +638,6 @@ fn construct_const<'a, 'tcx>( let source_info = builder.source_info(span); builder.cfg.terminate(block, source_info, TerminatorKind::Return); - // Constants can't `return` so a return block should not be created. - assert_eq!(builder.cached_return_block, None); - // Constants may be match expressions in which case an unreachable block may // be created, so terminate it properly. 
if let Some(unreachable_block) = builder.cached_unreachable_block { @@ -735,7 +711,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { fn_span: span, arg_count, generator_kind, - scopes: Default::default(), + scopes: scope::Scopes::new(is_generator), block_context: BlockContext::new(), source_scopes: IndexVec::new(), source_scope: OUTERMOST_SOURCE_SCOPE, @@ -751,8 +727,6 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { var_indices: Default::default(), unit_temp: None, var_debug_info: vec![], - cached_resume_block: None, - cached_return_block: None, cached_unreachable_block: None, }; @@ -997,17 +971,6 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { } } } - - fn return_block(&mut self) -> BasicBlock { - match self.cached_return_block { - Some(rb) => rb, - None => { - let rb = self.cfg.start_new_block(); - self.cached_return_block = Some(rb); - rb - } - } - } } /////////////////////////////////////////////////////////////////////////// diff --git a/src/librustc_mir_build/build/scope.rs b/src/librustc_mir_build/build/scope.rs index a63ac06ec3fe9..443e15e6c747a 100644 --- a/src/librustc_mir_build/build/scope.rs +++ b/src/librustc_mir_build/build/scope.rs @@ -89,10 +89,27 @@ use rustc::mir::*; use rustc_data_structures::fx::FxHashMap; use rustc_hir as hir; use rustc_hir::GeneratorKind; +use rustc_index::vec::{Idx, IndexVec}; use rustc_span::{Span, DUMMY_SP}; use std::collections::hash_map::Entry; use std::mem; +#[derive(Debug)] +pub struct Scopes<'tcx> { + scopes: Vec, + /// The current set of breakable scopes. See module comment for more details. + breakable_scopes: Vec>, + + /// Drops that need to be done on unwind paths. See the comment on + /// [DropTree] for more details. + unwind_drops: DropTree, + + /// Drops that need to be done on paths to the `GeneratorDrop` terminator. + generator_drops: DropTree, + // TODO: what's this? + // cached_unwind_drop: DropIdx, +} + #[derive(Debug)] struct Scope { /// The source scope this scope was created in. @@ -111,27 +128,12 @@ struct Scope { drops: Vec, moved_locals: Vec, - - /// The cache for drop chain on “normal” exit into a particular BasicBlock. - cached_exits: FxHashMap<(BasicBlock, region::Scope), BasicBlock>, - - /// The cache for drop chain on "generator drop" exit. - cached_generator_drop: Option, - - /// The cache for drop chain on "unwind" exit. - cached_unwind: CachedBlock, -} - -#[derive(Debug, Default)] -crate struct Scopes<'tcx> { - scopes: Vec, - /// The current set of breakable scopes. See module comment for more details. - breakable_scopes: Vec>, } -#[derive(Debug)] +#[derive(Clone, Copy, Debug)] struct DropData { - /// span where drop obligation was incurred (typically where place was declared) + /// The `Span` where drop obligation was incurred (typically where place was + /// declared) span: Span, /// local to drop @@ -139,46 +141,23 @@ struct DropData { /// Whether this is a value Drop or a StorageDead. kind: DropKind, - - /// The cached blocks for unwinds. - cached_block: CachedBlock, -} - -#[derive(Debug, Default, Clone, Copy)] -struct CachedBlock { - /// The cached block for the cleanups-on-diverge path. This block - /// contains code to run the current drop and all the preceding - /// drops (i.e., those having lower index in Drop’s Scope drop - /// array) - unwind: Option, - - /// The cached block for unwinds during cleanups-on-generator-drop path - /// - /// This is split from the standard unwind path here to prevent drop - /// elaboration from creating drop flags that would have to be captured - /// by the generator. 
I'm not sure how important this optimization is, - /// but it is here. - generator_drop: Option, } -#[derive(Debug, PartialEq, Eq)] +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] pub(crate) enum DropKind { Value, Storage, } -#[derive(Clone, Debug)] +#[derive(Debug)] struct BreakableScope<'tcx> { /// Region scope of the loop region_scope: region::Scope, - /// Where the body of the loop begins. `None` if block - continue_block: Option, - /// Block to branch into when the loop or block terminates (either by being - /// `break`-en out from, or by having its condition to become false) - break_block: BasicBlock, /// The destination of the loop/block expression itself (i.e., where to put - /// the result of a `break` expression) + /// the result of a `break` or `return` expression) break_destination: Place<'tcx>, + /// Drops that happen on the + drops: DropTree, } /// The target of an expression that breaks out of a scope @@ -189,56 +168,31 @@ crate enum BreakableTarget { Return, } -impl CachedBlock { - fn invalidate(&mut self) { - *self = CachedBlock::default(); - } +rustc_index::newtype_index! { + struct DropIdx { .. } +} - fn get(&self, generator_drop: bool) -> Option { - if generator_drop { self.generator_drop } else { self.unwind } - } +const ROOT_NODE: DropIdx = DropIdx::from_u32_const(0); +const CONTINUE_NODE: DropIdx = DropIdx::from_u32_const(1); - fn ref_mut(&mut self, generator_drop: bool) -> &mut Option { - if generator_drop { &mut self.generator_drop } else { &mut self.unwind } - } +/// A tree of drops that we have deferred lowering. +// TODO say some more. +#[derive(Debug)] +struct DropTree { + /// The next item to drop, if there is one. + // TODO actual comment + drops: IndexVec, + /// Map for finding the inverse of the `next_drop` relation: + /// + /// `previous_drops[(next_drop[i], drops[i].local, drops[i].kind] == i` + previous_drops: FxHashMap<(DropIdx, Local, DropKind), DropIdx>, + /// Edges into the `DropTree` that need to be added once it's lowered. + entry_points: Vec<(DropIdx, BasicBlock)>, + /// The number of root nodes in the tree. + num_roots: DropIdx, } impl Scope { - /// Invalidates all the cached blocks in the scope. - /// - /// Should always be run for all inner scopes when a drop is pushed into some scope enclosing a - /// larger extent of code. - /// - /// `storage_only` controls whether to invalidate only drop paths that run `StorageDead`. - /// `this_scope_only` controls whether to invalidate only drop paths that refer to the current - /// top-of-scope (as opposed to dependent scopes). - fn invalidate_cache( - &mut self, - storage_only: bool, - generator_kind: Option, - this_scope_only: bool, - ) { - // FIXME: maybe do shared caching of `cached_exits` etc. to handle functions - // with lots of `try!`? - - // cached exits drop storage and refer to the top-of-scope - self.cached_exits.clear(); - - // the current generator drop and unwind refer to top-of-scope - self.cached_generator_drop = None; - - let ignore_unwinds = storage_only && generator_kind.is_none(); - if !ignore_unwinds { - self.cached_unwind.invalidate(); - } - - if !ignore_unwinds && !this_scope_only { - for drop_data in &mut self.drops { - drop_data.cached_block.invalidate(); - } - } - } - /// Given a span and this scope's source scope, make a SourceInfo. 
fn source_info(&self, span: Span) -> SourceInfo { SourceInfo { span, scope: self.source_scope } @@ -263,9 +217,40 @@ impl Scope { } } +impl DropTree { + fn new(num_roots: usize) -> Self { + let fake_data = DropData { span: DUMMY_SP, local: Local::MAX, kind: DropKind::Storage }; + let drop_idx = DropIdx::MAX; + let drops = IndexVec::from_elem_n((fake_data, drop_idx), num_roots); + Self { + drops, + num_roots: DropIdx::from_usize(num_roots), + entry_points: Vec::new(), + previous_drops: FxHashMap::default(), + } + } + + fn add_drop(&mut self, drop: DropData, next: DropIdx) -> DropIdx { + let drops = &mut self.drops; + *self + .previous_drops + .entry((next, drop.local, drop.kind)) + .or_insert_with(|| drops.push((drop, next))) + } + + fn add_entry(&mut self, from: BasicBlock, to: DropIdx) { + self.entry_points.push((to, from)); + } +} + impl<'tcx> Scopes<'tcx> { - fn len(&self) -> usize { - self.scopes.len() + pub(crate) fn new(is_generator: bool) -> Self { + Self { + scopes: Vec::new(), + breakable_scopes: Vec::new(), + unwind_drops: DropTree::new(1), + generator_drops: DropTree::new(is_generator as usize), + } } fn push_scope(&mut self, region_scope: (region::Scope, SourceInfo), vis_scope: SourceScope) { @@ -276,94 +261,35 @@ impl<'tcx> Scopes<'tcx> { region_scope_span: region_scope.1.span, drops: vec![], moved_locals: vec![], - cached_generator_drop: None, - cached_exits: Default::default(), - cached_unwind: CachedBlock::default(), }); } - fn pop_scope( - &mut self, - region_scope: (region::Scope, SourceInfo), - ) -> (Scope, Option) { + fn pop_scope(&mut self, region_scope: (region::Scope, SourceInfo)) -> Scope { let scope = self.scopes.pop().unwrap(); assert_eq!(scope.region_scope, region_scope.0); - let unwind_to = - self.scopes.last().and_then(|next_scope| next_scope.cached_unwind.get(false)); - (scope, unwind_to) - } - - fn may_panic(&self, scope_count: usize) -> bool { - let len = self.len(); - self.scopes[(len - scope_count)..].iter().any(|s| s.needs_cleanup()) - } - - /// Finds the breakable scope for a given label. This is used for - /// resolving `return`, `break` and `continue`. - fn find_breakable_scope( - &self, - span: Span, - target: BreakableTarget, - ) -> (BasicBlock, region::Scope, Option>) { - let get_scope = |scope: region::Scope| { - // find the loop-scope by its `region::Scope`. 
- self.breakable_scopes - .iter() - .rfind(|breakable_scope| breakable_scope.region_scope == scope) - .unwrap_or_else(|| span_bug!(span, "no enclosing breakable scope found")) - }; - match target { - BreakableTarget::Return => { - let scope = &self.breakable_scopes[0]; - if scope.break_destination != Place::return_place() { - span_bug!(span, "`return` in item with no return scope"); - } - (scope.break_block, scope.region_scope, Some(scope.break_destination)) - } - BreakableTarget::Break(scope) => { - let scope = get_scope(scope); - (scope.break_block, scope.region_scope, Some(scope.break_destination)) - } - BreakableTarget::Continue(scope) => { - let scope = get_scope(scope); - let continue_block = scope - .continue_block - .unwrap_or_else(|| span_bug!(span, "missing `continue` block")); - (continue_block, scope.region_scope, None) - } - } + scope } - fn num_scopes_above(&self, region_scope: region::Scope, span: Span) -> usize { - let scope_count = self - .scopes + fn scope_index(&self, region_scope: region::Scope, span: Span) -> usize { + self.scopes .iter() - .rev() - .position(|scope| scope.region_scope == region_scope) - .unwrap_or_else(|| span_bug!(span, "region_scope {:?} does not enclose", region_scope)); - let len = self.len(); - assert!(scope_count < len, "should not use `exit_scope` to pop ALL scopes"); - scope_count + .rposition(|scope| scope.region_scope == region_scope) + .unwrap_or_else(|| span_bug!(span, "region_scope {:?} does not enclose", region_scope)) } fn iter_mut(&mut self) -> impl DoubleEndedIterator + '_ { self.scopes.iter_mut().rev() } - fn top_scopes(&mut self, count: usize) -> impl DoubleEndedIterator + '_ { - let len = self.len(); - self.scopes[len - count..].iter_mut() - } - /// Returns the topmost active scope, which is known to be alive until /// the next scope expression. pub(super) fn topmost(&self) -> region::Scope { self.scopes.last().expect("topmost_scope: no scopes present").region_scope } - fn source_info(&self, index: usize, span: Span) -> SourceInfo { - self.scopes[self.len() - index].source_info(span) - } + // fn source_info(&self, index: usize, span: Span) -> SourceInfo { + // self.scopes[self.len() - index].source_info(span) + // } } impl<'a, 'tcx> Builder<'a, 'tcx> { @@ -371,28 +297,46 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { // ========================== // Start a breakable scope, which tracks where `continue`, `break` and // `return` should branch to. 
- crate fn in_breakable_scope( + pub fn in_breakable_scope( &mut self, loop_block: Option, - break_block: BasicBlock, break_destination: Place<'tcx>, + span: Span, f: F, - ) -> R + ) -> BlockAnd<()> where - F: FnOnce(&mut Builder<'a, 'tcx>) -> R, + F: FnOnce(&mut Builder<'a, 'tcx>) -> Option>, { let region_scope = self.scopes.topmost(); let scope = BreakableScope { region_scope, - continue_block: loop_block, - break_block, break_destination, + drops: DropTree::new(1 + loop_block.is_some() as usize), }; self.scopes.breakable_scopes.push(scope); - let res = f(self); + let normal_exit_block = f(self); let breakable_scope = self.scopes.breakable_scopes.pop().unwrap(); assert!(breakable_scope.region_scope == region_scope); - res + let break_block = self.build_exit_tree(breakable_scope.drops, loop_block); + match (normal_exit_block, break_block) { + (Some(block), None) | (None, Some(block)) => block, + (None, None) => self.cfg.start_new_block().unit(), + (Some(normal_block), Some(exit_block)) => { + let target = self.cfg.start_new_block(); + let source_info = self.source_info(span); + self.cfg.terminate( + unpack!(normal_block), + source_info, + TerminatorKind::Goto { target }, + ); + self.cfg.terminate( + unpack!(exit_block), + source_info, + TerminatorKind::Goto { target }, + ); + target.unit() + } + } } crate fn in_opt_scope( @@ -476,30 +420,15 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { mut block: BasicBlock, ) -> BlockAnd<()> { debug!("pop_scope({:?}, {:?})", region_scope, block); - // If we are emitting a `drop` statement, we need to have the cached - // diverge cleanup pads ready in case that drop panics. - if self.scopes.may_panic(1) { - self.diverge_cleanup(); - } - let (scope, unwind_to) = self.scopes.pop_scope(region_scope); - let unwind_to = unwind_to.unwrap_or_else(|| self.resume_block()); - unpack!( - block = build_scope_drops( - &mut self.cfg, - self.generator_kind, - &scope, - block, - unwind_to, - self.arg_count, - false, // not generator - false, // not unwind path - ) - ); + block = self.leave_top_scope(block); + + self.scopes.pop_scope(region_scope); block.unit() } + /// Sets up the drops for breaking from `block` to `target`. crate fn break_scope( &mut self, mut block: BasicBlock, @@ -507,152 +436,124 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { scope: BreakableTarget, source_info: SourceInfo, ) -> BlockAnd<()> { - let (mut target_block, region_scope, destination) = - self.scopes.find_breakable_scope(source_info.span, scope); - if let BreakableTarget::Return = scope { - // We call this now, rather than when we start lowering the - // function so that the return block doesn't precede the entire - // rest of the CFG. Some passes and LLVM prefer blocks to be in - // approximately CFG order. - target_block = self.return_block(); - } - if let Some(destination) = destination { + let span = source_info.span; + + let get_scope_index = |scope: region::Scope| { + // find the loop-scope by its `region::Scope`. 
+ self.scopes + .breakable_scopes + .iter() + .rposition(|breakable_scope| breakable_scope.region_scope == scope) + .unwrap_or_else(|| span_bug!(span, "no enclosing breakable scope found")) + }; + let (break_index, destination) = match scope { + BreakableTarget::Return => { + let scope = &self.scopes.breakable_scopes[0]; + if scope.break_destination != Place::return_place() { + span_bug!(span, "`return` in item with no return scope"); + } + (0, Some(scope.break_destination.clone())) + } + BreakableTarget::Break(scope) => { + let break_index = get_scope_index(scope); + ( + break_index, + Some(self.scopes.breakable_scopes[break_index].break_destination.clone()), + ) + } + BreakableTarget::Continue(scope) => { + let break_index = get_scope_index(scope); + (break_index, None) + } + }; + + if let Some(ref destination) = destination { if let Some(value) = value { debug!("stmt_expr Break val block_context.push(SubExpr)"); self.block_context.push(BlockFrame::SubExpr); - unpack!(block = self.into(&destination, block, value)); + unpack!(block = self.into(destination, block, value)); self.block_context.pop(); } else { - self.cfg.push_assign_unit(block, source_info, &destination) + self.cfg.push_assign_unit(block, source_info, destination) } } else { assert!(value.is_none(), "`return` and `break` should have a destination"); } - self.exit_scope(source_info.span, region_scope, block, target_block); + + let region_scope = self.scopes.breakable_scopes[break_index].region_scope; + let scope_index = self.scopes.scope_index(region_scope, span); + let exited_scopes = &self.scopes.scopes[scope_index + 1..]; + let scope_drops = exited_scopes.iter().flat_map(|scope| &scope.drops); + + let drops = &mut self.scopes.breakable_scopes[break_index].drops; + let mut drop_idx = DropIdx::from_u32(destination.is_none() as u32); + for drop in scope_drops { + drop_idx = drops.add_drop(*drop, drop_idx); + } + drops.add_entry(block, drop_idx); + // TODO: explain this hack! + self.cfg.terminate(block, source_info, TerminatorKind::Resume); + self.cfg.start_new_block().unit() } - /// Branch out of `block` to `target`, exiting all scopes up to - /// and including `region_scope`. This will insert whatever drops are - /// needed. See module comment for details. - crate fn exit_scope( + // TODO: use in pop_top_scope. + crate fn exit_top_scope( &mut self, - span: Span, - region_scope: region::Scope, mut block: BasicBlock, target: BasicBlock, + source_info: SourceInfo, ) { - debug!( - "exit_scope(region_scope={:?}, block={:?}, target={:?})", - region_scope, block, target - ); - let scope_count = self.scopes.num_scopes_above(region_scope, span); + block = self.leave_top_scope(block); + self.cfg.terminate(block, source_info, TerminatorKind::Goto { target }); + } + fn leave_top_scope(&mut self, block: BasicBlock) -> BasicBlock { // If we are emitting a `drop` statement, we need to have the cached // diverge cleanup pads ready in case that drop panics. 
- let may_panic = self.scopes.may_panic(scope_count); - if may_panic { - self.diverge_cleanup(); - } + let scope = self.scopes.scopes.last().expect("exit_top_scope called with no scopes"); + let is_generator = self.generator_kind.is_some(); + let needs_cleanup = scope.needs_cleanup(); - let mut scopes = self.scopes.top_scopes(scope_count + 1).rev(); - let mut scope = scopes.next().unwrap(); - for next_scope in scopes { - if scope.drops.is_empty() { - scope = next_scope; - continue; + let unwind_to = if needs_cleanup { + let mut drops = self + .scopes + .scopes + .iter() + .flat_map(|scope| &scope.drops) + .filter(|drop| is_generator || drop.kind == DropKind::Value); + let mut next_drop = ROOT_NODE; + let mut drop_info = drops.next().unwrap(); + for previous_drop_info in drops { + next_drop = self.scopes.unwind_drops.add_drop(*drop_info, next_drop); + drop_info = previous_drop_info; } - let source_info = scope.source_info(span); - block = match scope.cached_exits.entry((target, region_scope)) { - Entry::Occupied(e) => { - self.cfg.goto(block, source_info, *e.get()); - return; - } - Entry::Vacant(v) => { - let b = self.cfg.start_new_block(); - self.cfg.goto(block, source_info, b); - v.insert(b); - b - } - }; - - let unwind_to = next_scope.cached_unwind.get(false).unwrap_or_else(|| { - debug_assert!(!may_panic, "cached block not present?"); - START_BLOCK - }); - - unpack!( - block = build_scope_drops( - &mut self.cfg, - self.generator_kind, - scope, - block, - unwind_to, - self.arg_count, - false, // not generator - false, // not unwind path - ) - ); - - scope = next_scope; - } - - self.cfg.goto(block, self.scopes.source_info(scope_count, span), target); + next_drop + } else { + DropIdx::MAX + }; + unpack!(build_scope_drops( + &mut self.cfg, + &mut self.scopes.unwind_drops, + scope, + block, + unwind_to, + is_generator && needs_cleanup, + self.arg_count, + )) } - /// Creates a path that performs all required cleanup for dropping a generator. + /// Sets up a path that performs all required cleanup for dropping a generator. /// /// This path terminates in GeneratorDrop. Returns the start of the path. /// None indicates there’s no cleanup to do at this point. 
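 /// (For instance, for a generator `|| { let s = String::new(); yield; drop(s); }`
 /// that is dropped while suspended at the `yield`, `s` must still be dropped
 /// on the way to the `GeneratorDrop` terminator; this is the path that
 /// performs that cleanup.)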
- crate fn generator_drop_cleanup(&mut self) -> Option { - // Fill in the cache for unwinds - self.diverge_cleanup_gen(true); - - let src_info = self.scopes.source_info(self.scopes.len(), self.fn_span); - let resume_block = self.resume_block(); - let mut scopes = self.scopes.iter_mut().peekable(); - let mut block = self.cfg.start_new_block(); - let result = block; - - while let Some(scope) = scopes.next() { - block = if let Some(b) = scope.cached_generator_drop { - self.cfg.goto(block, src_info, b); - return Some(result); - } else { - let b = self.cfg.start_new_block(); - scope.cached_generator_drop = Some(b); - self.cfg.goto(block, src_info, b); - b - }; - - let unwind_to = scopes - .peek() - .as_ref() - .map(|scope| { - scope - .cached_unwind - .get(true) - .unwrap_or_else(|| span_bug!(src_info.span, "cached block not present?")) - }) - .unwrap_or(resume_block); - - unpack!( - block = build_scope_drops( - &mut self.cfg, - self.generator_kind, - scope, - block, - unwind_to, - self.arg_count, - true, // is generator - true, // is cached path - ) - ); + pub fn generator_drop_cleanup(&mut self, yield_block: BasicBlock) { + let drops = self.scopes.scopes.iter().flat_map(|scope| &scope.drops); + let mut next_drop = ROOT_NODE; + for drop in drops { + next_drop = self.scopes.generator_drops.add_drop(*drop, next_drop); } - - self.cfg.terminate(block, src_info, TerminatorKind::GeneratorDrop); - - Some(result) + self.scopes.generator_drops.add_entry(yield_block, next_drop); } /// Creates a new source scope, nested in the current one. @@ -728,15 +629,6 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { } } - // Schedule an abort block - this is used for some ABIs that cannot unwind - crate fn schedule_abort(&mut self) -> BasicBlock { - let source_info = self.scopes.source_info(self.scopes.len(), self.fn_span); - let abortblk = self.cfg.start_new_cleanup_block(); - self.cfg.terminate(abortblk, source_info, TerminatorKind::Abort); - self.cached_resume_block = Some(abortblk); - abortblk - } - // Scheduling drops // ================ crate fn schedule_drop_storage_and_value( @@ -749,11 +641,10 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { self.schedule_drop(span, region_scope, local, DropKind::Value); } - /// Indicates that `place` should be dropped on exit from - /// `region_scope`. + /// Indicates that `place` should be dropped on exit from `region_scope`. /// - /// When called with `DropKind::Storage`, `place` should be a local - /// with an index higher than the current `self.arg_count`. + /// When called with `DropKind::Storage`, `place` shouldn't be the return + /// place, or a function parameter. crate fn schedule_drop( &mut self, span: Span, @@ -761,7 +652,8 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { local: Local, drop_kind: DropKind, ) { - let needs_drop = match drop_kind { + // TODO: add back in caching. + let _needs_drop = match drop_kind { DropKind::Value => { if !self.hir.needs_drop(self.local_decls[local].ty) { return; @@ -781,71 +673,19 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { } }; - for scope in self.scopes.iter_mut() { - let this_scope = scope.region_scope == region_scope; - // When building drops, we try to cache chains of drops in such a way so these drops - // could be reused by the drops which would branch into the cached (already built) - // blocks. This, however, means that whenever we add a drop into a scope which already - // had some blocks built (and thus, cached) for it, we must invalidate all caches which - // might branch into the scope which had a drop just added to it. 
This is necessary, - // because otherwise some other code might use the cache to branch into already built - // chain of drops, essentially ignoring the newly added drop. - // - // For example consider there’s two scopes with a drop in each. These are built and - // thus the caches are filled: - // - // +--------------------------------------------------------+ - // | +---------------------------------+ | - // | | +--------+ +-------------+ | +---------------+ | - // | | | return | <-+ | drop(outer) | <-+ | drop(middle) | | - // | | +--------+ +-------------+ | +---------------+ | - // | +------------|outer_scope cache|--+ | - // +------------------------------|middle_scope cache|------+ - // - // Now, a new, inner-most scope is added along with a new drop into both inner-most and - // outer-most scopes: - // - // +------------------------------------------------------------+ - // | +----------------------------------+ | - // | | +--------+ +-------------+ | +---------------+ | +-------------+ - // | | | return | <+ | drop(new) | <-+ | drop(middle) | <--+| drop(inner) | - // | | +--------+ | | drop(outer) | | +---------------+ | +-------------+ - // | | +-+ +-------------+ | | - // | +---|invalid outer_scope cache|----+ | - // +----=----------------|invalid middle_scope cache|-----------+ - // - // If, when adding `drop(new)` we do not invalidate the cached blocks for both - // outer_scope and middle_scope, then, when building drops for the inner (right-most) - // scope, the old, cached blocks, without `drop(new)` will get used, producing the - // wrong results. - // - // The cache and its invalidation for unwind branch is somewhat special. The cache is - // per-drop, rather than per scope, which has a several different implications. Adding - // a new drop into a scope will not invalidate cached blocks of the prior drops in the - // scope. That is true, because none of the already existing drops will have an edge - // into a block with the newly added drop. - // - // Note that this code iterates scopes from the inner-most to the outer-most, - // invalidating caches of each scope visited. This way bare minimum of the - // caches gets invalidated. i.e., if a new drop is added into the middle scope, the - // cache of outer scope stays intact. - scope.invalidate_cache(!needs_drop, self.generator_kind, this_scope); - if this_scope { - let region_scope_span = - region_scope.span(self.hir.tcx(), &self.hir.region_scope_tree); - // Attribute scope exit drops to scope's closing brace. - let scope_end = self.hir.tcx().sess.source_map().end_point(region_scope_span); - - scope.drops.push(DropData { - span: scope_end, - local, - kind: drop_kind, - cached_block: CachedBlock::default(), - }); - return; - } - } - span_bug!(span, "region scope {:?} not in scope to drop {:?}", region_scope, local); + let scope = self + .scopes + .iter_mut() + .find(|scope| scope.region_scope == region_scope) + .unwrap_or_else(|| { + span_bug!(span, "region scope {:?} not in scope to drop {:?}", region_scope, local); + }); + + let region_scope_span = region_scope.span(self.hir.tcx(), &self.hir.region_scope_tree); + // Attribute scope exit drops to scope's closing brace. 
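+        // (E.g. for `{ let x = make(); f(&x); }` the scheduled drop of `x` is
+        // attributed to the block's closing `}` rather than to the statement
+        // that created `x`.)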
+ let scope_end = self.hir.tcx().sess.source_map().end_point(region_scope_span); + + scope.drops.push(DropData { span: scope_end, local, kind: drop_kind }); } /// Indicates that the "local operand" stored in `local` is @@ -963,8 +803,6 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { ); } } - - top_scope.invalidate_cache(true, self.generator_kind, true); } else { bug!("Expected as_local_operand to produce a temporary"); } @@ -974,62 +812,29 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { (true_block, false_block) } - /// Creates a path that performs all required cleanup for unwinding. - /// - /// This path terminates in Resume. Returns the start of the path. - /// See module comment for more details. - crate fn diverge_cleanup(&mut self) -> BasicBlock { - self.diverge_cleanup_gen(false) - } - - fn resume_block(&mut self) -> BasicBlock { - if let Some(target) = self.cached_resume_block { - target - } else { - let resumeblk = self.cfg.start_new_cleanup_block(); - self.cfg.terminate( - resumeblk, - SourceInfo { scope: OUTERMOST_SOURCE_SCOPE, span: self.fn_span }, - TerminatorKind::Resume, - ); - self.cached_resume_block = Some(resumeblk); - resumeblk + fn diverge_cleanup(&mut self) -> DropIdx { + let is_generator = self.is_generator; + let drops = self + .scopes + .scopes + .iter() + .flat_map(|scope| &scope.drops) + .filter(|drop| is_generator || drop.kind == DropKind::Value); + let mut next_drop = ROOT_NODE; + for drop in drops { + next_drop = self.scopes.unwind_drops.add_drop(*drop, next_drop); } + next_drop } - fn diverge_cleanup_gen(&mut self, generator_drop: bool) -> BasicBlock { - // Build up the drops in **reverse** order. The end result will - // look like: - // - // scopes[n] -> scopes[n-1] -> ... -> scopes[0] - // - // However, we build this in **reverse order**. That is, we - // process scopes[0], then scopes[1], etc, pointing each one at - // the result generates from the one before. Along the way, we - // store caches. If everything is cached, we'll just walk right - // to left reading the cached results but never created anything. - - // Find the last cached block - debug!("diverge_cleanup_gen(self.scopes = {:?})", self.scopes); - let cached_cleanup = self.scopes.iter_mut().enumerate().find_map(|(idx, ref scope)| { - let cached_block = scope.cached_unwind.get(generator_drop)?; - Some((cached_block, idx)) - }); - let (mut target, first_uncached) = - cached_cleanup.unwrap_or_else(|| (self.resume_block(), self.scopes.len())); - - for scope in self.scopes.top_scopes(first_uncached) { - target = build_diverge_scope( - &mut self.cfg, - scope.region_scope_span, - scope, - target, - generator_drop, - self.generator_kind, - ); - } - - target + /// Prepares to create a path that performs all required cleanup for + /// unwinding. + /// + /// This path terminates in Resume. The path isn't created until after all + /// of the non-unwind paths in this item have been lowered. 
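+    /// (Each call records the currently scheduled drops as nodes of the unwind
+    /// `DropTree`, plus an entry edge from `start`; the corresponding cleanup
+    /// blocks are only materialized later, in `build_drop_trees`.)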
+ pub fn diverge_from(&mut self, start: BasicBlock) { + let next_drop = self.diverge_cleanup(); + self.scopes.unwind_drops.add_entry(start, next_drop); } /// Utility function for *non*-scope code to build their own drops @@ -1042,16 +847,11 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { ) -> BlockAnd<()> { let source_info = self.source_info(span); let next_target = self.cfg.start_new_block(); - let diverge_target = self.diverge_cleanup(); + self.diverge_from(block); self.cfg.terminate( block, source_info, - TerminatorKind::DropAndReplace { - location, - value, - target: next_target, - unwind: Some(diverge_target), - }, + TerminatorKind::DropAndReplace { location, value, target: next_target, unwind: None }, ); next_target.unit() } @@ -1070,18 +870,12 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { let source_info = self.source_info(span); let success_block = self.cfg.start_new_block(); - let cleanup = self.diverge_cleanup(); + self.diverge_from(block); self.cfg.terminate( block, source_info, - TerminatorKind::Assert { - cond, - expected, - msg, - target: success_block, - cleanup: Some(cleanup), - }, + TerminatorKind::Assert { cond, expected, msg, target: success_block, cleanup: None }, ); success_block @@ -1099,20 +893,18 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { assert_eq!(top_scope.region_scope, region_scope); top_scope.drops.clear(); - top_scope.invalidate_cache(false, self.generator_kind, true); } } /// Builds drops for pop_scope and exit_scope. fn build_scope_drops<'tcx>( cfg: &mut CFG<'tcx>, - generator_kind: Option, + unwind_drops: &mut DropTree, scope: &Scope, mut block: BasicBlock, - last_unwind_to: BasicBlock, + mut unwind_to: DropIdx, + storage_dead_on_unwind: bool, arg_count: usize, - generator_drop: bool, - is_cached_path: bool, ) -> BlockAnd<()> { debug!("build_scope_drops({:?} -> {:?})", block, scope); @@ -1135,8 +927,7 @@ fn build_scope_drops<'tcx>( // drops for the unwind path should have already been generated by // `diverge_cleanup_gen`. - for drop_idx in (0..scope.drops.len()).rev() { - let drop_data = &scope.drops[drop_idx]; + for drop_data in scope.drops.iter().rev() { let source_info = scope.source_info(drop_data.span); let local = drop_data.local; @@ -1146,26 +937,25 @@ fn build_scope_drops<'tcx>( // path, then don't generate the drop. (We only take this into // account for non-unwind paths so as not to disturb the // caching mechanism.) - if !is_cached_path && scope.moved_locals.iter().any(|&o| o == local) { + if scope.moved_locals.iter().any(|&o| o == local) { + unwind_to = unwind_drops.drops[unwind_to].1; continue; } - let unwind_to = get_unwind_to(scope, generator_kind, drop_idx, generator_drop) - .unwrap_or(last_unwind_to); + unwind_drops.entry_points.push((unwind_to, block)); let next = cfg.start_new_block(); cfg.terminate( block, source_info, - TerminatorKind::Drop { - location: local.into(), - target: next, - unwind: Some(unwind_to), - }, + TerminatorKind::Drop { location: local.into(), target: next, unwind: None }, ); block = next; } DropKind::Storage => { + if storage_dead_on_unwind { + unwind_to = unwind_drops.drops[unwind_to].1; + } // Only temps and vars need their storage dead. 
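 // (The return place and the argument locals conceptually have storage for
 // the whole call, so they never get `StorageDead` statements; the
 // assertion below checks that.)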
assert!(local.index() > arg_count); cfg.push(block, Statement { source_info, kind: StatementKind::StorageDead(local) }); @@ -1175,139 +965,270 @@ fn build_scope_drops<'tcx>( block.unit() } -fn get_unwind_to( - scope: &Scope, - generator_kind: Option, - unwind_from: usize, - generator_drop: bool, -) -> Option { - for drop_idx in (0..unwind_from).rev() { - let drop_data = &scope.drops[drop_idx]; - match (generator_kind, &drop_data.kind) { - (Some(_), DropKind::Storage) => { - return Some(drop_data.cached_block.get(generator_drop).unwrap_or_else(|| { - span_bug!(drop_data.span, "cached block not present for {:?}", drop_data) - })); +trait DropTreeBuilder<'tcx> { + fn make_block(cfg: &mut CFG<'tcx>) -> BasicBlock; + fn add_entry(cfg: &mut CFG<'tcx>, from: BasicBlock, to: BasicBlock); +} +impl<'a, 'tcx: 'a> Builder<'a, 'tcx> { + fn build_exit_tree( + &mut self, + mut drops: DropTree, + continue_block: Option, + ) -> Option> { + let mut blocks = IndexVec::from_elem(None, &drops.drops); + if continue_block.is_some() { + blocks[CONTINUE_NODE] = continue_block; + debug_assert_eq!(drops.num_roots, DropIdx::new(2)); + } else { + debug_assert_eq!(drops.num_roots, CONTINUE_NODE); + } + build_drop_tree::(&mut self.cfg, &mut drops, &mut blocks); + if drops.drops.iter().any(|(drop, _)| drop.kind == DropKind::Value) { + let unwind_target = self.diverge_cleanup(); + let num_roots = drops.num_roots.index(); + let mut unwind_indices = IndexVec::from_elem_n(unwind_target, num_roots); + for (drop_idx, drop_data) in drops.drops.iter_enumerated().skip(num_roots) { + match drop_data.0.kind { + DropKind::Storage => { + if self.is_generator { + let unwind_drop = self + .scopes + .unwind_drops + .add_drop(drop_data.0, unwind_indices[drop_data.1]); + unwind_indices.push(unwind_drop); + } else { + unwind_indices.push(unwind_indices[drop_data.1]); + } + } + DropKind::Value => { + let unwind_drop = self + .scopes + .unwind_drops + .add_drop(drop_data.0, unwind_indices[drop_data.1]); + self.scopes + .unwind_drops + .add_entry(blocks[drop_idx].unwrap(), unwind_indices[drop_data.1]); + unwind_indices.push(unwind_drop); + } + } } - (None, DropKind::Value) => { - return Some(drop_data.cached_block.get(generator_drop).unwrap_or_else(|| { - span_bug!(drop_data.span, "cached block not present for {:?}", drop_data) - })); + } + blocks[ROOT_NODE].map(BasicBlock::unit) + } + + crate fn build_drop_trees(&mut self, should_abort: bool) { + if self.is_generator { + Self::build_generator_drop_tree( + &mut self.cfg, + &mut self.scopes.generator_drops, + self.fn_span, + should_abort, + ); + } + Self::build_unwind_tree( + &mut self.cfg, + &mut self.scopes.unwind_drops, + self.fn_span, + should_abort, + ); + } + + fn build_generator_drop_tree( + cfg: &mut CFG<'tcx>, + drops: &mut DropTree, + fn_span: Span, + should_abort: bool, + ) { + let mut blocks = IndexVec::from_elem(None, &drops.drops); + build_drop_tree::(cfg, drops, &mut blocks); + // TODO: unwind? + if let Some(root_block) = blocks[ROOT_NODE] { + cfg.terminate( + root_block, + SourceInfo { scope: OUTERMOST_SOURCE_SCOPE, span: fn_span }, + TerminatorKind::GeneratorDrop, + ); + } + // Reuse the generator drop tree as the unwind tree. + // + // This is a different tree to the standard unwind paths here to + // prevent drop elaboration from creating drop flags that would have + // to be captured by the generator. I'm not sure how important this + // optimization is, but it is here. 
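+    // (A "drop flag" is a runtime flag that tracks whether a local still
+    // needs dropping. In plain Rust terms:
+    //
+    //     let s = String::new();
+    //     if cond { drop(s) }
+    //     // a hidden flag now decides whether `s` drops at scope end
+    //
+    // Any such flag for these paths would have to be stored in the generator
+    // itself, which the separate tree avoids.)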
+ for (drop_idx, drop_data) in drops.drops.iter_enumerated() { + if let DropKind::Value = drop_data.0.kind { + drops.entry_points.push((drop_data.1, blocks[drop_idx].unwrap())); } - _ => (), + } + Self::build_unwind_tree(cfg, drops, fn_span, should_abort); + } + + fn build_unwind_tree( + cfg: &mut CFG<'tcx>, + drops: &mut DropTree, + fn_span: Span, + should_abort: bool, + ) { + let mut blocks = IndexVec::from_elem(None, &drops.drops); + build_drop_tree::(cfg, drops, &mut blocks); + if let Some(resume_block) = blocks[ROOT_NODE] { + let terminator = + if should_abort { TerminatorKind::Abort } else { TerminatorKind::Resume }; + cfg.terminate( + resume_block, + SourceInfo { scope: OUTERMOST_SOURCE_SCOPE, span: fn_span }, + terminator, + ); } } - None } -fn build_diverge_scope<'tcx>( +fn source_info(span: Span) -> SourceInfo { + SourceInfo { span, scope: OUTERMOST_SOURCE_SCOPE } +} + +fn build_drop_tree<'tcx, T: DropTreeBuilder<'tcx>>( cfg: &mut CFG<'tcx>, - span: Span, - scope: &mut Scope, - mut target: BasicBlock, - generator_drop: bool, - generator_kind: Option, -) -> BasicBlock { - // Build up the drops in **reverse** order. The end result will - // look like: - // - // [drops[n]] -...-> [drops[0]] -> [target] - // - // The code in this function reads from right to left. At each - // point, we check for cached blocks representing the - // remainder. If everything is cached, we'll just walk right to - // left reading the cached results but never create anything. - - let source_scope = scope.source_scope; - let source_info = |span| SourceInfo { span, scope: source_scope }; - - // We keep track of StorageDead statements to prepend to our current block - // and store them here, in reverse order. - let mut storage_deads = vec![]; - - let mut target_built_by_us = false; - - // Build up the drops. Here we iterate the vector in - // *forward* order, so that we generate drops[0] first (right to - // left in diagram above). - debug!("build_diverge_scope({:?})", scope.drops); - for (j, drop_data) in scope.drops.iter_mut().enumerate() { - debug!("build_diverge_scope drop_data[{}]: {:?}", j, drop_data); - // Only full value drops are emitted in the diverging path, - // not StorageDead, except in the case of generators. - // - // Note: This may not actually be what we desire (are we - // "freeing" stack storage as we unwind, or merely observing a - // frozen stack)? In particular, the intent may have been to - // match the behavior of clang, but on inspection eddyb says - // this is not what clang does. - match drop_data.kind { - DropKind::Storage if generator_kind.is_some() => { - storage_deads.push(Statement { - source_info: source_info(drop_data.span), - kind: StatementKind::StorageDead(drop_data.local), - }); - if !target_built_by_us { - // We cannot add statements to an existing block, so we create a new - // block for our StorageDead statements. - let block = cfg.start_new_cleanup_block(); - let source_info = SourceInfo { span: DUMMY_SP, scope: source_scope }; - cfg.goto(block, source_info, target); - target = block; - target_built_by_us = true; + drops: &mut DropTree, + blocks: &mut IndexVec>, +) { + debug!("build_drop_tree(drops = {:#?})", drops); + // TODO: Some comment about this. + #[derive(Clone, Copy)] + enum NeedsBlock { + NoPredecessor, + CanShare(DropIdx), + NeedsOwn, + } + + // TODO: Split this into two functions. + + // If a drop has multiple predecessors, they need to be in separate blocks + // so that they can both banch to the current drop. 
+ let mut needs_block = IndexVec::from_elem(NeedsBlock::NoPredecessor, &drops.drops); + for root_idx in (ROOT_NODE..drops.num_roots).skip(1) { + needs_block[root_idx] = NeedsBlock::NeedsOwn; + } + + let entry_points = &mut drops.entry_points; + entry_points.sort(); + + for (drop_idx, drop_data) in drops.drops.iter_enumerated().rev() { + if entry_points.last().map_or(false, |entry_point| entry_point.0 == drop_idx) { + let block = *blocks[drop_idx].get_or_insert_with(|| T::make_block(cfg)); + needs_block[drop_idx] = NeedsBlock::NeedsOwn; + while entry_points.last().map_or(false, |entry_point| entry_point.0 == drop_idx) { + let entry_block = entry_points.pop().unwrap().1; + T::add_entry(cfg, entry_block, block); + } + } + match needs_block[drop_idx] { + NeedsBlock::NoPredecessor => continue, + NeedsBlock::NeedsOwn => { + blocks[drop_idx].get_or_insert_with(|| T::make_block(cfg)); + } + NeedsBlock::CanShare(pred) => { + blocks[drop_idx] = blocks[pred]; + } + } + if let DropKind::Value = drop_data.0.kind { + needs_block[drop_data.1] = NeedsBlock::NeedsOwn; + } else { + if drop_idx >= drops.num_roots { + match &mut needs_block[drop_data.1] { + pred @ NeedsBlock::NoPredecessor => *pred = NeedsBlock::CanShare(drop_idx), + pred @ NeedsBlock::CanShare(_) => *pred = NeedsBlock::NeedsOwn, + NeedsBlock::NeedsOwn => (), } - *drop_data.cached_block.ref_mut(generator_drop) = Some(target); } - DropKind::Storage => {} + } + } + assert!(entry_points.is_empty()); + debug!("build_drop_tree: blocks = {:#?}", blocks); + + for (drop_idx, drop_data) in drops.drops.iter_enumerated().rev() { + if let NeedsBlock::NoPredecessor = needs_block[drop_idx] { + continue; + } + match drop_data.0.kind { DropKind::Value => { - let cached_block = drop_data.cached_block.ref_mut(generator_drop); - target = if let Some(cached_block) = *cached_block { - storage_deads.clear(); - target_built_by_us = false; - cached_block - } else { - push_storage_deads(cfg, target, &mut storage_deads); - let block = cfg.start_new_cleanup_block(); - cfg.terminate( - block, - source_info(drop_data.span), - TerminatorKind::Drop { - location: drop_data.local.into(), - target, - unwind: None, - }, - ); - *cached_block = Some(block); - target_built_by_us = true; - block + let terminator = TerminatorKind::Drop { + target: blocks[drop_data.1].unwrap(), + // TODO: The caller will register this if needed. + unwind: None, + location: drop_data.0.local.into(), }; + cfg.terminate(blocks[drop_idx].unwrap(), source_info(drop_data.0.span), terminator); } - }; + // Root nodes don't correspond to a drop. 
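+            // (a root's block, when one is assigned, is simply the target that
+            // the rest of the tree branches to, e.g. a `break` target)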
+ DropKind::Storage if drop_idx < drops.num_roots => {} + DropKind::Storage => { + let block = blocks[drop_idx].unwrap(); + let stmt = Statement { + source_info: source_info(drop_data.0.span), + kind: StatementKind::StorageDead(drop_data.0.local), + }; + cfg.push(block, stmt); + let target = blocks[drop_data.1].unwrap(); + if target != block { + let terminator = TerminatorKind::Goto { target }; + cfg.terminate(block, source_info(drop_data.0.span), terminator); + } + } + } } - push_storage_deads(cfg, target, &mut storage_deads); - *scope.cached_unwind.ref_mut(generator_drop) = Some(target); +} - assert!(storage_deads.is_empty()); - debug!("build_diverge_scope({:?}, {:?}) = {:?}", scope, span, target); +struct ExitScopes; - target +impl<'tcx> DropTreeBuilder<'tcx> for ExitScopes { + fn make_block(cfg: &mut CFG<'tcx>) -> BasicBlock { + cfg.start_new_block() + } + fn add_entry(cfg: &mut CFG<'tcx>, from: BasicBlock, to: BasicBlock) { + cfg.block_data_mut(from).terminator_mut().kind = TerminatorKind::Goto { target: to }; + } } -fn push_storage_deads<'tcx>( - cfg: &mut CFG<'tcx>, - target: BasicBlock, - storage_deads: &mut Vec>, -) { - if storage_deads.is_empty() { - return; +struct GeneratorDrop; + +impl<'tcx> DropTreeBuilder<'tcx> for GeneratorDrop { + fn make_block(cfg: &mut CFG<'tcx>) -> BasicBlock { + cfg.start_new_block() + } + fn add_entry(cfg: &mut CFG<'tcx>, from: BasicBlock, to: BasicBlock) { + let kind = &mut cfg.block_data_mut(from).terminator_mut().kind; + if let TerminatorKind::Yield { drop, .. } = kind { + *drop = Some(to); + }; + } +} + +struct Unwind; + +impl<'tcx> DropTreeBuilder<'tcx> for Unwind { + fn make_block(cfg: &mut CFG<'tcx>) -> BasicBlock { + cfg.start_new_cleanup_block() + } + fn add_entry(cfg: &mut CFG<'tcx>, from: BasicBlock, to: BasicBlock) { + let term = &mut cfg.block_data_mut(from).terminator_mut().kind; + match term { + TerminatorKind::Drop { unwind, .. } + | TerminatorKind::DropAndReplace { unwind, .. } + | TerminatorKind::FalseUnwind { unwind, .. } + | TerminatorKind::Call { cleanup: unwind, .. } + | TerminatorKind::Assert { cleanup: unwind, .. } => { + *unwind = Some(to); + } + TerminatorKind::Goto { .. } + | TerminatorKind::SwitchInt { .. } + | TerminatorKind::Resume + | TerminatorKind::Abort + | TerminatorKind::Return + | TerminatorKind::Unreachable + | TerminatorKind::Yield { .. } + | TerminatorKind::GeneratorDrop + | TerminatorKind::FalseEdges { .. } => bug!("cannot unwind from {:?}", term), + } } - let statements = &mut cfg.block_data_mut(target).statements; - storage_deads.reverse(); - debug!( - "push_storage_deads({:?}), storage_deads={:?}, statements={:?}", - target, storage_deads, statements - ); - storage_deads.append(statements); - mem::swap(statements, storage_deads); - assert!(storage_deads.is_empty()); } From 10817cc331e3b7b28cd5034a4ad07667136e9b0a Mon Sep 17 00:00:00 2001 From: Matthew Jasper Date: Sat, 16 Nov 2019 14:21:59 +0000 Subject: [PATCH 03/16] Temp: Cleanup 1 --- src/librustc_mir_build/build/scope.rs | 60 +++++++++++++++++---------- 1 file changed, 37 insertions(+), 23 deletions(-) diff --git a/src/librustc_mir_build/build/scope.rs b/src/librustc_mir_build/build/scope.rs index 443e15e6c747a..a2e8ead5a04bc 100644 --- a/src/librustc_mir_build/build/scope.rs +++ b/src/librustc_mir_build/build/scope.rs @@ -283,7 +283,7 @@ impl<'tcx> Scopes<'tcx> { /// Returns the topmost active scope, which is known to be alive until /// the next scope expression. 
- pub(super) fn topmost(&self) -> region::Scope { + fn topmost(&self) -> region::Scope { self.scopes.last().expect("topmost_scope: no scopes present").region_scope } @@ -297,7 +297,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { // ========================== // Start a breakable scope, which tracks where `continue`, `break` and // `return` should branch to. - pub fn in_breakable_scope( + crate fn in_breakable_scope( &mut self, loop_block: Option, break_destination: Place<'tcx>, @@ -547,7 +547,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { /// /// This path terminates in GeneratorDrop. Returns the start of the path. /// None indicates there’s no cleanup to do at this point. - pub fn generator_drop_cleanup(&mut self, yield_block: BasicBlock) { + crate fn generator_drop_cleanup(&mut self, yield_block: BasicBlock) { let drops = self.scopes.scopes.iter().flat_map(|scope| &scope.drops); let mut next_drop = ROOT_NODE; for drop in drops { @@ -832,7 +832,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { /// /// This path terminates in Resume. The path isn't created until after all /// of the non-unwind paths in this item have been lowered. - pub fn diverge_from(&mut self, start: BasicBlock) { + crate fn diverge_from(&mut self, start: BasicBlock) { let next_drop = self.diverge_cleanup(); self.scopes.unwind_drops.add_entry(start, next_drop); } @@ -887,7 +887,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { /// /// This is only needed for `match` arm scopes, because they have one /// entrance per pattern, but only one exit. - pub(crate) fn clear_top_scope(&mut self, region_scope: region::Scope) { + crate fn clear_top_scope(&mut self, region_scope: region::Scope) { let top_scope = self.scopes.scopes.last_mut().unwrap(); assert_eq!(top_scope.region_scope, region_scope); @@ -1018,30 +1018,24 @@ impl<'a, 'tcx: 'a> Builder<'a, 'tcx> { crate fn build_drop_trees(&mut self, should_abort: bool) { if self.is_generator { - Self::build_generator_drop_tree( + self.build_generator_drop_trees(should_abort); + } else { + Self::build_unwind_tree( &mut self.cfg, - &mut self.scopes.generator_drops, + &mut self.scopes.unwind_drops, self.fn_span, should_abort, ); } - Self::build_unwind_tree( - &mut self.cfg, - &mut self.scopes.unwind_drops, - self.fn_span, - should_abort, - ); } - fn build_generator_drop_tree( - cfg: &mut CFG<'tcx>, - drops: &mut DropTree, - fn_span: Span, - should_abort: bool, - ) { + fn build_generator_drop_trees(&mut self, should_abort: bool) { + // Build the drop tree for dropping the generator while it's suspended. + let drops = &mut self.scopes.generator_drops; + let cfg = &mut self.cfg; + let fn_span = self.fn_span; let mut blocks = IndexVec::from_elem(None, &drops.drops); build_drop_tree::(cfg, drops, &mut blocks); - // TODO: unwind? if let Some(root_block) = blocks[ROOT_NODE] { cfg.terminate( root_block, @@ -1049,7 +1043,13 @@ impl<'a, 'tcx: 'a> Builder<'a, 'tcx> { TerminatorKind::GeneratorDrop, ); } - // Reuse the generator drop tree as the unwind tree. + + // Build the drop tree for unwinding in the normal control flow paths. + let resume_block = + Self::build_unwind_tree(cfg, &mut self.scopes.unwind_drops, fn_span, should_abort); + + // Build the drop tree for unwinding when dropping a suspended + // generator. 
// // This is a different tree to the standard unwind paths here to // prevent drop elaboration from creating drop flags that would have @@ -1060,7 +1060,18 @@ impl<'a, 'tcx: 'a> Builder<'a, 'tcx> { drops.entry_points.push((drop_data.1, blocks[drop_idx].unwrap())); } } - Self::build_unwind_tree(cfg, drops, fn_span, should_abort); + let mut blocks = IndexVec::from_elem(None, &drops.drops); + blocks[ROOT_NODE] = resume_block; + build_drop_tree::(cfg, drops, &mut blocks); + if let (None, Some(new_resume_block)) = (resume_block, blocks[ROOT_NODE]) { + let terminator = + if should_abort { TerminatorKind::Abort } else { TerminatorKind::Resume }; + cfg.terminate( + new_resume_block, + SourceInfo { scope: OUTERMOST_SOURCE_SCOPE, span: fn_span }, + terminator, + ); + } } fn build_unwind_tree( @@ -1068,7 +1079,7 @@ impl<'a, 'tcx: 'a> Builder<'a, 'tcx> { drops: &mut DropTree, fn_span: Span, should_abort: bool, - ) { + ) -> Option { let mut blocks = IndexVec::from_elem(None, &drops.drops); build_drop_tree::(cfg, drops, &mut blocks); if let Some(resume_block) = blocks[ROOT_NODE] { @@ -1079,6 +1090,9 @@ impl<'a, 'tcx: 'a> Builder<'a, 'tcx> { SourceInfo { scope: OUTERMOST_SOURCE_SCOPE, span: fn_span }, terminator, ); + Some(resume_block) + } else { + None } } } From dca812134968e004490b652dedf089b4beba2984 Mon Sep 17 00:00:00 2001 From: Matthew Jasper Date: Sat, 16 Nov 2019 14:37:27 +0000 Subject: [PATCH 04/16] Temp: Cleanup 2 (SourceInfo) --- src/librustc_mir_build/build/scope.rs | 39 +++++++++++---------------- 1 file changed, 15 insertions(+), 24 deletions(-) diff --git a/src/librustc_mir_build/build/scope.rs b/src/librustc_mir_build/build/scope.rs index a2e8ead5a04bc..92014a2f9d244 100644 --- a/src/librustc_mir_build/build/scope.rs +++ b/src/librustc_mir_build/build/scope.rs @@ -134,7 +134,7 @@ struct Scope { struct DropData { /// The `Span` where drop obligation was incurred (typically where place was /// declared) - span: Span, + source_info: SourceInfo, /// local to drop local: Local, @@ -179,8 +179,7 @@ const CONTINUE_NODE: DropIdx = DropIdx::from_u32_const(1); // TODO say some more. #[derive(Debug)] struct DropTree { - /// The next item to drop, if there is one. - // TODO actual comment + /// Drops in the tree. drops: IndexVec, /// Map for finding the inverse of the `next_drop` relation: /// @@ -193,11 +192,6 @@ struct DropTree { } impl Scope { - /// Given a span and this scope's source scope, make a SourceInfo. - fn source_info(&self, span: Span) -> SourceInfo { - SourceInfo { span, scope: self.source_scope } - } - /// Whether there's anything to do for the cleanup path, that is, /// when unwinding through this scope. 
This includes destructors, /// but not StorageDead statements, which don't get emitted at all @@ -219,7 +213,9 @@ impl Scope { impl DropTree { fn new(num_roots: usize) -> Self { - let fake_data = DropData { span: DUMMY_SP, local: Local::MAX, kind: DropKind::Storage }; + let fake_source_info = SourceInfo { span: DUMMY_SP, scope: OUTERMOST_SOURCE_SCOPE }; + let fake_data = + DropData { source_info: fake_source_info, local: Local::MAX, kind: DropKind::Storage }; let drop_idx = DropIdx::MAX; let drops = IndexVec::from_elem_n((fake_data, drop_idx), num_roots); Self { @@ -286,10 +282,6 @@ impl<'tcx> Scopes<'tcx> { fn topmost(&self) -> region::Scope { self.scopes.last().expect("topmost_scope: no scopes present").region_scope } - - // fn source_info(&self, index: usize, span: Span) -> SourceInfo { - // self.scopes[self.len() - index].source_info(span) - // } } impl<'a, 'tcx> Builder<'a, 'tcx> { @@ -497,7 +489,6 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { self.cfg.start_new_block().unit() } - // TODO: use in pop_top_scope. crate fn exit_top_scope( &mut self, mut block: BasicBlock, @@ -685,7 +676,11 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { // Attribute scope exit drops to scope's closing brace. let scope_end = self.hir.tcx().sess.source_map().end_point(region_scope_span); - scope.drops.push(DropData { span: scope_end, local, kind: drop_kind }); + scope.drops.push(DropData { + source_info: SourceInfo { span: scope_end, scope: scope.source_scope }, + local, + kind: drop_kind, + }); } /// Indicates that the "local operand" stored in `local` is @@ -790,7 +785,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { bug!("Drop scheduled on top of condition variable") } DropKind::Storage => { - let source_info = top_scope.source_info(top_drop_data.span); + let source_info = top_drop_data.source_info; let local = top_drop_data.local; assert_eq!(local, cond_temp, "Drop scheduled on top of condition"); self.cfg.push( @@ -928,7 +923,7 @@ fn build_scope_drops<'tcx>( // `diverge_cleanup_gen`. for drop_data in scope.drops.iter().rev() { - let source_info = scope.source_info(drop_data.span); + let source_info = drop_data.source_info; let local = drop_data.local; match drop_data.kind { @@ -1097,10 +1092,6 @@ impl<'a, 'tcx: 'a> Builder<'a, 'tcx> { } } -fn source_info(span: Span) -> SourceInfo { - SourceInfo { span, scope: OUTERMOST_SOURCE_SCOPE } -} - fn build_drop_tree<'tcx, T: DropTreeBuilder<'tcx>>( cfg: &mut CFG<'tcx>, drops: &mut DropTree, @@ -1172,21 +1163,21 @@ fn build_drop_tree<'tcx, T: DropTreeBuilder<'tcx>>( unwind: None, location: drop_data.0.local.into(), }; - cfg.terminate(blocks[drop_idx].unwrap(), source_info(drop_data.0.span), terminator); + cfg.terminate(blocks[drop_idx].unwrap(), drop_data.0.source_info, terminator); } // Root nodes don't correspond to a drop. 
DropKind::Storage if drop_idx < drops.num_roots => {} DropKind::Storage => { let block = blocks[drop_idx].unwrap(); let stmt = Statement { - source_info: source_info(drop_data.0.span), + source_info: drop_data.0.source_info, kind: StatementKind::StorageDead(drop_data.0.local), }; cfg.push(block, stmt); let target = blocks[drop_data.1].unwrap(); if target != block { let terminator = TerminatorKind::Goto { target }; - cfg.terminate(block, source_info(drop_data.0.span), terminator); + cfg.terminate(block, drop_data.0.source_info, terminator); } } } From d9a44e6652d53b74a665c1ee32c813315033cccc Mon Sep 17 00:00:00 2001 From: Matthew Jasper Date: Sat, 16 Nov 2019 15:53:27 +0000 Subject: [PATCH 05/16] Temp: Cleanup 3 (comments) --- src/librustc_mir_build/build/scope.rs | 299 ++++++++++++++------------ 1 file changed, 164 insertions(+), 135 deletions(-) diff --git a/src/librustc_mir_build/build/scope.rs b/src/librustc_mir_build/build/scope.rs index 92014a2f9d244..e9b09d97c54e0 100644 --- a/src/librustc_mir_build/build/scope.rs +++ b/src/librustc_mir_build/build/scope.rs @@ -6,30 +6,31 @@ contents, and then pop it off. Every scope is named by a ### SEME Regions -When pushing a new scope, we record the current point in the graph (a +When pushing a new [Scope], we record the current point in the graph (a basic block); this marks the entry to the scope. We then generate more stuff in the control-flow graph. Whenever the scope is exited, either via a `break` or `return` or just by fallthrough, that marks an exit from the scope. Each lexical scope thus corresponds to a single-entry, multiple-exit (SEME) region in the control-flow graph. -For now, we keep a mapping from each `region::Scope` to its -corresponding SEME region for later reference (see caveat in next -paragraph). This is because region scopes are tied to -them. Eventually, when we shift to non-lexical lifetimes, there should -be no need to remember this mapping. +For now, we record the `region::Scope` to each SEME region for later reference +(see caveat in next paragraph). This is because destruction scopes are tied to +them. This may change in the future so that MIR lowering determines its own +destruction scopes. ### Not so SEME Regions In the course of building matches, it sometimes happens that certain code (namely guards) gets executed multiple times. This means that the scope lexical scope may in fact correspond to multiple, disjoint SEME regions. So in fact our -mapping is from one scope to a vector of SEME regions. +mapping is from one scope to a vector of SEME regions. Since the SEME regions +are disjoint, the mapping is still one-to-one for the set of SEME regions that +we're currently in. -Also in matches, the scopes assigned to arms are not even SEME regions! Each -arm has a single region with one entry for each pattern. We manually +Also in matches, the scopes assigned to arms are not always even SEME regions! +Each arm has a single region with one entry for each pattern. We manually manipulate the scheduled drops in this scope to avoid dropping things multiple -times, although drop elaboration would clean this up for value drops. +times. ### Drops @@ -60,25 +61,23 @@ that for now); any later drops would also drop `y`. There are numerous "normal" ways to early exit a scope: `break`, `continue`, `return` (panics are handled separately). Whenever an -early exit occurs, the method `exit_scope` is called. It is given the +early exit occurs, the method `break_scope` is called. 
It is given the current point in execution where the early exit occurs, as well as the scope you want to branch to (note that all early exits from to some -other enclosing scope). `exit_scope` will record this exit point and -also add all drops. +other enclosing scope). `break_scope` will record the set of drops currently +scheduled in a [DropTree]. Later, before `in_breakable_scope` exits, the drops +will be added to the CFG. -Panics are handled in a similar fashion, except that a panic always -returns out to the `DIVERGE_BLOCK`. To trigger a panic, simply call -`panic(p)` with the current point `p`. Or else you can call -`diverge_cleanup`, which will produce a block that you can branch to -which does the appropriate cleanup and then diverges. `panic(p)` -simply calls `diverge_cleanup()` and adds an edge from `p` to the -result. +Panics are handled in a similar fashion, except that the drops are added to the +mir once the rest of the function has finished being lowered. If a terminator +can panic, call `diverge_from(block)` with the block containing the terminator +`block`. -### Loop scopes +### Breakable scopes In addition to the normal scope stack, we track a loop scope stack -that contains only loops. It tracks where a `break` and `continue` -should go to. +that contains only loops and breakable blocks. It tracks where a `break`, +`continue` or `return` should go to. */ @@ -106,7 +105,7 @@ pub struct Scopes<'tcx> { /// Drops that need to be done on paths to the `GeneratorDrop` terminator. generator_drops: DropTree, - // TODO: what's this? + // TODO: implement caching // cached_unwind_drop: DropIdx, } @@ -175,8 +174,15 @@ rustc_index::newtype_index! { const ROOT_NODE: DropIdx = DropIdx::from_u32_const(0); const CONTINUE_NODE: DropIdx = DropIdx::from_u32_const(1); -/// A tree of drops that we have deferred lowering. -// TODO say some more. +/// A tree (usually, sometimes this is a forest of two trees) of drops that we +/// have deferred lowering. It's used for: +/// +/// * Drops on unwind paths +/// * Drops on generator drop paths (when a suspended generator is dropped) +/// * Drops on return and loop exit paths +/// +/// Once no more nodes could be added to the tree, we lower it to MIR in one go +/// in `build_drop_tree`. #[derive(Debug)] struct DropTree { /// Drops in the tree. @@ -187,8 +193,8 @@ struct DropTree { previous_drops: FxHashMap<(DropIdx, Local, DropKind), DropIdx>, /// Edges into the `DropTree` that need to be added once it's lowered. entry_points: Vec<(DropIdx, BasicBlock)>, - /// The number of root nodes in the tree. - num_roots: DropIdx, + /// The first non-root nodes in the forest. + first_non_root: DropIdx, } impl Scope { @@ -211,6 +217,13 @@ impl Scope { } } +/// A trait that determined how [DropTree::lower_to_mir] creates its blocks and +/// links to any entry nodes. 
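+/// (The implementations below differ in whether `make_block` creates a normal
+/// or a cleanup block, and in which terminator edge `add_entry` patches: a
+/// `Goto` for scope exits, the `drop` edge of a `Yield`, or the
+/// `unwind`/`cleanup` edge of a panicking terminator.)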
+trait DropTreeBuilder<'tcx> { + fn make_block(cfg: &mut CFG<'tcx>) -> BasicBlock; + fn add_entry(cfg: &mut CFG<'tcx>, from: BasicBlock, to: BasicBlock); +} + impl DropTree { fn new(num_roots: usize) -> Self { let fake_source_info = SourceInfo { span: DUMMY_SP, scope: OUTERMOST_SOURCE_SCOPE }; @@ -220,7 +233,7 @@ impl DropTree { let drops = IndexVec::from_elem_n((fake_data, drop_idx), num_roots); Self { drops, - num_roots: DropIdx::from_usize(num_roots), + first_non_root: DropIdx::from_usize(num_roots), entry_points: Vec::new(), previous_drops: FxHashMap::default(), } @@ -237,6 +250,119 @@ impl DropTree { fn add_entry(&mut self, from: BasicBlock, to: DropIdx) { self.entry_points.push((to, from)); } + + fn build_mir<'tcx, T: DropTreeBuilder<'tcx>>( + &mut self, + cfg: &mut CFG<'tcx>, + blocks: &mut IndexVec>, + ) { + debug!("DropTree::lower_to_mir(drops = {:#?})", self); + + self.assign_blocks::(cfg, blocks); + self.link_blocks(cfg, blocks) + } + + /// Assign blocks for all of the drops in the drop tree that need them. + fn assign_blocks<'tcx, T: DropTreeBuilder<'tcx>>( + &mut self, + cfg: &mut CFG<'tcx>, + blocks: &mut IndexVec>, + ) { + // StorageDead statements can share blocks with each other and also with + // a Drop terminator. We iterate through the blocks to find which blocks + // need + #[derive(Clone, Copy)] + enum Block { + // This drop is unreachable + None, + // This drop is only reachable through the `StorageDead` with the + // specified index. + Shares(DropIdx), + // This drop has more than one way of being reached, or it is + // branched to from outside the tree, or it's predecessor is a + // `Value` drop. + Own, + } + + let mut needs_block = IndexVec::from_elem(Block::None, &self.drops); + if self.first_non_root > CONTINUE_NODE { + // `continue` already has its own node. + needs_block[CONTINUE_NODE] = Block::Own; + } + + // Sort so that we only need to check the last + let entry_points = &mut self.entry_points; + entry_points.sort(); + + for (drop_idx, drop_data) in self.drops.iter_enumerated().rev() { + if entry_points.last().map_or(false, |entry_point| entry_point.0 == drop_idx) { + let block = *blocks[drop_idx].get_or_insert_with(|| T::make_block(cfg)); + needs_block[drop_idx] = Block::Own; + while entry_points.last().map_or(false, |entry_point| entry_point.0 == drop_idx) { + let entry_block = entry_points.pop().unwrap().1; + T::add_entry(cfg, entry_block, block); + } + } + match needs_block[drop_idx] { + Block::None => continue, + Block::Own => { + blocks[drop_idx].get_or_insert_with(|| T::make_block(cfg)); + } + Block::Shares(pred) => { + blocks[drop_idx] = blocks[pred]; + } + } + if let DropKind::Value = drop_data.0.kind { + needs_block[drop_data.1] = Block::Own; + } else { + if drop_idx >= self.first_non_root { + match &mut needs_block[drop_data.1] { + pred @ Block::None => *pred = Block::Shares(drop_idx), + pred @ Block::Shares(_) => *pred = Block::Own, + Block::Own => (), + } + } + } + } + + debug!("assign_blocks: blocks = {:#?}", blocks); + assert!(entry_points.is_empty()); + } + + fn link_blocks(&self, cfg: &mut CFG<'tcx>, blocks: &IndexVec>) { + for (drop_idx, drop_data) in self.drops.iter_enumerated().rev() { + let block = if let Some(block) = blocks[drop_idx] { + block + } else { + continue; + }; + match drop_data.0.kind { + DropKind::Value => { + let terminator = TerminatorKind::Drop { + target: blocks[drop_data.1].unwrap(), + // The caller will handle this if needed. 
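+                        // (for the non-unwind trees, `build_exit_tree` later
+                        // registers these drops with the unwind tree, whose
+                        // `DropTreeBuilder` fills in this edge)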
+ unwind: None, + location: drop_data.0.local.into(), + }; + cfg.terminate(block, drop_data.0.source_info, terminator); + } + // Root nodes don't correspond to a drop. + DropKind::Storage if drop_idx < self.first_non_root => {} + DropKind::Storage => { + let stmt = Statement { + source_info: drop_data.0.source_info, + kind: StatementKind::StorageDead(drop_data.0.local), + }; + cfg.push(block, stmt); + let target = blocks[drop_data.1].unwrap(); + if target != block { + let terminator = TerminatorKind::Goto { target }; + cfg.terminate(block, drop_data.0.source_info, terminator); + } + } + } + } + } } impl<'tcx> Scopes<'tcx> { @@ -483,7 +609,9 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { drop_idx = drops.add_drop(*drop, drop_idx); } drops.add_entry(block, drop_idx); - // TODO: explain this hack! + // `build_drop_tree` doesn't have access to our source_info, so we + // create a dummy terminator now. `TerminatorKind::Resume` is used + // because MIR type checking will panic if it hasn't been overwritten. self.cfg.terminate(block, source_info, TerminatorKind::Resume); self.cfg.start_new_block().unit() @@ -891,7 +1019,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { } } -/// Builds drops for pop_scope and exit_scope. +/// Builds drops for pop_scope and leave_top_scope. fn build_scope_drops<'tcx>( cfg: &mut CFG<'tcx>, unwind_drops: &mut DropTree, @@ -960,10 +1088,6 @@ fn build_scope_drops<'tcx>( block.unit() } -trait DropTreeBuilder<'tcx> { - fn make_block(cfg: &mut CFG<'tcx>) -> BasicBlock; - fn add_entry(cfg: &mut CFG<'tcx>, from: BasicBlock, to: BasicBlock); -} impl<'a, 'tcx: 'a> Builder<'a, 'tcx> { fn build_exit_tree( &mut self, @@ -973,14 +1097,11 @@ impl<'a, 'tcx: 'a> Builder<'a, 'tcx> { let mut blocks = IndexVec::from_elem(None, &drops.drops); if continue_block.is_some() { blocks[CONTINUE_NODE] = continue_block; - debug_assert_eq!(drops.num_roots, DropIdx::new(2)); - } else { - debug_assert_eq!(drops.num_roots, CONTINUE_NODE); } - build_drop_tree::(&mut self.cfg, &mut drops, &mut blocks); + drops.build_mir::(&mut self.cfg, &mut blocks); if drops.drops.iter().any(|(drop, _)| drop.kind == DropKind::Value) { let unwind_target = self.diverge_cleanup(); - let num_roots = drops.num_roots.index(); + let num_roots = drops.first_non_root.index(); let mut unwind_indices = IndexVec::from_elem_n(unwind_target, num_roots); for (drop_idx, drop_data) in drops.drops.iter_enumerated().skip(num_roots) { match drop_data.0.kind { @@ -1030,7 +1151,7 @@ impl<'a, 'tcx: 'a> Builder<'a, 'tcx> { let cfg = &mut self.cfg; let fn_span = self.fn_span; let mut blocks = IndexVec::from_elem(None, &drops.drops); - build_drop_tree::(cfg, drops, &mut blocks); + drops.build_mir::(cfg, &mut blocks); if let Some(root_block) = blocks[ROOT_NODE] { cfg.terminate( root_block, @@ -1057,7 +1178,7 @@ impl<'a, 'tcx: 'a> Builder<'a, 'tcx> { } let mut blocks = IndexVec::from_elem(None, &drops.drops); blocks[ROOT_NODE] = resume_block; - build_drop_tree::(cfg, drops, &mut blocks); + drops.build_mir::(cfg, &mut blocks); if let (None, Some(new_resume_block)) = (resume_block, blocks[ROOT_NODE]) { let terminator = if should_abort { TerminatorKind::Abort } else { TerminatorKind::Resume }; @@ -1076,7 +1197,7 @@ impl<'a, 'tcx: 'a> Builder<'a, 'tcx> { should_abort: bool, ) -> Option { let mut blocks = IndexVec::from_elem(None, &drops.drops); - build_drop_tree::(cfg, drops, &mut blocks); + drops.build_mir::(cfg, &mut blocks); if let Some(resume_block) = blocks[ROOT_NODE] { let terminator = if should_abort { TerminatorKind::Abort } else { TerminatorKind::Resume }; 
@@ -1092,98 +1213,6 @@ impl<'a, 'tcx: 'a> Builder<'a, 'tcx> { } } -fn build_drop_tree<'tcx, T: DropTreeBuilder<'tcx>>( - cfg: &mut CFG<'tcx>, - drops: &mut DropTree, - blocks: &mut IndexVec>, -) { - debug!("build_drop_tree(drops = {:#?})", drops); - // TODO: Some comment about this. - #[derive(Clone, Copy)] - enum NeedsBlock { - NoPredecessor, - CanShare(DropIdx), - NeedsOwn, - } - - // TODO: Split this into two functions. - - // If a drop has multiple predecessors, they need to be in separate blocks - // so that they can both banch to the current drop. - let mut needs_block = IndexVec::from_elem(NeedsBlock::NoPredecessor, &drops.drops); - for root_idx in (ROOT_NODE..drops.num_roots).skip(1) { - needs_block[root_idx] = NeedsBlock::NeedsOwn; - } - - let entry_points = &mut drops.entry_points; - entry_points.sort(); - - for (drop_idx, drop_data) in drops.drops.iter_enumerated().rev() { - if entry_points.last().map_or(false, |entry_point| entry_point.0 == drop_idx) { - let block = *blocks[drop_idx].get_or_insert_with(|| T::make_block(cfg)); - needs_block[drop_idx] = NeedsBlock::NeedsOwn; - while entry_points.last().map_or(false, |entry_point| entry_point.0 == drop_idx) { - let entry_block = entry_points.pop().unwrap().1; - T::add_entry(cfg, entry_block, block); - } - } - match needs_block[drop_idx] { - NeedsBlock::NoPredecessor => continue, - NeedsBlock::NeedsOwn => { - blocks[drop_idx].get_or_insert_with(|| T::make_block(cfg)); - } - NeedsBlock::CanShare(pred) => { - blocks[drop_idx] = blocks[pred]; - } - } - if let DropKind::Value = drop_data.0.kind { - needs_block[drop_data.1] = NeedsBlock::NeedsOwn; - } else { - if drop_idx >= drops.num_roots { - match &mut needs_block[drop_data.1] { - pred @ NeedsBlock::NoPredecessor => *pred = NeedsBlock::CanShare(drop_idx), - pred @ NeedsBlock::CanShare(_) => *pred = NeedsBlock::NeedsOwn, - NeedsBlock::NeedsOwn => (), - } - } - } - } - assert!(entry_points.is_empty()); - debug!("build_drop_tree: blocks = {:#?}", blocks); - - for (drop_idx, drop_data) in drops.drops.iter_enumerated().rev() { - if let NeedsBlock::NoPredecessor = needs_block[drop_idx] { - continue; - } - match drop_data.0.kind { - DropKind::Value => { - let terminator = TerminatorKind::Drop { - target: blocks[drop_data.1].unwrap(), - // TODO: The caller will register this if needed. - unwind: None, - location: drop_data.0.local.into(), - }; - cfg.terminate(blocks[drop_idx].unwrap(), drop_data.0.source_info, terminator); - } - // Root nodes don't correspond to a drop. 
- DropKind::Storage if drop_idx < drops.num_roots => {} - DropKind::Storage => { - let block = blocks[drop_idx].unwrap(); - let stmt = Statement { - source_info: drop_data.0.source_info, - kind: StatementKind::StorageDead(drop_data.0.local), - }; - cfg.push(block, stmt); - let target = blocks[drop_data.1].unwrap(); - if target != block { - let terminator = TerminatorKind::Goto { target }; - cfg.terminate(block, drop_data.0.source_info, terminator); - } - } - } - } -} - struct ExitScopes; impl<'tcx> DropTreeBuilder<'tcx> for ExitScopes { From a6bec9d7902fa389ede52e2d0df932dbdcb3eea8 Mon Sep 17 00:00:00 2001 From: Matthew Jasper Date: Sat, 16 Nov 2019 16:06:12 +0000 Subject: [PATCH 06/16] Temp: Cleanup 4 (resume block) --- src/librustc_mir_build/build/scope.rs | 31 ++++++++++----------------- 1 file changed, 11 insertions(+), 20 deletions(-) diff --git a/src/librustc_mir_build/build/scope.rs b/src/librustc_mir_build/build/scope.rs index e9b09d97c54e0..e4b05da936048 100644 --- a/src/librustc_mir_build/build/scope.rs +++ b/src/librustc_mir_build/build/scope.rs @@ -1141,6 +1141,7 @@ impl<'a, 'tcx: 'a> Builder<'a, 'tcx> { &mut self.scopes.unwind_drops, self.fn_span, should_abort, + &mut None, ); } } @@ -1161,8 +1162,9 @@ impl<'a, 'tcx: 'a> Builder<'a, 'tcx> { } // Build the drop tree for unwinding in the normal control flow paths. - let resume_block = - Self::build_unwind_tree(cfg, &mut self.scopes.unwind_drops, fn_span, should_abort); + let resume_block = &mut None; + let unwind_drops = &mut self.scopes.unwind_drops; + Self::build_unwind_tree(cfg, unwind_drops, fn_span, should_abort, resume_block); // Build the drop tree for unwinding when dropping a suspended // generator. @@ -1176,18 +1178,7 @@ impl<'a, 'tcx: 'a> Builder<'a, 'tcx> { drops.entry_points.push((drop_data.1, blocks[drop_idx].unwrap())); } } - let mut blocks = IndexVec::from_elem(None, &drops.drops); - blocks[ROOT_NODE] = resume_block; - drops.build_mir::(cfg, &mut blocks); - if let (None, Some(new_resume_block)) = (resume_block, blocks[ROOT_NODE]) { - let terminator = - if should_abort { TerminatorKind::Abort } else { TerminatorKind::Resume }; - cfg.terminate( - new_resume_block, - SourceInfo { scope: OUTERMOST_SOURCE_SCOPE, span: fn_span }, - terminator, - ); - } + Self::build_unwind_tree(cfg, drops, fn_span, should_abort, resume_block); } fn build_unwind_tree( @@ -1195,20 +1186,20 @@ impl<'a, 'tcx: 'a> Builder<'a, 'tcx> { drops: &mut DropTree, fn_span: Span, should_abort: bool, - ) -> Option { + resume_block: &mut Option, + ) { let mut blocks = IndexVec::from_elem(None, &drops.drops); + blocks[ROOT_NODE] = *resume_block; drops.build_mir::(cfg, &mut blocks); - if let Some(resume_block) = blocks[ROOT_NODE] { + if let (None, Some(resume)) = (*resume_block, blocks[ROOT_NODE]) { let terminator = if should_abort { TerminatorKind::Abort } else { TerminatorKind::Resume }; cfg.terminate( - resume_block, + resume, SourceInfo { scope: OUTERMOST_SOURCE_SCOPE, span: fn_span }, terminator, ); - Some(resume_block) - } else { - None + *resume_block = blocks[ROOT_NODE]; } } } From e41a7c52f75da289769aeff842237b7358dc00f4 Mon Sep 17 00:00:00 2001 From: Matthew Jasper Date: Sat, 16 Nov 2019 22:19:24 +0000 Subject: [PATCH 07/16] Temp: Cleanup 5 (colors) --- src/librustc_mir/util/graphviz.rs | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/src/librustc_mir/util/graphviz.rs b/src/librustc_mir/util/graphviz.rs index 8291bc958808d..3727f4a1acd48 100644 --- a/src/librustc_mir/util/graphviz.rs +++ 
b/src/librustc_mir/util/graphviz.rs
@@ -97,12 +97,17 @@ where
         write!(w, r#"<table border="0" cellborder="1" cellspacing="0">"#)?;
 
         // Basic block number at the top.
+        let (blk, color) = if data.is_cleanup {
+            (format!("{} (cleanup)", block.index()), "lightblue")
+        } else {
+            (format!("{}", block.index()), "gray")
+        };
         write!(
             w,
-            r#"<tr><td {attrs} colspan="{colspan}">{blk}</td></tr>"#,
-            attrs = r#"bgcolor="gray" align="center""#,
+            r#"<tr><td bgcolor="{color}" align="center" colspan="{colspan}">{blk}</td></tr>"#,
             colspan = num_cols,
-            blk = block.index()
+            blk = blk,
+            color = color
         )?;
 
         init(w)?;

From d44233ebe6397246270e736a002376d65e71b890 Mon Sep 17 00:00:00 2001
From: Matthew Jasper
Date: Sun, 17 Nov 2019 09:36:31 +0000
Subject: [PATCH 08/16] Temp: Cleanup 6 (Separate exit trees)

---
 src/librustc_mir_build/build/mod.rs   |  2 +-
 src/librustc_mir_build/build/scope.rs | 64 ++++++++++++++-------------
 2 files changed, 35 insertions(+), 31 deletions(-)

diff --git a/src/librustc_mir_build/build/mod.rs b/src/librustc_mir_build/build/mod.rs
index 3b6aa7bac8afb..f752314acda2a 100644
--- a/src/librustc_mir_build/build/mod.rs
+++ b/src/librustc_mir_build/build/mod.rs
@@ -711,7 +711,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
             fn_span: span,
             arg_count,
             generator_kind,
-            scopes: scope::Scopes::new(is_generator),
+            scopes: scope::Scopes::new(),
             block_context: BlockContext::new(),
             source_scopes: IndexVec::new(),
             source_scope: OUTERMOST_SOURCE_SCOPE,
diff --git a/src/librustc_mir_build/build/scope.rs b/src/librustc_mir_build/build/scope.rs
index e4b05da936048..9456eb0d40199 100644
--- a/src/librustc_mir_build/build/scope.rs
+++ b/src/librustc_mir_build/build/scope.rs
@@ -155,8 +155,10 @@ struct BreakableScope<'tcx> {
     /// The destination of the loop/block expression itself (i.e., where to put
     /// the result of a `break` or `return` expression)
     break_destination: Place<'tcx>,
-    /// Drops that happen on the
-    drops: DropTree,
+    /// Drops that happen on the `break`/`return` path.
+    break_drops: DropTree,
+    /// Drops that happen on the `continue` path.
+    continue_drops: Option<DropTree>,
 }
 
 /// The target of an expression that breaks out of a scope
@@ -172,10 +174,8 @@ rustc_index::newtype_index! {
 }
 
 const ROOT_NODE: DropIdx = DropIdx::from_u32_const(0);
-const CONTINUE_NODE: DropIdx = DropIdx::from_u32_const(1);
 
-/// A tree (usually, sometimes this is a forest of two trees) of drops that we
-/// have deferred lowering. It's used for:
+/// A tree of drops that we have deferred lowering. It's used for:
 ///
 /// * Drops on unwind paths
 /// * Drops on generator drop paths (when a suspended generator is dropped)
@@ -189,12 +189,10 @@ struct DropTree {
     drops: IndexVec<DropIdx, (DropData, DropIdx)>,
     /// Map for finding the inverse of the `next_drop` relation:
     ///
-    /// `previous_drops[(next_drop[i], drops[i].local, drops[i].kind)] == i`
+    /// `previous_drops[(drops[i].1, drops[i].0.local, drops[i].0.kind)] == i`
     previous_drops: FxHashMap<(DropIdx, Local, DropKind), DropIdx>,
     /// Edges into the `DropTree` that need to be added once it's lowered.
     entry_points: Vec<(DropIdx, BasicBlock)>,
-    /// The first non-root nodes in the forest.
- first_non_root: DropIdx, } impl Scope { @@ -225,15 +223,14 @@ trait DropTreeBuilder<'tcx> { } impl DropTree { - fn new(num_roots: usize) -> Self { + fn new() -> Self { let fake_source_info = SourceInfo { span: DUMMY_SP, scope: OUTERMOST_SOURCE_SCOPE }; let fake_data = DropData { source_info: fake_source_info, local: Local::MAX, kind: DropKind::Storage }; let drop_idx = DropIdx::MAX; - let drops = IndexVec::from_elem_n((fake_data, drop_idx), num_roots); + let drops = IndexVec::from_elem_n((fake_data, drop_idx), 1); Self { drops, - first_non_root: DropIdx::from_usize(num_roots), entry_points: Vec::new(), previous_drops: FxHashMap::default(), } @@ -248,6 +245,7 @@ impl DropTree { } fn add_entry(&mut self, from: BasicBlock, to: DropIdx) { + debug_assert!(to < self.drops.next_index()); self.entry_points.push((to, from)); } @@ -285,9 +283,11 @@ impl DropTree { } let mut needs_block = IndexVec::from_elem(Block::None, &self.drops); - if self.first_non_root > CONTINUE_NODE { - // `continue` already has its own node. - needs_block[CONTINUE_NODE] = Block::Own; + if blocks[ROOT_NODE].is_some() { + // In some cases (such as drops for `continue`) the root node + // already has a block. In this case, make sure that we don't + // override it. + needs_block[ROOT_NODE] = Block::Own; } // Sort so that we only need to check the last @@ -315,7 +315,7 @@ impl DropTree { if let DropKind::Value = drop_data.0.kind { needs_block[drop_data.1] = Block::Own; } else { - if drop_idx >= self.first_non_root { + if drop_idx != ROOT_NODE { match &mut needs_block[drop_data.1] { pred @ Block::None => *pred = Block::Shares(drop_idx), pred @ Block::Shares(_) => *pred = Block::Own, @@ -347,7 +347,7 @@ impl DropTree { cfg.terminate(block, drop_data.0.source_info, terminator); } // Root nodes don't correspond to a drop. 
- DropKind::Storage if drop_idx < self.first_non_root => {} + DropKind::Storage if drop_idx == ROOT_NODE => {} DropKind::Storage => { let stmt = Statement { source_info: drop_data.0.source_info, @@ -366,12 +366,12 @@ impl DropTree { } impl<'tcx> Scopes<'tcx> { - pub(crate) fn new(is_generator: bool) -> Self { + pub(crate) fn new() -> Self { Self { scopes: Vec::new(), breakable_scopes: Vec::new(), - unwind_drops: DropTree::new(1), - generator_drops: DropTree::new(is_generator as usize), + unwind_drops: DropTree::new(), + generator_drops: DropTree::new(), } } @@ -429,13 +429,17 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { let scope = BreakableScope { region_scope, break_destination, - drops: DropTree::new(1 + loop_block.is_some() as usize), + break_drops: DropTree::new(), + continue_drops: loop_block.map(|_| DropTree::new()), }; self.scopes.breakable_scopes.push(scope); let normal_exit_block = f(self); let breakable_scope = self.scopes.breakable_scopes.pop().unwrap(); assert!(breakable_scope.region_scope == region_scope); - let break_block = self.build_exit_tree(breakable_scope.drops, loop_block); + let break_block = self.build_exit_tree(breakable_scope.break_drops, None); + breakable_scope.continue_drops.map(|drops| { + self.build_exit_tree(drops, loop_block); + }); match (normal_exit_block, break_block) { (Some(block), None) | (None, Some(block)) => block, (None, None) => self.cfg.start_new_block().unit(), @@ -600,10 +604,12 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { let region_scope = self.scopes.breakable_scopes[break_index].region_scope; let scope_index = self.scopes.scope_index(region_scope, span); - let exited_scopes = &self.scopes.scopes[scope_index + 1..]; - let scope_drops = exited_scopes.iter().flat_map(|scope| &scope.drops); + let drops = if destination.is_some() { + &mut self.scopes.breakable_scopes[break_index].break_drops + } else { + self.scopes.breakable_scopes[break_index].continue_drops.as_mut().unwrap() + }; - let drops = &mut self.scopes.breakable_scopes[break_index].drops; let mut drop_idx = DropIdx::from_u32(destination.is_none() as u32); for drop in scope_drops { drop_idx = drops.add_drop(*drop, drop_idx); @@ -1095,15 +1101,13 @@ impl<'a, 'tcx: 'a> Builder<'a, 'tcx> { continue_block: Option, ) -> Option> { let mut blocks = IndexVec::from_elem(None, &drops.drops); - if continue_block.is_some() { - blocks[CONTINUE_NODE] = continue_block; - } + blocks[ROOT_NODE] = continue_block; + drops.build_mir::(&mut self.cfg, &mut blocks); if drops.drops.iter().any(|(drop, _)| drop.kind == DropKind::Value) { let unwind_target = self.diverge_cleanup(); - let num_roots = drops.first_non_root.index(); - let mut unwind_indices = IndexVec::from_elem_n(unwind_target, num_roots); - for (drop_idx, drop_data) in drops.drops.iter_enumerated().skip(num_roots) { + let mut unwind_indices = IndexVec::from_elem_n(unwind_target, 1); + for (drop_idx, drop_data) in drops.drops.iter_enumerated().skip(1) { match drop_data.0.kind { DropKind::Storage => { if self.is_generator { From 62fccdcf6a7b2561938d403cb5dc38dd626b3044 Mon Sep 17 00:00:00 2001 From: Matthew Jasper Date: Sun, 17 Nov 2019 11:26:00 +0000 Subject: [PATCH 09/16] Temp: Caching --- src/librustc_mir_build/build/scope.rs | 145 ++++++++++++++++---------- 1 file changed, 90 insertions(+), 55 deletions(-) diff --git a/src/librustc_mir_build/build/scope.rs b/src/librustc_mir_build/build/scope.rs index 9456eb0d40199..507a230859e5e 100644 --- a/src/librustc_mir_build/build/scope.rs +++ b/src/librustc_mir_build/build/scope.rs @@ -105,8 +105,6 @@ pub struct 
Scopes<'tcx> {
     /// Drops that need to be done on paths to the `GeneratorDrop` terminator.
     generator_drops: DropTree,
-    // TODO: implement caching
-    // cached_unwind_drop: DropIdx,
 }
 
 #[derive(Debug)]
@@ -127,6 +125,14 @@ struct Scope {
     drops: Vec<DropData>,
 
     moved_locals: Vec<Local>,
+
+    /// The drop index that will drop everything in and below this scope on an
+    /// unwind path.
+    cached_unwind_block: Option<DropIdx>,
+
+    /// The drop index that will drop everything in and below this scope on a
+    /// generator drop path.
+    cached_generator_drop_block: Option<DropIdx>,
 }
 
 #[derive(Clone, Copy, Debug)]
@@ -213,6 +219,11 @@ impl Scope {
             DropKind::Storage => false,
         })
     }
+
+    fn invalidate_cache(&mut self) {
+        self.cached_unwind_block = None;
+        self.cached_generator_drop_block = None;
+    }
 }
 
 /// A trait that determined how [DropTree::lower_to_mir] creates its blocks and
@@ -229,11 +240,7 @@ impl DropTree {
             DropData { source_info: fake_source_info, local: Local::MAX, kind: DropKind::Storage };
         let drop_idx = DropIdx::MAX;
         let drops = IndexVec::from_elem_n((fake_data, drop_idx), 1);
-        Self {
-            drops,
-            entry_points: Vec::new(),
-            previous_drops: FxHashMap::default(),
-        }
+        Self { drops, entry_points: Vec::new(), previous_drops: FxHashMap::default() }
     }
 
     fn add_drop(&mut self, drop: DropData, next: DropIdx) -> DropIdx {
@@ -383,6 +390,8 @@ impl<'tcx> Scopes<'tcx> {
             region_scope_span: region_scope.1.span,
             drops: vec![],
             moved_locals: vec![],
+            cached_unwind_block: None,
+            cached_generator_drop_block: None,
         });
     }
 
@@ -399,10 +408,6 @@ impl<'tcx> Scopes<'tcx> {
             .unwrap_or_else(|| span_bug!(span, "region_scope {:?} does not enclose", region_scope))
     }
 
-    fn iter_mut(&mut self) -> impl DoubleEndedIterator<Item = &mut Scope> + '_ {
-        self.scopes.iter_mut().rev()
-    }
-
     /// Returns the topmost active scope, which is known to be alive until
     /// the next scope expression.
     fn topmost(&self) -> region::Scope {
@@ -609,10 +614,11 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
         } else {
             self.scopes.breakable_scopes[break_index].continue_drops.as_mut().unwrap()
         };
-
-        let mut drop_idx = DropIdx::from_u32(destination.is_none() as u32);
-        for drop in scope_drops {
-            drop_idx = drops.add_drop(*drop, drop_idx);
+        let mut drop_idx = ROOT_NODE;
+        for scope in &self.scopes.scopes[scope_index + 1..] {
+            for drop in &scope.drops {
+                drop_idx = drops.add_drop(*drop, drop_idx);
+            }
         }
         drops.add_entry(block, drop_idx);
 
         // `build_drop_tree` doesn't have access to our source_info, so we
@@ -668,19 +674,6 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
         ))
     }
 
-    /// Sets up a path that performs all required cleanup for dropping a generator.
-    ///
-    /// This path terminates in GeneratorDrop. Returns the start of the path.
-    /// None indicates there’s no cleanup to do at this point.
-    crate fn generator_drop_cleanup(&mut self, yield_block: BasicBlock) {
-        let drops = self.scopes.scopes.iter().flat_map(|scope| &scope.drops);
-        let mut next_drop = ROOT_NODE;
-        for drop in drops {
-            next_drop = self.scopes.generator_drops.add_drop(*drop, next_drop);
-        }
-        self.scopes.generator_drops.add_entry(yield_block, next_drop);
-    }
-
     /// Creates a new source scope, nested in the current one.
     crate fn new_source_scope(
        &mut self,
@@ -777,8 +770,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
         local: Local,
         drop_kind: DropKind,
     ) {
-        // TODO: add back in caching.
- let _needs_drop = match drop_kind { + let needs_drop = match drop_kind { DropKind::Value => { if !self.hir.needs_drop(self.local_decls[local].ty) { return; @@ -798,23 +790,29 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { } }; - let scope = self - .scopes - .iter_mut() - .find(|scope| scope.region_scope == region_scope) - .unwrap_or_else(|| { - span_bug!(span, "region scope {:?} not in scope to drop {:?}", region_scope, local); - }); - - let region_scope_span = region_scope.span(self.hir.tcx(), &self.hir.region_scope_tree); - // Attribute scope exit drops to scope's closing brace. - let scope_end = self.hir.tcx().sess.source_map().end_point(region_scope_span); - - scope.drops.push(DropData { - source_info: SourceInfo { span: scope_end, scope: scope.source_scope }, - local, - kind: drop_kind, - }); + let invalidate_caches = needs_drop || self.is_generator; + for scope in self.scopes.scopes.iter_mut().rev() { + if invalidate_caches { + scope.invalidate_cache(); + } + + if scope.region_scope == region_scope { + let region_scope_span = + region_scope.span(self.hir.tcx(), &self.hir.region_scope_tree); + // Attribute scope exit drops to scope's closing brace. + let scope_end = self.hir.tcx().sess.source_map().end_point(region_scope_span); + + scope.drops.push(DropData { + source_info: SourceInfo { span: scope_end, scope: scope.source_scope }, + local, + kind: drop_kind, + }); + + return; + } + } + + span_bug!(span, "region scope {:?} not in scope to drop {:?}", region_scope, local); } /// Indicates that the "local operand" stored in `local` is @@ -861,9 +859,10 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { } Some(local_scope) => self + .scopes .scopes .iter_mut() - .find(|scope| scope.region_scope == local_scope) + .rfind(|scope| scope.region_scope == local_scope) .unwrap_or_else(|| bug!("scope {:?} not found in scope list!", local_scope)), }; @@ -913,6 +912,9 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { // Manually drop the condition on both branches. let top_scope = self.scopes.scopes.last_mut().unwrap(); let top_drop_data = top_scope.drops.pop().unwrap(); + if self.is_generator { + top_scope.invalidate_cache(); + } match top_drop_data.kind { DropKind::Value { .. } => { @@ -943,17 +945,25 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { fn diverge_cleanup(&mut self) -> DropIdx { let is_generator = self.is_generator; - let drops = self + let (uncached_scope, mut cached_drop) = self .scopes .scopes .iter() - .flat_map(|scope| &scope.drops) - .filter(|drop| is_generator || drop.kind == DropKind::Value); - let mut next_drop = ROOT_NODE; - for drop in drops { - next_drop = self.scopes.unwind_drops.add_drop(*drop, next_drop); + .enumerate() + .rev() + .find_map(|(scope_idx, scope)| { + scope.cached_unwind_block.map(|cached_block| (scope_idx + 1, cached_block)) + }) + .unwrap_or((0, ROOT_NODE)); + for scope in &mut self.scopes.scopes[uncached_scope..] { + for drop in &scope.drops { + if is_generator || drop.kind == DropKind::Value { + cached_drop = self.scopes.unwind_drops.add_drop(*drop, cached_drop); + } + } + scope.cached_unwind_block = Some(cached_drop); } - next_drop + cached_drop } /// Prepares to create a path that performs all required cleanup for @@ -966,6 +976,30 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { self.scopes.unwind_drops.add_entry(start, next_drop); } + /// Sets up a path that performs all required cleanup for dropping a generator. + /// + /// This path terminates in GeneratorDrop. Returns the start of the path. + /// None indicates there’s no cleanup to do at this point. 
+ crate fn generator_drop_cleanup(&mut self, yield_block: BasicBlock) { + let (uncached_scope, mut cached_drop) = self + .scopes + .scopes + .iter() + .enumerate() + .rev() + .find_map(|(scope_idx, scope)| { + scope.cached_generator_drop_block.map(|cached_block| (scope_idx + 1, cached_block)) + }) + .unwrap_or((0, ROOT_NODE)); + for scope in &mut self.scopes.scopes[uncached_scope..] { + for drop in &scope.drops { + cached_drop = self.scopes.generator_drops.add_drop(*drop, cached_drop); + } + scope.cached_generator_drop_block = Some(cached_drop); + } + self.scopes.generator_drops.add_entry(yield_block, cached_drop); + } + /// Utility function for *non*-scope code to build their own drops crate fn build_drop_and_replace( &mut self, @@ -1022,6 +1056,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { assert_eq!(top_scope.region_scope, region_scope); top_scope.drops.clear(); + top_scope.invalidate_cache(); } } From b4eeee6c0c5e69af0d8347513386f31ed15dc22e Mon Sep 17 00:00:00 2001 From: Matthew Jasper Date: Sun, 17 Nov 2019 11:26:19 +0000 Subject: [PATCH 10/16] Temp: More test changes --- src/test/codegen/drop.rs | 6 +-- .../mir-opt/generator-storage-dead-unwind.rs | 43 +++++++++---------- 2 files changed, 24 insertions(+), 25 deletions(-) diff --git a/src/test/codegen/drop.rs b/src/test/codegen/drop.rs index 0c7f3bb2020a9..99a791464ab89 100644 --- a/src/test/codegen/drop.rs +++ b/src/test/codegen/drop.rs @@ -23,13 +23,13 @@ pub fn droppy() { // FIXME(eddyb) the `void @` forces a match on the instruction, instead of the // comment, that's `; call core::intrinsics::drop_in_place::` // for the `v0` mangling, should switch to matching on that once `legacy` is gone. +// CHECK-NOT: call void @{{.*}}drop_in_place{{.*}}SomeUniqueName +// CHECK: invoke void @{{.*}}drop_in_place{{.*}}SomeUniqueName +// CHECK: invoke void @{{.*}}drop_in_place{{.*}}SomeUniqueName // CHECK-NOT: invoke void @{{.*}}drop_in_place{{.*}}SomeUniqueName // CHECK: call void @{{.*}}drop_in_place{{.*}}SomeUniqueName // CHECK: call void @{{.*}}drop_in_place{{.*}}SomeUniqueName -// CHECK-NOT: call void @{{.*}}drop_in_place{{.*}}SomeUniqueName -// CHECK: invoke void @{{.*}}drop_in_place{{.*}}SomeUniqueName // CHECK: call void @{{.*}}drop_in_place{{.*}}SomeUniqueName -// CHECK: invoke void @{{.*}}drop_in_place{{.*}}SomeUniqueName // CHECK: call void @{{.*}}drop_in_place{{.*}}SomeUniqueName // CHECK-NOT: {{(call|invoke) void @.*}}drop_in_place{{.*}}SomeUniqueName // The next line checks for the } that ends the function definition diff --git a/src/test/mir-opt/generator-storage-dead-unwind.rs b/src/test/mir-opt/generator-storage-dead-unwind.rs index 6d0600100b280..fd91945e09660 100644 --- a/src/test/mir-opt/generator-storage-dead-unwind.rs +++ b/src/test/mir-opt/generator-storage-dead-unwind.rs @@ -54,17 +54,15 @@ fn main() { // bb1: { // ... // StorageLive(_7); -// StorageLive(_8); -// _8 = move _3; -// _7 = const take::(move _8) -> [return: bb2, unwind: bb11]; +// _7 = move _2; +// _6 = const take::(move _7) -> [return: bb2, unwind: bb9]; // } // bb2: { // StorageDead(_8); // StorageDead(_7); // StorageLive(_9); -// StorageLive(_10); -// _10 = move _4; -// _9 = const take::(move _10) -> [return: bb3, unwind: bb10]; +// _9 = move _3; +// _8 = const take::(move _9) -> [return: bb3, unwind: bb8]; // } // bb3: { // StorageDead(_10); @@ -72,7 +70,8 @@ fn main() { // ... 
// StorageDead(_4); // StorageDead(_3); -// drop(_1) -> [return: bb4, unwind: bb9]; +// StorageDead(_2); +// drop(_1) -> [return: bb4, unwind: bb11]; // } // bb4: { // return; @@ -80,36 +79,36 @@ fn main() { // bb5: { // ... // StorageDead(_3); -// drop(_1) -> [return: bb6, unwind: bb8]; +// drop(_2) -> [return: bb6, unwind: bb12]; // } // bb6: { -// StorageDead(_3); -// drop(_1) -> [return: bb7, unwind: bb9]; +// StorageDead(_2); +// drop(_1) -> [return: bb7, unwind: bb11]; // } // bb7: { // generator_drop; // } // bb8 (cleanup): { -// StorageDead(_3); -// drop(_2) -> bb9; +// StorageDead(_9); +// StorageDead(_8); +// goto -> bb10; // } // bb9 (cleanup): { -// resume; +// StorageDead(_7); +// StorageDead(_6); +// goto -> bb10; // } // bb10 (cleanup): { -// StorageDead(_10); -// StorageDead(_9); -// goto -> bb12; +// StorageDead(_3); +// StorageDead(_2); +// drop(_1) -> bb11; // } // bb11 (cleanup): { -// StorageDead(_8); -// StorageDead(_7); -// goto -> bb12; +// resume; // } // bb12 (cleanup): { -// StorageDead(_4); -// StorageDead(_3); -// drop(_1) -> bb9; +// StorageDead(_2); +// drop(_1) -> bb11; // } // END rustc.main-{{closure}}.StateTransform.before.mir From ce3149748088ab8ae6b5d11f86b0b98eb924f12c Mon Sep 17 00:00:00 2001 From: Matthew Jasper Date: Sun, 17 Nov 2019 11:30:48 +0000 Subject: [PATCH 11/16] Reduce the number of drop-flag assignments in unwind paths --- .../dataflow/move_paths/builder.rs | 5 +- src/librustc_mir/util/elaborate_drops.rs | 50 +++++-------------- src/test/mir-opt/unusual-item-types.rs | 15 +++--- 3 files changed, 19 insertions(+), 51 deletions(-) diff --git a/src/librustc_mir/dataflow/move_paths/builder.rs b/src/librustc_mir/dataflow/move_paths/builder.rs index 57aa5de7f7a31..b65999a746cbd 100644 --- a/src/librustc_mir/dataflow/move_paths/builder.rs +++ b/src/librustc_mir/dataflow/move_paths/builder.rs @@ -361,6 +361,7 @@ impl<'b, 'a, 'tcx> Gatherer<'b, 'a, 'tcx> { fn gather_terminator(&mut self, term: &Terminator<'tcx>) { match term.kind { TerminatorKind::Goto { target: _ } + | TerminatorKind::Return | TerminatorKind::Resume | TerminatorKind::Abort | TerminatorKind::GeneratorDrop @@ -368,10 +369,6 @@ impl<'b, 'a, 'tcx> Gatherer<'b, 'a, 'tcx> { | TerminatorKind::FalseUnwind { .. } | TerminatorKind::Unreachable => {} - TerminatorKind::Return => { - self.gather_move(&Place::return_place()); - } - TerminatorKind::Assert { ref cond, .. } => { self.gather_operand(cond); } diff --git a/src/librustc_mir/util/elaborate_drops.rs b/src/librustc_mir/util/elaborate_drops.rs index fab64e37cbc5a..e0bb04a9c7023 100644 --- a/src/librustc_mir/util/elaborate_drops.rs +++ b/src/librustc_mir/util/elaborate_drops.rs @@ -163,8 +163,6 @@ where .patch_terminator(bb, TerminatorKind::Goto { target: self.succ }); } DropStyle::Static => { - let loc = self.terminator_loc(bb); - self.elaborator.clear_drop_flag(loc, self.path, DropFlagMode::Deep); self.elaborator.patch().patch_terminator( bb, TerminatorKind::Drop { @@ -175,9 +173,7 @@ where ); } DropStyle::Conditional => { - let unwind = self.unwind; // FIXME(#43234) - let succ = self.succ; - let drop_bb = self.complete_drop(Some(DropFlagMode::Deep), succ, unwind); + let drop_bb = self.complete_drop(self.succ, self.unwind); self.elaborator .patch() .patch_terminator(bb, TerminatorKind::Goto { target: drop_bb }); @@ -249,7 +245,7 @@ where // our own drop flag. path: self.path, } - .complete_drop(None, succ, unwind) + .complete_drop(succ, unwind) } } @@ -280,13 +276,7 @@ where // Clear the "master" drop flag at the end. 
This is needed // because the "master" drop protects the ADT's discriminant, // which is invalidated after the ADT is dropped. - let (succ, unwind) = (self.succ, self.unwind); // FIXME(#43234) - ( - self.drop_flag_reset_block(DropFlagMode::Shallow, succ, unwind), - unwind.map(|unwind| { - self.drop_flag_reset_block(DropFlagMode::Shallow, unwind, Unwind::InCleanup) - }), - ) + (self.drop_flag_reset_block(DropFlagMode::Shallow, self.succ, self.unwind), self.unwind) } /// Creates a full drop ladder, consisting of 2 connected half-drop-ladders @@ -813,11 +803,7 @@ where self.open_drop_for_adt(def, substs) } } - ty::Dynamic(..) => { - let unwind = self.unwind; // FIXME(#43234) - let succ = self.succ; - self.complete_drop(Some(DropFlagMode::Deep), succ, unwind) - } + ty::Dynamic(..) => self.complete_drop(self.succ, self.unwind), ty::Array(ety, size) => { let size = size.try_eval_usize(self.tcx(), self.elaborator.param_env()); self.open_drop_for_array(ety, size) @@ -829,26 +815,14 @@ where } /// Returns a basic block that drop a place using the context - /// and path in `c`. If `mode` is something, also clear `c` - /// according to it. + /// and path in `c`. /// /// if FLAG(self.path) - /// if let Some(mode) = mode: FLAG(self.path)[mode] = false /// drop(self.place) - fn complete_drop( - &mut self, - drop_mode: Option, - succ: BasicBlock, - unwind: Unwind, - ) -> BasicBlock { - debug!("complete_drop({:?},{:?})", self, drop_mode); + fn complete_drop(&mut self, succ: BasicBlock, unwind: Unwind) -> BasicBlock { + debug!("complete_drop(succ={:?}, unwind={:?})", succ, unwind); let drop_block = self.drop_block(succ, unwind); - let drop_block = if let Some(mode) = drop_mode { - self.drop_flag_reset_block(mode, drop_block, unwind) - } else { - drop_block - }; self.drop_flag_test_block(drop_block, succ, unwind) } @@ -861,6 +835,11 @@ where ) -> BasicBlock { debug!("drop_flag_reset_block({:?},{:?})", self, mode); + if unwind.is_cleanup() { + // The drop flag isn't read again on the unwind path, so don't + // bother setting it. 
+ return succ; + } let block = self.new_block(unwind, TerminatorKind::Goto { target: succ }); let block_start = Location { block: block, statement_index: 0 }; self.elaborator.clear_drop_flag(block_start, self.path, mode); @@ -964,11 +943,6 @@ where self.elaborator.patch().new_temp(ty, self.source_info.span) } - fn terminator_loc(&mut self, bb: BasicBlock) -> Location { - let body = self.elaborator.body(); - self.elaborator.patch().terminator_loc(body, bb) - } - fn constant_usize(&self, val: u16) -> Operand<'tcx> { Operand::Constant(box Constant { span: self.source_info.span, diff --git a/src/test/mir-opt/unusual-item-types.rs b/src/test/mir-opt/unusual-item-types.rs index 3590bd1045bf8..1f8343c16662a 100644 --- a/src/test/mir-opt/unusual-item-types.rs +++ b/src/test/mir-opt/unusual-item-types.rs @@ -40,8 +40,8 @@ fn main() { // END rustc.E-V-{{constant}}.mir_map.0.mir // START rustc.ptr-drop_in_place.std__vec__Vec_i32_.AddMovesForPackedDrops.before.mir -// bb0: { -// goto -> bb7; +// bb0: { +// goto -> bb6; // } // bb1: { // return; @@ -53,17 +53,14 @@ fn main() { // goto -> bb1; // } // bb4 (cleanup): { -// goto -> bb2; +// drop(((*_1).0: alloc::raw_vec::RawVec)) -> bb2; // } -// bb5 (cleanup): { -// drop(((*_1).0: alloc::raw_vec::RawVec)) -> bb4; +// bb5: { +// drop(((*_1).0: alloc::raw_vec::RawVec)) -> [return: bb3, unwind: bb2]; // } // bb6: { -// drop(((*_1).0: alloc::raw_vec::RawVec)) -> [return: bb3, unwind: bb4]; -// } -// bb7: { // _2 = &mut (*_1); -// _3 = const as std::ops::Drop>::drop(move _2) -> [return: bb6, unwind: bb5]; +// _3 = const as std::ops::Drop>::drop(move _2) -> [return: bb5, unwind: bb4]; // } // END rustc.ptr-drop_in_place.std__vec__Vec_i32_.AddMovesForPackedDrops.before.mir From aa6f3e519968fdf3d9a098489f089846aea74b8b Mon Sep 17 00:00:00 2001 From: Matthew Jasper Date: Sun, 17 Nov 2019 14:38:59 +0000 Subject: [PATCH 12/16] Temp: Fix a bug --- src/librustc_mir_build/build/scope.rs | 31 +++++++++------------------ 1 file changed, 10 insertions(+), 21 deletions(-) diff --git a/src/librustc_mir_build/build/scope.rs b/src/librustc_mir_build/build/scope.rs index 507a230859e5e..d2217276bf33f 100644 --- a/src/librustc_mir_build/build/scope.rs +++ b/src/librustc_mir_build/build/scope.rs @@ -642,27 +642,11 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { fn leave_top_scope(&mut self, block: BasicBlock) -> BasicBlock { // If we are emitting a `drop` statement, we need to have the cached // diverge cleanup pads ready in case that drop panics. 
- let scope = self.scopes.scopes.last().expect("exit_top_scope called with no scopes"); + let needs_cleanup = self.scopes.scopes.last().map_or(false, |scope| scope.needs_cleanup()); let is_generator = self.generator_kind.is_some(); - let needs_cleanup = scope.needs_cleanup(); + let unwind_to = if needs_cleanup { self.diverge_cleanup() } else { DropIdx::MAX }; - let unwind_to = if needs_cleanup { - let mut drops = self - .scopes - .scopes - .iter() - .flat_map(|scope| &scope.drops) - .filter(|drop| is_generator || drop.kind == DropKind::Value); - let mut next_drop = ROOT_NODE; - let mut drop_info = drops.next().unwrap(); - for previous_drop_info in drops { - next_drop = self.scopes.unwind_drops.add_drop(*drop_info, next_drop); - drop_info = previous_drop_info; - } - next_drop - } else { - DropIdx::MAX - }; + let scope = self.scopes.scopes.last().expect("exit_top_scope called with no scopes"); unpack!(build_scope_drops( &mut self.cfg, &mut self.scopes.unwind_drops, @@ -1097,16 +1081,18 @@ fn build_scope_drops<'tcx>( match drop_data.kind { DropKind::Value => { + debug_assert_eq!(unwind_drops.drops[unwind_to].0.local, drop_data.local); + debug_assert_eq!(unwind_drops.drops[unwind_to].0.kind, drop_data.kind); + unwind_to = unwind_drops.drops[unwind_to].1; // If the operand has been moved, and we are not on an unwind // path, then don't generate the drop. (We only take this into // account for non-unwind paths so as not to disturb the // caching mechanism.) if scope.moved_locals.iter().any(|&o| o == local) { - unwind_to = unwind_drops.drops[unwind_to].1; continue; } - unwind_drops.entry_points.push((unwind_to, block)); + unwind_drops.add_entry(block, unwind_to); let next = cfg.start_new_block(); cfg.terminate( @@ -1118,6 +1104,8 @@ fn build_scope_drops<'tcx>( } DropKind::Storage => { if storage_dead_on_unwind { + debug_assert_eq!(unwind_drops.drops[unwind_to].0.local, drop_data.local); + debug_assert_eq!(unwind_drops.drops[unwind_to].0.kind, drop_data.kind); unwind_to = unwind_drops.drops[unwind_to].1; } // Only temps and vars need their storage dead. @@ -1214,6 +1202,7 @@ impl<'a, 'tcx: 'a> Builder<'a, 'tcx> { // optimization is, but it is here. for (drop_idx, drop_data) in drops.drops.iter_enumerated() { if let DropKind::Value = drop_data.0.kind { + debug_assert!(drop_data.1 < drops.drops.next_index()); drops.entry_points.push((drop_data.1, blocks[drop_idx].unwrap())); } } From cb7c75bbdad7e34140e028e7112e3dba88b7a9f5 Mon Sep 17 00:00:00 2001 From: Matthew Jasper Date: Sun, 17 Nov 2019 17:20:11 +0000 Subject: [PATCH 13/16] Temp: Fix debuginfo --- src/librustc_mir_build/build/scope.rs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/src/librustc_mir_build/build/scope.rs b/src/librustc_mir_build/build/scope.rs index d2217276bf33f..eeb1620352fdc 100644 --- a/src/librustc_mir_build/build/scope.rs +++ b/src/librustc_mir_build/build/scope.rs @@ -363,8 +363,12 @@ impl DropTree { cfg.push(block, stmt); let target = blocks[drop_data.1].unwrap(); if target != block { + // Diagnostics don't use this `Span` but debuginfo + // might, which could cause breakpoints to end up in the + // wrong place. 
+ let source_info = SourceInfo { span: DUMMY_SP, ..drop_data.0.source_info }; let terminator = TerminatorKind::Goto { target }; - cfg.terminate(block, drop_data.0.source_info, terminator); + cfg.terminate(block, source_info, terminator); } } } From 1f5fb7bea0e0a3fb3d827269fa8e5e4e87957026 Mon Sep 17 00:00:00 2001 From: Matthew Jasper Date: Sun, 17 Nov 2019 22:14:16 +0000 Subject: [PATCH 14/16] Make `into` schedule drop for the destination again --- src/librustc_mir_build/build/block.rs | 12 +- .../build/expr/as_rvalue.rs | 6 +- src/librustc_mir_build/build/expr/as_temp.rs | 6 +- src/librustc_mir_build/build/expr/into.rs | 56 ++- src/librustc_mir_build/build/into.rs | 13 +- src/librustc_mir_build/build/matches/mod.rs | 94 +--- src/librustc_mir_build/build/mod.rs | 20 +- src/librustc_mir_build/build/scope.rs | 194 ++++++-- src/test/mir-opt/box_expr.rs | 2 +- src/test/mir-opt/issue-62289.rs | 19 +- src/test/ui/drop/dynamic-drop-async.rs | 165 ++++--- src/test/ui/drop/dynamic-drop.rs | 419 +++++++++++------- 12 files changed, 648 insertions(+), 358 deletions(-) diff --git a/src/librustc_mir_build/build/block.rs b/src/librustc_mir_build/build/block.rs index 64cca99b2ebd5..9303c811df45f 100644 --- a/src/librustc_mir_build/build/block.rs +++ b/src/librustc_mir_build/build/block.rs @@ -2,14 +2,16 @@ use crate::build::matches::ArmHasGuard; use crate::build::ForGuard::OutsideGuard; use crate::build::{BlockAnd, BlockAndExtension, BlockFrame, Builder}; use crate::hair::*; +use rustc::middle::region; use rustc::mir::*; use rustc_hir as hir; use rustc_span::Span; impl<'a, 'tcx> Builder<'a, 'tcx> { - crate fn ast_block( + pub fn ast_block( &mut self, destination: &Place<'tcx>, + scope: Option, block: BasicBlock, ast_block: &'tcx hir::Block<'tcx>, source_info: SourceInfo, @@ -26,9 +28,10 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { self.in_opt_scope(opt_destruction_scope.map(|de| (de, source_info)), move |this| { this.in_scope((region_scope, source_info), LintLevel::Inherited, move |this| { if targeted_by_break { - this.in_breakable_scope(None, destination.clone(), span, |this| { + this.in_breakable_scope(None, destination.clone(), scope, span, |this| { Some(this.ast_block_stmts( destination, + scope, block, span, stmts, @@ -37,7 +40,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { )) }) } else { - this.ast_block_stmts(destination, block, span, stmts, expr, safety_mode) + this.ast_block_stmts(destination, scope, block, span, stmts, expr, safety_mode) } }) }) @@ -46,6 +49,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { fn ast_block_stmts( &mut self, destination: &Place<'tcx>, + scope: Option, mut block: BasicBlock, span: Span, stmts: Vec>, @@ -177,7 +181,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { destination_ty.is_unit() || this.block_context.currently_ignores_tail_results(); this.block_context.push(BlockFrame::TailExpr { tail_result_is_ignored }); - unpack!(block = this.into(destination, block, expr)); + unpack!(block = this.into(destination, scope, block, expr)); let popped = this.block_context.pop(); assert!(popped.map_or(false, |bf| bf.is_tail_expr())); diff --git a/src/librustc_mir_build/build/expr/as_rvalue.rs b/src/librustc_mir_build/build/expr/as_rvalue.rs index dc97f321a36ad..fb1b22ad14cfc 100644 --- a/src/librustc_mir_build/build/expr/as_rvalue.rs +++ b/src/librustc_mir_build/build/expr/as_rvalue.rs @@ -111,12 +111,14 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { let box_ = Rvalue::NullaryOp(NullOp::Box, value.ty); this.cfg.push_assign(block, source_info, &Place::from(result), box_); - // initialize the box contents: + // 
Initialize the box contents. No scope is needed since the + // `Box` is already scheduled to be dropped. unpack!( block = this.into( &this.hir.tcx().mk_place_deref(Place::from(result)), + None, block, - value + value, ) ); block.and(Rvalue::Use(Operand::Move(Place::from(result)))) diff --git a/src/librustc_mir_build/build/expr/as_temp.rs b/src/librustc_mir_build/build/expr/as_temp.rs index 34dd10cbc0fc8..c17fd6ce96179 100644 --- a/src/librustc_mir_build/build/expr/as_temp.rs +++ b/src/librustc_mir_build/build/expr/as_temp.rs @@ -100,11 +100,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { } } - unpack!(block = this.into(temp_place, block, expr)); - - if let Some(temp_lifetime) = temp_lifetime { - this.schedule_drop(expr_span, temp_lifetime, temp, DropKind::Value); - } + unpack!(block = this.into(temp_place, temp_lifetime, block, expr)); block.and(temp) } diff --git a/src/librustc_mir_build/build/expr/into.rs b/src/librustc_mir_build/build/expr/into.rs index 4bea0f4801405..7b556881816f9 100644 --- a/src/librustc_mir_build/build/expr/into.rs +++ b/src/librustc_mir_build/build/expr/into.rs @@ -1,8 +1,10 @@ //! See docs in build/expr/mod.rs use crate::build::expr::category::{Category, RvalueFunc}; +use crate::build::scope::DropKind; use crate::build::{BlockAnd, BlockAndExtension, BlockFrame, Builder}; use crate::hair::*; +use rustc::middle::region; use rustc::mir::*; use rustc::ty::{self, CanonicalUserTypeAnnotation}; use rustc_data_structures::fx::FxHashMap; @@ -14,13 +16,19 @@ use rustc_target::spec::abi::Abi; impl<'a, 'tcx> Builder<'a, 'tcx> { /// Compile `expr`, storing the result into `destination`, which /// is assumed to be uninitialized. + /// If a `drop_scope` is provided, `destination` is scheduled to be dropped + /// in `scope` once it has been initialized. crate fn into_expr( &mut self, destination: &Place<'tcx>, + scope: Option, mut block: BasicBlock, expr: Expr<'tcx>, ) -> BlockAnd<()> { - debug!("into_expr(destination={:?}, block={:?}, expr={:?})", destination, block, expr); + debug!( + "into_expr(destination={:?}, scope={:?}, block={:?}, expr={:?})", + destination, scope, block, expr + ); // since we frequently have to reference `self` from within a // closure, where `self` would be shadowed, it's easier to @@ -35,6 +43,14 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { _ => false, }; + let schedule_drop = move |this: &mut Self| { + if let Some(drop_scope) = scope { + let local = + destination.as_local().expect("cannot schedule drop of non-Local place"); + this.schedule_drop(expr_span, drop_scope, local, DropKind::Value); + } + }; + if !expr_is_block_or_scope { this.block_context.push(BlockFrame::SubExpr); } @@ -42,13 +58,15 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { let block_and = match expr.kind { ExprKind::Scope { region_scope, lint_level, value } => { let region_scope = (region_scope, source_info); - this.in_scope(region_scope, lint_level, |this| this.into(destination, block, value)) + this.in_scope(region_scope, lint_level, |this| { + this.into(destination, scope, block, value) + }) } ExprKind::Block { body: ast_block } => { - this.ast_block(destination, block, ast_block, source_info) + this.ast_block(destination, scope, block, ast_block, source_info) } ExprKind::Match { scrutinee, arms } => { - this.match_expr(destination, expr_span, block, scrutinee, arms) + this.match_expr(destination, scope, expr_span, block, scrutinee, arms) } ExprKind::NeverToAny { source } => { let source = this.hir.mirror(source); @@ -63,6 +81,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { // This is an optimization. 
If the expression was a call then we already have an // unreachable block. Don't bother to terminate it and create a new one. + schedule_drop(this); if is_call { block.unit() } else { @@ -141,6 +160,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { this.in_breakable_scope( Some(loop_block), destination.clone(), + scope, expr_span, move |this| { // conduct the test, if necessary @@ -156,8 +176,14 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { // introduce a unit temporary as the destination for the loop body. let tmp = this.get_unit_temp(); // Execute the body, branching back to the test. - let body_block_end = unpack!(this.into(&tmp, body_block, body)); - this.cfg.goto(body_block_end, source_info, loop_block); + // No scope is provided, since we've scheduled the drop above. + let body_block_end = unpack!(this.into(&tmp, None, body_block, body)); + this.cfg.terminate( + body_block_end, + source_info, + TerminatorKind::Goto { target: loop_block }, + ); + schedule_drop(this); None }, ) @@ -198,8 +224,14 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { is_block_tail: None, }); let ptr_temp = Place::from(ptr_temp); - let block = unpack!(this.into(&ptr_temp, block, ptr)); - this.into(&this.hir.tcx().mk_place_deref(ptr_temp), block, val) + // No need for a scope, ptr_temp doesn't need drop + let block = unpack!(this.into(&ptr_temp, None, block, ptr)); + // Maybe we should provide a scope here so that + // `move_val_init` wouldn't leak on panic even with an + // arbitrary `val` expression, but `schedule_drop`, + // borrowck and drop elaboration all prevent us from + // dropping `ptr_temp.deref()`. + this.into(&this.hir.tcx().mk_place_deref(ptr_temp), None, block, val) } else { let args: Vec<_> = args .into_iter() @@ -229,10 +261,11 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { from_hir_call, }, ); + schedule_drop(this); success.unit() } } - ExprKind::Use { source } => this.into(destination, block, source), + ExprKind::Use { source } => this.into(destination, scope, block, source), ExprKind::Borrow { arg, borrow_kind } => { // We don't do this in `as_rvalue` because we use `as_place` // for borrow expressions, so we cannot create an `RValue` that @@ -316,6 +349,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { destination, Rvalue::Aggregate(adt, fields), ); + schedule_drop(this); block.unit() } @@ -341,6 +375,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { let place = unpack!(block = this.as_place(block, expr)); let rvalue = Rvalue::Use(this.consume_by_copy_or_move(place)); this.cfg.push_assign(block, source_info, destination, rvalue); + schedule_drop(this); block.unit() } ExprKind::Index { .. } | ExprKind::Deref { .. } | ExprKind::Field { .. 
} => { @@ -358,6 +393,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { let place = unpack!(block = this.as_place(block, expr)); let rvalue = Rvalue::Use(this.consume_by_copy_or_move(place)); this.cfg.push_assign(block, source_info, destination, rvalue); + schedule_drop(this); block.unit() } @@ -371,6 +407,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { source_info, TerminatorKind::Yield { value, resume, resume_arg: *destination, drop: None }, ); + schedule_drop(this); resume.unit() } @@ -400,6 +437,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { let rvalue = unpack!(block = this.as_local_rvalue(block, expr)); this.cfg.push_assign(block, source_info, destination, rvalue); + schedule_drop(this); block.unit() } }; diff --git a/src/librustc_mir_build/build/into.rs b/src/librustc_mir_build/build/into.rs index 1a2a9d2bc05fc..e57f10f0b14e9 100644 --- a/src/librustc_mir_build/build/into.rs +++ b/src/librustc_mir_build/build/into.rs @@ -6,6 +6,7 @@ use crate::build::{BlockAnd, Builder}; use crate::hair::*; +use rustc::middle::region; use rustc::mir::*; pub(in crate::build) trait EvalInto<'tcx> { @@ -13,21 +14,23 @@ pub(in crate::build) trait EvalInto<'tcx> { self, builder: &mut Builder<'_, 'tcx>, destination: &Place<'tcx>, + scope: Option, block: BasicBlock, ) -> BlockAnd<()>; } impl<'a, 'tcx> Builder<'a, 'tcx> { - crate fn into( + pub fn into( &mut self, destination: &Place<'tcx>, + scope: Option, block: BasicBlock, expr: E, ) -> BlockAnd<()> where E: EvalInto<'tcx>, { - expr.eval_into(self, destination, block) + expr.eval_into(self, destination, scope, block) } } @@ -36,10 +39,11 @@ impl<'tcx> EvalInto<'tcx> for ExprRef<'tcx> { self, builder: &mut Builder<'_, 'tcx>, destination: &Place<'tcx>, + scope: Option, block: BasicBlock, ) -> BlockAnd<()> { let expr = builder.hir.mirror(self); - builder.into_expr(destination, block, expr) + builder.into_expr(destination, scope, block, expr) } } @@ -48,8 +52,9 @@ impl<'tcx> EvalInto<'tcx> for Expr<'tcx> { self, builder: &mut Builder<'_, 'tcx>, destination: &Place<'tcx>, + scope: Option, block: BasicBlock, ) -> BlockAnd<()> { - builder.into_expr(destination, block, self) + builder.into_expr(destination, scope, block, self) } } diff --git a/src/librustc_mir_build/build/matches/mod.rs b/src/librustc_mir_build/build/matches/mod.rs index 6f6eb71a0c275..0679821c5908c 100644 --- a/src/librustc_mir_build/build/matches/mod.rs +++ b/src/librustc_mir_build/build/matches/mod.rs @@ -12,6 +12,7 @@ use crate::build::{GuardFrame, GuardFrameLocal, LocalsForNode}; use crate::hair::{self, *}; use rustc::middle::region; use rustc::mir::*; +use rustc::ty::layout::VariantIdx; use rustc::ty::{self, CanonicalUserTypeAnnotation, Ty}; use rustc_data_structures::fx::{FxHashMap, FxHashSet}; use rustc_hir::HirId; @@ -83,6 +84,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { crate fn match_expr( &mut self, destination: &Place<'tcx>, + destination_scope: Option, span: Span, mut block: BasicBlock, scrutinee: ExprRef<'tcx>, @@ -103,6 +105,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { self.lower_match_arms( destination, + destination_scope, scrutinee_place, scrutinee_span, arm_candidates, @@ -209,81 +212,17 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { } } - /// Lower the bindings, guards and arm bodies of a `match` expression. - /// - /// The decision tree should have already been created - /// (by [Builder::lower_match_tree]). - /// - /// `outer_source_info` is the SourceInfo for the whole match. 
- fn lower_match_arms( - &mut self, - destination: &Place<'tcx>, - scrutinee_place: Place<'tcx>, - scrutinee_span: Span, - arm_candidates: Vec<(&'_ Arm<'tcx>, Candidate<'_, 'tcx>)>, - outer_source_info: SourceInfo, - fake_borrow_temps: Vec<(Place<'tcx>, Local)>, - ) -> BlockAnd<()> { - let match_scope = self.scopes.topmost(); - - let arm_end_blocks: Vec<_> = arm_candidates - .into_iter() - .map(|(arm, candidate)| { - debug!("lowering arm {:?}\ncanidate = {:?}", arm, candidate); - - let arm_source_info = self.source_info(arm.span); - let arm_scope = (arm.scope, arm_source_info); - self.in_scope(arm_scope, arm.lint_level, |this| { - let body = this.hir.mirror(arm.body.clone()); - let scope = this.declare_bindings( - None, - arm.span, - &arm.pattern, - ArmHasGuard(arm.guard.is_some()), - Some((Some(&scrutinee_place), scrutinee_span)), - ); - - let arm_block = this.bind_pattern( - outer_source_info, - candidate, - arm.guard.as_ref().map(|g| (g, match_scope)), - &fake_borrow_temps, - scrutinee_span, - // Some(arm.scope), - ); - - if let Some(source_scope) = scope { - this.source_scope = source_scope; - } - - this.into(destination, arm_block, body) - }) - }) - .collect(); - - // all the arm blocks will rejoin here - let end_block = self.cfg.start_new_block(); - - for arm_block in arm_end_blocks { - self.cfg.goto(unpack!(arm_block), outer_source_info, end_block); - } - - self.source_scope = outer_source_info.scope; - - end_block.unit() - } - /// Binds the variables and ascribes types for a given `match` arm or /// `let` binding. /// /// Also check if the guard matches, if it's provided. /// `arm_scope` should be `Some` if and only if this is called for a /// `match` arm. - fn bind_pattern( + crate fn bind_pattern( &mut self, outer_source_info: SourceInfo, candidate: Candidate<'_, 'tcx>, - guard: Option<(&Guard<'tcx>, region::Scope)>, + guard: Option<&Guard<'tcx>>, fake_borrow_temps: &Vec<(Place<'tcx>, Local)>, scrutinee_span: Span, arm_scope: Option, @@ -363,13 +302,14 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { PatKind::Binding { mode: BindingMode::ByValue, var, subpattern: None, .. } => { let place = self.storage_live_binding(block, var, irrefutable_pat.span, OutsideGuard, true); - unpack!(block = self.into(&place, block, initializer)); + let region_scope = self.hir.region_scope_tree.var_scope(var.local_id); + + unpack!(block = self.into(&place, Some(region_scope), block, initializer)); // Inject a fake read, see comments on `FakeReadCause::ForLet`. let source_info = self.source_info(irrefutable_pat.span); self.cfg.push_fake_read(block, source_info, FakeReadCause::ForLet, place); - self.schedule_drop_for_binding(var, irrefutable_pat.span, OutsideGuard); block.unit() } @@ -396,9 +336,10 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { ascription: hair::pattern::Ascription { user_ty: pat_ascription_ty, variance: _, user_ty_span }, } => { + let region_scope = self.hir.region_scope_tree.var_scope(var.local_id); let place = self.storage_live_binding(block, var, irrefutable_pat.span, OutsideGuard, true); - unpack!(block = self.into(&place, block, initializer)); + unpack!(block = self.into(&place, Some(region_scope), block, initializer)); // Inject a fake read, see comments on `FakeReadCause::ForLet`. 
let pattern_source_info = self.source_info(irrefutable_pat.span); @@ -436,7 +377,6 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { }, ); - self.schedule_drop_for_binding(var, irrefutable_pat.span, OutsideGuard); block.unit() } @@ -655,7 +595,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { } #[derive(Debug)] -struct Candidate<'pat, 'tcx> { +pub(super) struct Candidate<'pat, 'tcx> { /// `Span` of the original pattern that gave rise to this candidate span: Span, @@ -1560,10 +1500,10 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { &mut self, candidate: Candidate<'pat, 'tcx>, parent_bindings: &[(Vec>, Vec>)], - guard: Option<(&Guard<'tcx>, region::Scope)>, + guard: Option<&Guard<'tcx>>, fake_borrows: &Vec<(Place<'tcx>, Local)>, scrutinee_span: Span, - //region_scope: region::Scope, + schedule_drops: bool, ) -> BasicBlock { debug!("bind_and_guard_matched_candidate(candidate={:?})", candidate); @@ -1672,7 +1612,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { // the reference that we create for the arm. // * So we eagerly create the reference for the arm and then take a // reference to that. - if let Some((guard, region_scope)) = guard { + if let Some(guard) = guard { let tcx = self.hir.tcx(); let bindings = parent_bindings .iter() @@ -1716,11 +1656,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { unreachable }); let outside_scope = self.cfg.start_new_block(); - self.exit_top_scope( - otherwise_post_guard_block, - candidate.otherwise_block.unwrap(), - source_info, - ); + self.exit_top_scope(otherwise_post_guard_block, outside_scope, source_info); self.false_edges( outside_scope, otherwise_block, diff --git a/src/librustc_mir_build/build/mod.rs b/src/librustc_mir_build/build/mod.rs index f752314acda2a..659393415a113 100644 --- a/src/librustc_mir_build/build/mod.rs +++ b/src/librustc_mir_build/build/mod.rs @@ -584,8 +584,12 @@ where let arg_scope_s = (arg_scope, source_info); // Attribute epilogue to function's closing brace let fn_end = span.shrink_to_hi(); - let return_block = - unpack!(builder.in_breakable_scope(None, Place::return_place(), fn_end, |builder| { + let return_block = unpack!(builder.in_breakable_scope( + None, + Place::return_place(), + Some(call_site_scope), + fn_end, + |builder| { Some(builder.in_scope(arg_scope_s, LintLevel::Inherited, |builder| { builder.args_and_body( START_BLOCK, @@ -595,7 +599,8 @@ where &body.value, ) })) - },)); + }, + )); let source_info = builder.source_info(fn_end); builder.cfg.terminate(return_block, source_info, TerminatorKind::Return); let should_abort = should_abort_on_panic(tcx, fn_def_id, abi); @@ -604,6 +609,7 @@ where if let Some(unreachable_block) = builder.cached_unreachable_block { builder.cfg.terminate(unreachable_block, source_info, TerminatorKind::Unreachable); } + builder.unschedule_return_place_drop(); return_block.unit() })); @@ -633,7 +639,9 @@ fn construct_const<'a, 'tcx>( let mut block = START_BLOCK; let ast_expr = &tcx.hir().body(body_id).value; let expr = builder.hir.mirror(ast_expr); - unpack!(block = builder.into_expr(&Place::return_place(), block, expr)); + // We don't provide a scope because we can't unwind in constants, so won't + // need to drop the return place. 
+ unpack!(block = builder.into_expr(&Place::return_place(), None, block, expr)); let source_info = builder.source_info(span); builder.cfg.terminate(block, source_info, TerminatorKind::Return); @@ -934,7 +942,9 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { } let body = self.hir.mirror(ast_body); - self.into(&Place::return_place(), block, body) + let call_site = + region::Scope { id: ast_body.hir_id.local_id, data: region::ScopeData::CallSite }; + self.into(&Place::return_place(), Some(call_site), block, body) } fn set_correct_source_scope_for_arg( diff --git a/src/librustc_mir_build/build/scope.rs b/src/librustc_mir_build/build/scope.rs index eeb1620352fdc..0971f8bdb59ec 100644 --- a/src/librustc_mir_build/build/scope.rs +++ b/src/librustc_mir_build/build/scope.rs @@ -81,17 +81,15 @@ that contains only loops and breakable blocks. It tracks where a `break`, */ +use crate::build::matches::{ArmHasGuard, Candidate}; use crate::build::{BlockAnd, BlockAndExtension, BlockFrame, Builder, CFG}; -use crate::hair::{Expr, ExprRef, LintLevel}; +use crate::hair::{Arm, Expr, ExprRef, LintLevel}; use rustc::middle::region; use rustc::mir::*; use rustc_data_structures::fx::FxHashMap; use rustc_hir as hir; -use rustc_hir::GeneratorKind; -use rustc_index::vec::{Idx, IndexVec}; +use rustc_index::vec::IndexVec; use rustc_span::{Span, DUMMY_SP}; -use std::collections::hash_map::Entry; -use std::mem; #[derive(Debug)] pub struct Scopes<'tcx> { @@ -161,6 +159,8 @@ struct BreakableScope<'tcx> { /// The destination of the loop/block expression itself (i.e., where to put /// the result of a `break` or `return` expression) break_destination: Place<'tcx>, + /// The scope that the destination should have its drop scheduled in. + destination_scope: Option, /// Drops that happen on the `break`/`return` path. break_drops: DropTree, /// Drops that happen on the `continue` path. 
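[Editor's illustration, not part of the patch: to make the structure these hunks manipulate concrete, here is a minimal standalone model of the drop-tree linkage. `DropIdx`, `Local`, and the `(local, next)` tuple are simplified stand-ins for the builder's real types.]

    use std::collections::HashMap;

    #[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
    struct DropIdx(usize);

    #[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
    struct Local(u32);

    struct DropTree {
        // One node per drop: (local to drop, drop that runs after it).
        // Index 0 is a fake root that is never emitted.
        drops: Vec<(Local, DropIdx)>,
        // Inverse of the "next drop" relation, used to share nodes.
        previous_drops: HashMap<(DropIdx, Local), DropIdx>,
    }

    impl DropTree {
        fn new() -> Self {
            DropTree {
                drops: vec![(Local(u32::MAX), DropIdx(0))],
                previous_drops: HashMap::new(),
            }
        }

        fn add_drop(&mut self, local: Local, next: DropIdx) -> DropIdx {
            let drops = &mut self.drops;
            *self.previous_drops.entry((next, local)).or_insert_with(|| {
                drops.push((local, next));
                DropIdx(drops.len() - 1)
            })
        }
    }

    fn main() {
        const ROOT: DropIdx = DropIdx(0);
        let mut break_drops = DropTree::new();

        // Drops are added in declaration order; each node points at the
        // drop that runs after it, so starting from the entry point the
        // most recently declared local is dropped first.
        let d1 = break_drops.add_drop(Local(1), ROOT);
        let d2 = break_drops.add_drop(Local(2), d1);

        // A second `break` exiting the same scopes reuses the same nodes,
        // so both entry points branch into one shared chain of drops.
        assert_eq!(break_drops.add_drop(Local(1), ROOT), d1);
        assert_eq!(break_drops.add_drop(Local(2), d1), d2);
    }

[Because identical suffixes are shared, each tree can later be lowered to MIR once, with every `break` or `continue` site registered as an entry point into the shared blocks.]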
@@ -336,7 +336,11 @@ impl DropTree { assert!(entry_points.is_empty()); } - fn link_blocks(&self, cfg: &mut CFG<'tcx>, blocks: &IndexVec>) { + fn link_blocks<'tcx>( + &self, + cfg: &mut CFG<'tcx>, + blocks: &IndexVec>, + ) { for (drop_idx, drop_data) in self.drops.iter_enumerated().rev() { let block = if let Some(block) = blocks[drop_idx] { block @@ -428,6 +432,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { &mut self, loop_block: Option, break_destination: Place<'tcx>, + destination_scope: Option, span: Span, f: F, ) -> BlockAnd<()> @@ -438,17 +443,19 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { let scope = BreakableScope { region_scope, break_destination, + destination_scope, break_drops: DropTree::new(), continue_drops: loop_block.map(|_| DropTree::new()), }; + let continue_blocks = loop_block.map(|block| (block, self.diverge_cleanup())); self.scopes.breakable_scopes.push(scope); let normal_exit_block = f(self); let breakable_scope = self.scopes.breakable_scopes.pop().unwrap(); assert!(breakable_scope.region_scope == region_scope); - let break_block = self.build_exit_tree(breakable_scope.break_drops, None); breakable_scope.continue_drops.map(|drops| { - self.build_exit_tree(drops, loop_block); + self.build_exit_tree(drops, continue_blocks); }); + let break_block = self.build_exit_tree(breakable_scope.break_drops, None); match (normal_exit_block, break_block) { (Some(block), None) | (None, Some(block)) => block, (None, None) => self.cfg.start_new_block().unit(), @@ -577,24 +584,22 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { .rposition(|breakable_scope| breakable_scope.region_scope == scope) .unwrap_or_else(|| span_bug!(span, "no enclosing breakable scope found")) }; - let (break_index, destination) = match scope { + let (break_index, destination, dest_scope) = match scope { BreakableTarget::Return => { let scope = &self.scopes.breakable_scopes[0]; if scope.break_destination != Place::return_place() { span_bug!(span, "`return` in item with no return scope"); } - (0, Some(scope.break_destination.clone())) + (0, Some(scope.break_destination.clone()), scope.destination_scope) } BreakableTarget::Break(scope) => { let break_index = get_scope_index(scope); - ( - break_index, - Some(self.scopes.breakable_scopes[break_index].break_destination.clone()), - ) + let scope = &self.scopes.breakable_scopes[break_index]; + (break_index, Some(scope.break_destination.clone()), scope.destination_scope) } BreakableTarget::Continue(scope) => { let break_index = get_scope_index(scope); - (break_index, None) + (break_index, None, None) } }; @@ -602,7 +607,9 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { if let Some(value) = value { debug!("stmt_expr Break val block_context.push(SubExpr)"); self.block_context.push(BlockFrame::SubExpr); - unpack!(block = self.into(destination, block, value)); + unpack!(block = self.into(destination, dest_scope, block, value)); + dest_scope + .map(|scope| self.unschedule_drop(scope, destination.as_local().unwrap())); self.block_context.pop(); } else { self.cfg.push_assign_unit(block, source_info, destination) @@ -778,7 +785,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { } }; - let invalidate_caches = needs_drop || self.is_generator; + let invalidate_caches = needs_drop || self.generator_kind.is_some(); for scope in self.scopes.scopes.iter_mut().rev() { if invalidate_caches { scope.invalidate_cache(); @@ -803,6 +810,40 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { span_bug!(span, "region scope {:?} not in scope to drop {:?}", region_scope, local); } + /// Unschedule a drop. 
Used for `break`, `return` and `match` expressions + /// when `record_operands_moved` is not powerful enough. + /// + /// The given local is expected to have a value drop scheduled in the given + /// scope and for that drop to be the most recent thing scheduled in that + /// scope. + fn unschedule_drop(&mut self, region_scope: region::Scope, local: Local) { + if !self.hir.needs_drop(self.local_decls[local].ty) { + return; + } + for scope in self.scopes.scopes.iter_mut().rev() { + scope.invalidate_cache(); + + if scope.region_scope == region_scope { + let drop = scope.drops.pop(); + + match drop { + Some(DropData { local: removed_local, kind: DropKind::Value, .. }) + if removed_local == local => + { + return; + } + _ => bug!( + "found wrong drop, expected value drop of {:?}, found {:?}", + local, + drop, + ), + } + } + } + + bug!("region scope {:?} not in scope to unschedule drop of {:?}", region_scope, local); + } + /// Indicates that the "local operand" stored in `local` is /// *moved* at some point during execution (see `local_scope` for /// more information about what a "local operand" is -- in short, @@ -900,7 +941,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { // Manually drop the condition on both branches. let top_scope = self.scopes.scopes.last_mut().unwrap(); let top_drop_data = top_scope.drops.pop().unwrap(); - if self.is_generator { + if self.generator_kind.is_some() { top_scope.invalidate_cache(); } @@ -932,7 +973,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { } fn diverge_cleanup(&mut self) -> DropIdx { - let is_generator = self.is_generator; + let is_generator = self.generator_kind.is_some(); let (uncached_scope, mut cached_drop) = self .scopes .scopes @@ -1032,13 +1073,99 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { success_block } - // `match` arm scopes - // ================== + /// Lower the arms and guards of a match. + /// + /// The decision tree should have already been created (by + /// [Builder::lower_match_tree]). + /// + /// This is here, and not in `build::matches` because we have to do some + /// careful scope manipulation to have the drop of the destination be + /// scheduled at the end of each arm and then cleared for the next arm. + crate fn lower_match_arms( + &mut self, + destination: &Place<'tcx>, + destination_scope: Option, + scrutinee_place: Place<'tcx>, + scrutinee_span: Span, + arm_candidates: Vec<(&'_ Arm<'tcx>, Candidate<'_, 'tcx>)>, + outer_source_info: SourceInfo, + fake_borrow_temps: Vec<(Place<'tcx>, Local)>, + ) -> BlockAnd<()> { + if arm_candidates.is_empty() { + // If there are no arms to schedule drops, then we have to do it + // manually. 
+ if let Some(scope) = destination_scope { + self.schedule_drop( + outer_source_info.span, + scope, + destination.as_local().unwrap(), + DropKind::Value, + ); + } + return self.cfg.start_new_block().unit(); + } + let mut first_arm = true; + let cached_unwind_block = self.diverge_cleanup(); + let arm_end_blocks: Vec<_> = arm_candidates + .into_iter() + .map(|(arm, candidate)| { + if first_arm { + first_arm = false; + } else { + destination_scope.map(|scope| { + self.unschedule_drop(scope, destination.as_local().unwrap()); + }); + let top_scope = &mut self.scopes.scopes.last_mut().unwrap(); + top_scope.cached_unwind_block = Some(cached_unwind_block); + } + + let arm_source_info = self.source_info(arm.span); + let arm_scope = (arm.scope, arm_source_info); + self.in_scope(arm_scope, arm.lint_level, |this| { + let body = this.hir.mirror(arm.body.clone()); + let scope = this.declare_bindings( + None, + arm.span, + &arm.pattern, + ArmHasGuard(arm.guard.is_some()), + Some((Some(&scrutinee_place), scrutinee_span)), + ); + + let arm_block = this.bind_pattern( + outer_source_info, + candidate, + arm.guard.as_ref(), + &fake_borrow_temps, + scrutinee_span, + Some(arm.scope), + ); + + if let Some(source_scope) = scope { + this.source_scope = source_scope; + } + + this.into(destination, destination_scope, arm_block, body) + }) + }) + .collect(); + + // all the arm blocks will rejoin here + let end_block = self.cfg.start_new_block(); + + for arm_block in arm_end_blocks { + self.cfg.goto(unpack!(arm_block), outer_source_info, end_block); + } + + self.source_scope = outer_source_info.scope; + + end_block.unit() + } + /// Unschedules any drops in the top scope. /// /// This is only needed for `match` arm scopes, because they have one /// entrance per pattern, but only one exit. - crate fn clear_top_scope(&mut self, region_scope: region::Scope) { + pub(super) fn clear_top_scope(&mut self, region_scope: region::Scope) { let top_scope = self.scopes.scopes.last_mut().unwrap(); assert_eq!(top_scope.region_scope, region_scope); @@ -1046,6 +1173,18 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { top_scope.drops.clear(); top_scope.invalidate_cache(); } + + /// Unschedules the drop of the return place. + /// + /// If the return type of a function requires drop, then we schedule it + /// in the outermost scope so that it's dropped if there's a panic while + /// we drop any local variables. But we don't want to drop it if we + /// return normally. + crate fn unschedule_return_place_drop(&mut self) { + assert_eq!(self.scopes.scopes.len(), 1); + assert!(self.scopes.scopes[0].drops.len() <= 1); + self.scopes.scopes[0].drops.clear(); + } } /// Builds drops for pop_scope and leave_top_scope. 
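The `unschedule_drop` logic added above relies on a stack discipline: the drop being cancelled must be the most recently scheduled value drop in its scope, which is why anything else is a `bug!`. A minimal standalone sketch of that invariant, using simplified stand-ins for the builder's real `Scope` and `DropData` types rather than the actual compiler API:

// Toy model of the schedule/unschedule pattern: cancelling a drop is only
// legal when it is the most recent value drop in the scope.
#[derive(Debug)]
enum DropKind { Value, Storage }

#[derive(Debug)]
struct DropData { local: usize, kind: DropKind }

struct Scope { drops: Vec<DropData> }

impl Scope {
    fn schedule_drop(&mut self, local: usize, kind: DropKind) {
        self.drops.push(DropData { local, kind });
    }

    // Mirrors `unschedule_drop` above: pop the top drop and check that it
    // is the value drop we expected to cancel.
    fn unschedule_drop(&mut self, local: usize) {
        match self.drops.pop() {
            Some(DropData { local: l, kind: DropKind::Value }) if l == local => {}
            other => panic!("expected value drop of _{}, found {:?}", local, other),
        }
    }
}

fn main() {
    let mut scope = Scope { drops: Vec::new() };
    scope.schedule_drop(3, DropKind::Value);
    // The assignment into the destination succeeded, so its drop must not
    // run on the normal path; cancel it.
    scope.unschedule_drop(3);
    assert!(scope.drops.is_empty());
}

This is also why `lower_match_arms` unschedules the destination's drop before lowering every arm after the first: each arm schedules the same drop again when it assigns into the destination, so the drop from the previous arm has to be cleared first.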
@@ -1125,19 +1264,20 @@ impl<'a, 'tcx: 'a> Builder<'a, 'tcx> { fn build_exit_tree( &mut self, mut drops: DropTree, - continue_block: Option, + continue_block: Option<(BasicBlock, DropIdx)>, ) -> Option> { let mut blocks = IndexVec::from_elem(None, &drops.drops); - blocks[ROOT_NODE] = continue_block; + blocks[ROOT_NODE] = continue_block.map(|(block, _)| block); drops.build_mir::(&mut self.cfg, &mut blocks); if drops.drops.iter().any(|(drop, _)| drop.kind == DropKind::Value) { - let unwind_target = self.diverge_cleanup(); + let unwind_target = continue_block + .map_or_else(|| self.diverge_cleanup(), |(_, unwind_target)| unwind_target); let mut unwind_indices = IndexVec::from_elem_n(unwind_target, 1); for (drop_idx, drop_data) in drops.drops.iter_enumerated().skip(1) { match drop_data.0.kind { DropKind::Storage => { - if self.is_generator { + if self.generator_kind.is_some() { let unwind_drop = self .scopes .unwind_drops @@ -1164,7 +1304,7 @@ impl<'a, 'tcx: 'a> Builder<'a, 'tcx> { } crate fn build_drop_trees(&mut self, should_abort: bool) { - if self.is_generator { + if self.generator_kind.is_some() { self.build_generator_drop_trees(should_abort); } else { Self::build_unwind_tree( diff --git a/src/test/mir-opt/box_expr.rs b/src/test/mir-opt/box_expr.rs index 197a46f0eb429..2ef4a3d8abbb5 100644 --- a/src/test/mir-opt/box_expr.rs +++ b/src/test/mir-opt/box_expr.rs @@ -37,7 +37,7 @@ impl Drop for S { // } // bb1: { // _1 = move _2; -// drop(_2) -> bb2; +// drop(_2) -> [return: bb2, unwind: bb6]; // } // bb2: { // StorageDead(_2); diff --git a/src/test/mir-opt/issue-62289.rs b/src/test/mir-opt/issue-62289.rs index b2b1a71e10291..aa8d57b701997 100644 --- a/src/test/mir-opt/issue-62289.rs +++ b/src/test/mir-opt/issue-62289.rs @@ -24,7 +24,7 @@ fn main() { // StorageLive(_3); // StorageLive(_4); // _4 = std::option::Option::::None; -// _3 = const as std::ops::Try>::into_result(move _4) -> [return: bb1, unwind: bb12]; +// _3 = const as std::ops::Try>::into_result(move _4) -> [return: bb1, unwind: bb13]; // } // bb1: { // StorageDead(_4); @@ -40,16 +40,16 @@ fn main() { // StorageLive(_8); // StorageLive(_9); // _9 = _6; -// _8 = const >::from(move _9) -> [return: bb4, unwind: bb12]; +// _8 = const >::from(move _9) -> [return: bb4, unwind: bb13]; // } // bb4: { // StorageDead(_9); -// _0 = const > as std::ops::Try>::from_error(move _8) -> [return: bb5, unwind: bb12]; +// _0 = const > as std::ops::Try>::from_error(move _8) -> [return: bb5, unwind: bb13]; // } // bb5: { // StorageDead(_8); // StorageDead(_6); -// drop(_2) -> bb9; +// drop(_2) -> [return: bb9, unwind: bb11]; // } // bb6: { // StorageLive(_10); @@ -57,12 +57,12 @@ fn main() { // (*_2) = _10; // StorageDead(_10); // _1 = move _2; -// drop(_2) -> [return: bb7, unwind: bb11]; +// drop(_2) -> [return: bb7, unwind: bb12]; // } // bb7: { // StorageDead(_2); // _0 = std::option::Option::>::Some(move _1,); -// drop(_1) -> bb8; +// drop(_1) -> [return: bb8, unwind: bb11]; // } // bb8: { // StorageDead(_1); @@ -79,12 +79,15 @@ fn main() { // return; // } // bb11 (cleanup): { -// drop(_1) -> bb13; +// drop(_0) -> bb14; // } // bb12 (cleanup): { -// drop(_2) -> bb13; +// drop(_1) -> bb14; // } // bb13 (cleanup): { +// drop(_2) -> bb14; +// } +// bb14 (cleanup): { // resume; // } // } diff --git a/src/test/ui/drop/dynamic-drop-async.rs b/src/test/ui/drop/dynamic-drop-async.rs index 88d36ab6aa664..acd4bff237ffa 100644 --- a/src/test/ui/drop/dynamic-drop-async.rs +++ b/src/test/ui/drop/dynamic-drop-async.rs @@ -46,6 +46,7 @@ impl Future for Defer { /// 
The `failing_op`-th operation will panic. struct Allocator { data: RefCell>, + name: &'static str, failing_op: usize, cur_ops: Cell, } @@ -57,23 +58,28 @@ impl Drop for Allocator { fn drop(&mut self) { let data = self.data.borrow(); if data.iter().any(|d| *d) { - panic!("missing free: {:?}", data); + panic!("missing free in {:?}: {:?}", self.name, data); } } } impl Allocator { - fn new(failing_op: usize) -> Self { - Allocator { failing_op, cur_ops: Cell::new(0), data: RefCell::new(vec![]) } + fn new(failing_op: usize, name: &'static str) -> Self { + Allocator { + failing_op, + name, + cur_ops: Cell::new(0), + data: RefCell::new(vec![]), + } } - fn alloc(&self) -> impl Future> + '_ { + fn alloc(self: &Rc) -> impl Future + 'static { self.fallible_operation(); let mut data = self.data.borrow_mut(); let addr = data.len(); data.push(true); - Defer { ready: false, value: Some(Ptr(addr, self)) } + Defer { ready: false, value: Some(Ptr(addr, self.clone())) } } fn fallible_operation(&self) { self.cur_ops.set(self.cur_ops.get() + 1); @@ -86,11 +92,11 @@ impl Allocator { // Type that tracks whether it was dropped and can panic when it's created or // destroyed. -struct Ptr<'a>(usize, &'a Allocator); -impl<'a> Drop for Ptr<'a> { +struct Ptr(usize, Rc); +impl Drop for Ptr { fn drop(&mut self) { match self.1.data.borrow_mut()[self.0] { - false => panic!("double free at index {:?}", self.0), + false => panic!("double free in {:?} at index {:?}", self.1.name, self.0), ref mut d => *d = false, } @@ -114,7 +120,7 @@ async fn dynamic_drop(a: Rc, c: bool) { }; } -struct TwoPtrs<'a>(Ptr<'a>, Ptr<'a>); +struct TwoPtrs(Ptr, Ptr); async fn struct_dynamic_drop(a: Rc, c0: bool, c1: bool, c: bool) { for i in 0..2 { let x; @@ -235,21 +241,62 @@ async fn move_ref_pattern(a: Rc) { a.alloc().await; } -fn run_test(cx: &mut Context<'_>, ref f: F) +async fn panic_after_return(a: Rc, c: bool) -> (Ptr,) { + a.alloc().await; + let p = a.alloc().await; + if c { + a.alloc().await; + let q = a.alloc().await; + // We use a return type that isn't used anywhere else to make sure that + // the return place doesn't incorrectly end up in the generator state. + return (a.alloc().await,); + } + (a.alloc().await,) +} + + +async fn panic_after_init_by_loop(a: Rc) { + a.alloc().await; + let p = a.alloc().await; + let q = loop { + a.alloc().await; + let r = a.alloc().await; + break a.alloc().await; + }; +} + +async fn panic_after_init_by_match_with_bindings_and_guard(a: Rc, b: bool) { + a.alloc().await; + let p = a.alloc().await; + let q = match a.alloc().await { + ref _x if b => { + a.alloc().await; + let r = a.alloc().await; + a.alloc().await + } + _x => { + a.alloc().await; + let r = a.alloc().await; + a.alloc().await + }, + }; +} + +fn run_test(cx: &mut Context<'_>, ref f: F, name: &'static str) where F: Fn(Rc) -> G, - G: Future, + G: Future, { for polls in 0.. { // Run without any panics to find which operations happen after the // penultimate `poll`. - let first_alloc = Rc::new(Allocator::new(usize::MAX)); + let first_alloc = Rc::new(Allocator::new(usize::MAX, name)); let mut fut = Box::pin(f(first_alloc.clone())); let mut ops_before_last_poll = 0; let mut completed = false; for _ in 0..polls { ops_before_last_poll = first_alloc.cur_ops.get(); - if let Poll::Ready(()) = fut.as_mut().poll(cx) { + if let Poll::Ready(_) = fut.as_mut().poll(cx) { completed = true; } } @@ -258,7 +305,7 @@ where // Start at `ops_before_last_poll` so that we will always be able to // `poll` the expected number of times. 
for failing_op in ops_before_last_poll..first_alloc.cur_ops.get() { - let alloc = Rc::new(Allocator::new(failing_op + 1)); + let alloc = Rc::new(Allocator::new(failing_op + 1, name)); let f = &f; let cx = &mut *cx; let result = panic::catch_unwind(panic::AssertUnwindSafe(move || { @@ -288,48 +335,58 @@ fn clone_waker(data: *const ()) -> RawWaker { RawWaker::new(data, &RawWakerVTable::new(clone_waker, drop, drop, drop)) } +macro_rules! run_test { + ($ctxt:expr, $e:expr) => { run_test($ctxt, $e, stringify!($e)); }; +} + fn main() { let waker = unsafe { Waker::from_raw(clone_waker(ptr::null())) }; let context = &mut Context::from_waker(&waker); - run_test(context, |a| dynamic_init(a, false)); - run_test(context, |a| dynamic_init(a, true)); - run_test(context, |a| dynamic_drop(a, false)); - run_test(context, |a| dynamic_drop(a, true)); - - run_test(context, |a| assignment(a, false, false)); - run_test(context, |a| assignment(a, false, true)); - run_test(context, |a| assignment(a, true, false)); - run_test(context, |a| assignment(a, true, true)); - - run_test(context, |a| array_simple(a)); - run_test(context, |a| vec_simple(a)); - run_test(context, |a| vec_unreachable(a)); - - run_test(context, |a| struct_dynamic_drop(a, false, false, false)); - run_test(context, |a| struct_dynamic_drop(a, false, false, true)); - run_test(context, |a| struct_dynamic_drop(a, false, true, false)); - run_test(context, |a| struct_dynamic_drop(a, false, true, true)); - run_test(context, |a| struct_dynamic_drop(a, true, false, false)); - run_test(context, |a| struct_dynamic_drop(a, true, false, true)); - run_test(context, |a| struct_dynamic_drop(a, true, true, false)); - run_test(context, |a| struct_dynamic_drop(a, true, true, true)); - - run_test(context, |a| field_assignment(a, false)); - run_test(context, |a| field_assignment(a, true)); - - run_test(context, |a| mixed_drop_and_nondrop(a)); - - run_test(context, |a| slice_pattern_one_of(a, 0)); - run_test(context, |a| slice_pattern_one_of(a, 1)); - run_test(context, |a| slice_pattern_one_of(a, 2)); - run_test(context, |a| slice_pattern_one_of(a, 3)); - - run_test(context, |a| subslice_pattern_from_end_with_drop(a, true, true)); - run_test(context, |a| subslice_pattern_from_end_with_drop(a, true, false)); - run_test(context, |a| subslice_pattern_from_end_with_drop(a, false, true)); - run_test(context, |a| subslice_pattern_from_end_with_drop(a, false, false)); - run_test(context, |a| subslice_pattern_reassign(a)); - - run_test(context, |a| move_ref_pattern(a)); + run_test!(context, |a| dynamic_init(a, false)); + run_test!(context, |a| dynamic_init(a, true)); + run_test!(context, |a| dynamic_drop(a, false)); + run_test!(context, |a| dynamic_drop(a, true)); + + run_test!(context, |a| assignment(a, false, false)); + run_test!(context, |a| assignment(a, false, true)); + run_test!(context, |a| assignment(a, true, false)); + run_test!(context, |a| assignment(a, true, true)); + + run_test!(context, |a| array_simple(a)); + run_test!(context, |a| vec_simple(a)); + run_test!(context, |a| vec_unreachable(a)); + + run_test!(context, |a| struct_dynamic_drop(a, false, false, false)); + run_test!(context, |a| struct_dynamic_drop(a, false, false, true)); + run_test!(context, |a| struct_dynamic_drop(a, false, true, false)); + run_test!(context, |a| struct_dynamic_drop(a, false, true, true)); + run_test!(context, |a| struct_dynamic_drop(a, true, false, false)); + run_test!(context, |a| struct_dynamic_drop(a, true, false, true)); + run_test!(context, |a| struct_dynamic_drop(a, true, 
true, false)); + run_test!(context, |a| struct_dynamic_drop(a, true, true, true)); + + run_test!(context, |a| field_assignment(a, false)); + run_test!(context, |a| field_assignment(a, true)); + + run_test!(context, |a| mixed_drop_and_nondrop(a)); + + run_test!(context, |a| slice_pattern_one_of(a, 0)); + run_test!(context, |a| slice_pattern_one_of(a, 1)); + run_test!(context, |a| slice_pattern_one_of(a, 2)); + run_test!(context, |a| slice_pattern_one_of(a, 3)); + + run_test!(context, |a| subslice_pattern_from_end_with_drop(a, true, true)); + run_test!(context, |a| subslice_pattern_from_end_with_drop(a, true, false)); + run_test!(context, |a| subslice_pattern_from_end_with_drop(a, false, true)); + run_test!(context, |a| subslice_pattern_from_end_with_drop(a, false, false)); + run_test!(context, |a| subslice_pattern_reassign(a)); + + run_test!(context, |a| move_ref_pattern(a)); + + run_test!(context, |a| panic_after_return(a, false)); + run_test!(context, |a| panic_after_return(a, true)); + run_test!(context, |a| panic_after_init_by_loop(a)); + run_test!(context, |a| panic_after_init_by_match_with_bindings_and_guard(a, false)); + run_test!(context, |a| panic_after_init_by_match_with_bindings_and_guard(a, true)); } diff --git a/src/test/ui/drop/dynamic-drop.rs b/src/test/ui/drop/dynamic-drop.rs index 451686d9ae21c..8a8a67d7d49e1 100644 --- a/src/test/ui/drop/dynamic-drop.rs +++ b/src/test/ui/drop/dynamic-drop.rs @@ -3,7 +3,6 @@ #![feature(generators, generator_trait, untagged_unions)] #![feature(move_ref_pattern)] - #![allow(unused_assignments)] #![allow(unused_variables)] @@ -18,6 +17,7 @@ struct InjectedFailure; struct Allocator { data: RefCell>, + name: &'static str, failing_op: usize, cur_ops: Cell, } @@ -29,17 +29,18 @@ impl Drop for Allocator { fn drop(&mut self) { let data = self.data.borrow(); if data.iter().any(|d| *d) { - panic!("missing free: {:?}", data); + panic!("missing free in {:?}: {:?}", self.name, data); } } } impl Allocator { - fn new(failing_op: usize) -> Self { + fn new(failing_op: usize, name: &'static str) -> Self { Allocator { failing_op: failing_op, cur_ops: Cell::new(0), - data: RefCell::new(vec![]) + data: RefCell::new(vec![]), + name, } } fn alloc(&self) -> Ptr<'_> { @@ -54,33 +55,17 @@ impl Allocator { data.push(true); Ptr(addr, self) } - // FIXME(#47949) Any use of this indicates a bug in rustc: we should never - // be leaking values in the cases here. - // - // Creates a `Ptr<'_>` and checks that the allocated value is leaked if the - // `failing_op` is in the list of exception. 
- fn alloc_leaked(&self, exceptions: Vec) -> Ptr<'_> { - let ptr = self.alloc(); - - if exceptions.iter().any(|operation| *operation == self.failing_op) { - let mut data = self.data.borrow_mut(); - data[ptr.0] = false; - } - ptr - } } struct Ptr<'a>(usize, &'a Allocator); impl<'a> Drop for Ptr<'a> { fn drop(&mut self) { match self.1.data.borrow_mut()[self.0] { - false => { - panic!("double free at index {:?}", self.0) - } - ref mut d => *d = false + false => panic!("double free in {:?} at index {:?}", self.1.name, self.0), + ref mut d => *d = false, } - self.1.cur_ops.set(self.1.cur_ops.get()+1); + self.1.cur_ops.set(self.1.cur_ops.get() + 1); if self.1.cur_ops.get() == self.1.failing_op { panic!(InjectedFailure); @@ -178,11 +163,7 @@ fn generator(a: &Allocator, run_count: usize) { assert!(run_count < 4); let mut gen = || { - (a.alloc(), - yield a.alloc(), - a.alloc(), - yield a.alloc() - ); + (a.alloc(), yield a.alloc(), a.alloc(), yield a.alloc()); }; for _ in 0..run_count { Pin::new(&mut gen).resume(()); @@ -206,28 +187,40 @@ fn vec_unreachable(a: &Allocator) { } fn slice_pattern_first(a: &Allocator) { - let[_x, ..] = [a.alloc(), a.alloc(), a.alloc()]; + let [_x, ..] = [a.alloc(), a.alloc(), a.alloc()]; } fn slice_pattern_middle(a: &Allocator) { - let[_, _x, _] = [a.alloc(), a.alloc(), a.alloc()]; + let [_, _x, _] = [a.alloc(), a.alloc(), a.alloc()]; } fn slice_pattern_two(a: &Allocator) { - let[_x, _, _y, _] = [a.alloc(), a.alloc(), a.alloc(), a.alloc()]; + let [_x, _, _y, _] = [a.alloc(), a.alloc(), a.alloc(), a.alloc()]; } fn slice_pattern_last(a: &Allocator) { - let[.., _y] = [a.alloc(), a.alloc(), a.alloc(), a.alloc()]; + let [.., _y] = [a.alloc(), a.alloc(), a.alloc(), a.alloc()]; } fn slice_pattern_one_of(a: &Allocator, i: usize) { let array = [a.alloc(), a.alloc(), a.alloc(), a.alloc()]; let _x = match i { - 0 => { let [a, ..] = array; a } - 1 => { let [_, a, ..] = array; a } - 2 => { let [_, _, a, _] = array; a } - 3 => { let [_, _, _, a] = array; a } + 0 => { + let [a, ..] = array; + a + } + 1 => { + let [_, a, ..] = array; + a + } + 2 => { + let [_, _, a, _] = array; + a + } + 3 => { + let [_, _, _, a] = array; + a + } _ => panic!("unmatched"), }; } @@ -235,9 +228,9 @@ fn slice_pattern_one_of(a: &Allocator, i: usize) { fn subslice_pattern_from_end(a: &Allocator, arg: bool) { let a = [a.alloc(), a.alloc(), a.alloc()]; if arg { - let[.., _x, _] = a; + let [.., _x, _] = a; } else { - let[_, _y @ ..] = a; + let [_, _y @ ..] = a; } } @@ -249,45 +242,61 @@ fn subslice_pattern_from_end_with_drop(a: &Allocator, arg: bool, arg2: bool) { } if arg { - let[.., _x, _] = a; + let [.., _x, _] = a; } else { - let[_, _y @ ..] = a; + let [_, _y @ ..] = a; } } fn slice_pattern_reassign(a: &Allocator) { let mut ar = [a.alloc(), a.alloc()]; - let[_, _x] = ar; + let [_, _x] = ar; ar = [a.alloc(), a.alloc()]; - let[.., _y] = ar; + let [.., _y] = ar; } fn subslice_pattern_reassign(a: &Allocator) { let mut ar = [a.alloc(), a.alloc(), a.alloc()]; - let[_, _, _x] = ar; + let [_, _, _x] = ar; ar = [a.alloc(), a.alloc(), a.alloc()]; - let[_, _y @ ..] = ar; + let [_, _y @ ..] = ar; } fn index_field_mixed_ends(a: &Allocator) { let ar = [(a.alloc(), a.alloc()), (a.alloc(), a.alloc())]; - let[(_x, _), ..] = ar; - let[(_, _y), _] = ar; - let[_, (_, _w)] = ar; - let[.., (_z, _)] = ar; + let [(_x, _), ..] 
= ar; + let [(_, _y), _] = ar; + let [_, (_, _w)] = ar; + let [.., (_z, _)] = ar; } fn subslice_mixed_min_lengths(a: &Allocator, c: i32) { let ar = [(a.alloc(), a.alloc()), (a.alloc(), a.alloc())]; match c { - 0 => { let[_x, ..] = ar; } - 1 => { let[_x, _, ..] = ar; } - 2 => { let[_x, _] = ar; } - 3 => { let[(_x, _), _, ..] = ar; } - 4 => { let[.., (_x, _)] = ar; } - 5 => { let[.., (_x, _), _] = ar; } - 6 => { let [_y @ ..] = ar; } - _ => { let [_y @ .., _] = ar; } + 0 => { + let [_x, ..] = ar; + } + 1 => { + let [_x, _, ..] = ar; + } + 2 => { + let [_x, _] = ar; + } + 3 => { + let [(_x, _), _, ..] = ar; + } + 4 => { + let [.., (_x, _)] = ar; + } + 5 => { + let [.., (_x, _), _] = ar; + } + 6 => { + let [_y @ ..] = ar; + } + _ => { + let [_y @ .., _] = ar; + } } } @@ -297,87 +306,160 @@ fn move_ref_pattern(a: &Allocator) { } fn panic_after_return(a: &Allocator) -> Ptr<'_> { - // Panic in the drop of `p` or `q` can leak - let exceptions = vec![8, 9]; a.alloc(); let p = a.alloc(); { a.alloc(); let p = a.alloc(); - // FIXME (#47949) We leak values when we panic in a destructor after - // evaluating an expression with `rustc_mir::build::Builder::into`. - a.alloc_leaked(exceptions) + a.alloc() } } fn panic_after_return_expr(a: &Allocator) -> Ptr<'_> { - // Panic in the drop of `p` or `q` can leak - let exceptions = vec![8, 9]; a.alloc(); let p = a.alloc(); { a.alloc(); let q = a.alloc(); - // FIXME (#47949) - return a.alloc_leaked(exceptions); + return a.alloc(); } } fn panic_after_init(a: &Allocator) { - // Panic in the drop of `r` can leak - let exceptions = vec![8]; a.alloc(); let p = a.alloc(); let q = { a.alloc(); let r = a.alloc(); - // FIXME (#47949) - a.alloc_leaked(exceptions) + a.alloc() }; } fn panic_after_init_temp(a: &Allocator) { - // Panic in the drop of `r` can leak - let exceptions = vec![8]; a.alloc(); let p = a.alloc(); { a.alloc(); let r = a.alloc(); - // FIXME (#47949) - a.alloc_leaked(exceptions) + a.alloc() }; } fn panic_after_init_by_loop(a: &Allocator) { - // Panic in the drop of `r` can leak - let exceptions = vec![8]; a.alloc(); let p = a.alloc(); let q = loop { a.alloc(); let r = a.alloc(); - // FIXME (#47949) - break a.alloc_leaked(exceptions); + break a.alloc(); }; } -fn run_test(mut f: F) - where F: FnMut(&Allocator) +fn panic_after_init_by_match(a: &Allocator, b: bool) { + a.alloc(); + let p = a.alloc(); + loop { + let q = match b { + true => { + a.alloc(); + let r = a.alloc(); + a.alloc() + } + false => { + a.alloc(); + let r = a.alloc(); + break a.alloc(); + } + }; + return; + }; +} + +fn panic_after_init_by_match_with_guard(a: &Allocator, b: bool) { + a.alloc(); + let p = a.alloc(); + let q = match a.alloc() { + _ if b => { + a.alloc(); + let r = a.alloc(); + a.alloc() + } + _ => { + a.alloc(); + let r = a.alloc(); + a.alloc() + } + }; +} + +fn panic_after_init_by_match_with_bindings_and_guard(a: &Allocator, b: bool) { + a.alloc(); + let p = a.alloc(); + let q = match a.alloc() { + _x if b => { + a.alloc(); + let r = a.alloc(); + a.alloc() + } + _x => { + a.alloc(); + let r = a.alloc(); + a.alloc() + } + }; +} + +fn panic_after_init_by_match_with_ref_bindings_and_guard(a: &Allocator, b: bool) { + a.alloc(); + let p = a.alloc(); + let q = match a.alloc() { + ref _x if b => { + a.alloc(); + let r = a.alloc(); + a.alloc() + } + ref _x => { + a.alloc(); + let r = a.alloc(); + a.alloc() + } + }; +} + +fn panic_after_init_by_break_if(a: &Allocator, b: bool) { + a.alloc(); + let p = a.alloc(); + let q = loop { + let r = a.alloc(); + break if b { + let s = a.alloc(); + 
a.alloc() + } else { + a.alloc() + }; + }; +} + +fn run_test(mut f: F, name: &'static str) +where + F: FnMut(&Allocator), { - let first_alloc = Allocator::new(usize::MAX); + let first_alloc = Allocator::new(usize::MAX, name); f(&first_alloc); - for failing_op in 1..first_alloc.cur_ops.get()+1 { - let alloc = Allocator::new(failing_op); + for failing_op in 1..first_alloc.cur_ops.get() + 1 { + let alloc = Allocator::new(failing_op, name); let alloc = &alloc; let f = panic::AssertUnwindSafe(&mut f); let result = panic::catch_unwind(move || { f.0(alloc); }); match result { - Ok(..) => panic!("test executed {} ops but now {}", - first_alloc.cur_ops.get(), alloc.cur_ops.get()), + Ok(..) => panic!( + "test executed {} ops but now {}", + first_alloc.cur_ops.get(), + alloc.cur_ops.get() + ), Err(e) => { if e.downcast_ref::().is_none() { panic::resume_unwind(e); @@ -387,89 +469,106 @@ fn run_test(mut f: F) } } -fn run_test_nopanic(mut f: F) - where F: FnMut(&Allocator) +fn run_test_nopanic(mut f: F, name: &'static str) +where + F: FnMut(&Allocator), { - let first_alloc = Allocator::new(usize::MAX); + let first_alloc = Allocator::new(usize::MAX, name); f(&first_alloc); } +macro_rules! run_test { + ($e:expr) => { + run_test($e, stringify!($e)); + }; +} + fn main() { - run_test(|a| dynamic_init(a, false)); - run_test(|a| dynamic_init(a, true)); - run_test(|a| dynamic_drop(a, false)); - run_test(|a| dynamic_drop(a, true)); - - run_test(|a| assignment2(a, false, false)); - run_test(|a| assignment2(a, false, true)); - run_test(|a| assignment2(a, true, false)); - run_test(|a| assignment2(a, true, true)); - - run_test(|a| assignment1(a, false)); - run_test(|a| assignment1(a, true)); - - run_test(|a| array_simple(a)); - run_test(|a| vec_simple(a)); - run_test(|a| vec_unreachable(a)); - - run_test(|a| struct_dynamic_drop(a, false, false, false)); - run_test(|a| struct_dynamic_drop(a, false, false, true)); - run_test(|a| struct_dynamic_drop(a, false, true, false)); - run_test(|a| struct_dynamic_drop(a, false, true, true)); - run_test(|a| struct_dynamic_drop(a, true, false, false)); - run_test(|a| struct_dynamic_drop(a, true, false, true)); - run_test(|a| struct_dynamic_drop(a, true, true, false)); - run_test(|a| struct_dynamic_drop(a, true, true, true)); - - run_test(|a| field_assignment(a, false)); - run_test(|a| field_assignment(a, true)); - - run_test(|a| generator(a, 0)); - run_test(|a| generator(a, 1)); - run_test(|a| generator(a, 2)); - run_test(|a| generator(a, 3)); - - run_test(|a| mixed_drop_and_nondrop(a)); - - run_test(|a| slice_pattern_first(a)); - run_test(|a| slice_pattern_middle(a)); - run_test(|a| slice_pattern_two(a)); - run_test(|a| slice_pattern_last(a)); - run_test(|a| slice_pattern_one_of(a, 0)); - run_test(|a| slice_pattern_one_of(a, 1)); - run_test(|a| slice_pattern_one_of(a, 2)); - run_test(|a| slice_pattern_one_of(a, 3)); - - run_test(|a| subslice_pattern_from_end(a, true)); - run_test(|a| subslice_pattern_from_end(a, false)); - run_test(|a| subslice_pattern_from_end_with_drop(a, true, true)); - run_test(|a| subslice_pattern_from_end_with_drop(a, true, false)); - run_test(|a| subslice_pattern_from_end_with_drop(a, false, true)); - run_test(|a| subslice_pattern_from_end_with_drop(a, false, false)); - run_test(|a| slice_pattern_reassign(a)); - run_test(|a| subslice_pattern_reassign(a)); - - run_test(|a| index_field_mixed_ends(a)); - run_test(|a| subslice_mixed_min_lengths(a, 0)); - run_test(|a| subslice_mixed_min_lengths(a, 1)); - run_test(|a| subslice_mixed_min_lengths(a, 2)); - 
run_test(|a| subslice_mixed_min_lengths(a, 3)); - run_test(|a| subslice_mixed_min_lengths(a, 4)); - run_test(|a| subslice_mixed_min_lengths(a, 5)); - run_test(|a| subslice_mixed_min_lengths(a, 6)); - run_test(|a| subslice_mixed_min_lengths(a, 7)); - - run_test(|a| move_ref_pattern(a)); - - run_test(|a| { + run_test!(|a| dynamic_init(a, false)); + run_test!(|a| dynamic_init(a, true)); + run_test!(|a| dynamic_drop(a, false)); + run_test!(|a| dynamic_drop(a, true)); + + run_test!(|a| assignment2(a, false, false)); + run_test!(|a| assignment2(a, false, true)); + run_test!(|a| assignment2(a, true, false)); + run_test!(|a| assignment2(a, true, true)); + + run_test!(|a| assignment1(a, false)); + run_test!(|a| assignment1(a, true)); + + run_test!(|a| array_simple(a)); + run_test!(|a| vec_simple(a)); + run_test!(|a| vec_unreachable(a)); + + run_test!(|a| struct_dynamic_drop(a, false, false, false)); + run_test!(|a| struct_dynamic_drop(a, false, false, true)); + run_test!(|a| struct_dynamic_drop(a, false, true, false)); + run_test!(|a| struct_dynamic_drop(a, false, true, true)); + run_test!(|a| struct_dynamic_drop(a, true, false, false)); + run_test!(|a| struct_dynamic_drop(a, true, false, true)); + run_test!(|a| struct_dynamic_drop(a, true, true, false)); + run_test!(|a| struct_dynamic_drop(a, true, true, true)); + + run_test!(|a| field_assignment(a, false)); + run_test!(|a| field_assignment(a, true)); + + run_test!(|a| generator(a, 0)); + run_test!(|a| generator(a, 1)); + run_test!(|a| generator(a, 2)); + run_test!(|a| generator(a, 3)); + + run_test!(|a| mixed_drop_and_nondrop(a)); + + run_test!(|a| slice_pattern_first(a)); + run_test!(|a| slice_pattern_middle(a)); + run_test!(|a| slice_pattern_two(a)); + run_test!(|a| slice_pattern_last(a)); + run_test!(|a| slice_pattern_one_of(a, 0)); + run_test!(|a| slice_pattern_one_of(a, 1)); + run_test!(|a| slice_pattern_one_of(a, 2)); + run_test!(|a| slice_pattern_one_of(a, 3)); + + run_test!(|a| subslice_pattern_from_end(a, true)); + run_test!(|a| subslice_pattern_from_end(a, false)); + run_test!(|a| subslice_pattern_from_end_with_drop(a, true, true)); + run_test!(|a| subslice_pattern_from_end_with_drop(a, true, false)); + run_test!(|a| subslice_pattern_from_end_with_drop(a, false, true)); + run_test!(|a| subslice_pattern_from_end_with_drop(a, false, false)); + run_test!(|a| slice_pattern_reassign(a)); + run_test!(|a| subslice_pattern_reassign(a)); + + run_test!(|a| index_field_mixed_ends(a)); + run_test!(|a| subslice_mixed_min_lengths(a, 0)); + run_test!(|a| subslice_mixed_min_lengths(a, 1)); + run_test!(|a| subslice_mixed_min_lengths(a, 2)); + run_test!(|a| subslice_mixed_min_lengths(a, 3)); + run_test!(|a| subslice_mixed_min_lengths(a, 4)); + run_test!(|a| subslice_mixed_min_lengths(a, 5)); + run_test!(|a| subslice_mixed_min_lengths(a, 6)); + run_test!(|a| subslice_mixed_min_lengths(a, 7)); + + run_test!(|a| move_ref_pattern(a)); + + run_test!(|a| { panic_after_return(a); }); - run_test(|a| { + run_test!(|a| { panic_after_return_expr(a); }); - run_test(|a| panic_after_init(a)); - run_test(|a| panic_after_init_temp(a)); - run_test(|a| panic_after_init_by_loop(a)); - - run_test_nopanic(|a| union1(a)); + run_test!(|a| panic_after_init(a)); + run_test!(|a| panic_after_init_temp(a)); + run_test!(|a| panic_after_init_by_loop(a)); + run_test!(|a| panic_after_init_by_match(a, false)); + run_test!(|a| panic_after_init_by_match(a, true)); + run_test!(|a| panic_after_init_by_match_with_guard(a, false)); + run_test!(|a| panic_after_init_by_match_with_guard(a, 
true)); + run_test!(|a| panic_after_init_by_match_with_bindings_and_guard(a, false)); + run_test!(|a| panic_after_init_by_match_with_bindings_and_guard(a, true)); + run_test!(|a| panic_after_init_by_match_with_ref_bindings_and_guard(a, false)); + run_test!(|a| panic_after_init_by_match_with_ref_bindings_and_guard(a, true)); + run_test!(|a| panic_after_init_by_break_if(a, false)); + run_test!(|a| panic_after_init_by_break_if(a, true)); + + run_test_nopanic(|a| union1(a), "|a| union1(a)"); } From 9a926f93a46c75b6223acf015a8b60b66e3ce310 Mon Sep 17 00:00:00 2001 From: Matthew Jasper Date: Sat, 29 Feb 2020 16:30:06 +0000 Subject: [PATCH 15/16] Fix mir-opt tests --- .../mir-opt/const-promotion-extern-static.rs | 20 ++- .../mir-opt/generator-storage-dead-unwind.rs | 26 ++-- .../mir-opt/inline/inline-into-box-place.rs | 26 ++-- src/test/mir-opt/issue-62289.rs | 28 ++--- src/test/mir-opt/match-arm-scopes.rs | 84 ++++++------- src/test/mir-opt/match_false_edges.rs | 115 +++++++++--------- src/test/mir-opt/retag.rs | 31 +++-- 7 files changed, 166 insertions(+), 164 deletions(-) diff --git a/src/test/mir-opt/const-promotion-extern-static.rs b/src/test/mir-opt/const-promotion-extern-static.rs index f6f7d0910911c..e303b2274f4a5 100644 --- a/src/test/mir-opt/const-promotion-extern-static.rs +++ b/src/test/mir-opt/const-promotion-extern-static.rs @@ -19,10 +19,9 @@ fn main() {} // _3 = [move _4]; // _2 = &_3; // _1 = move _2 as &[&'static i32] (Pointer(Unsize)); -// _0 = const core::slice::::as_ptr(move _1) -> [return: bb2, unwind: bb1]; +// _0 = const core::slice::::as_ptr(move _1) -> bb1; // } -// ... -// bb2: { +// bb1: { // StorageDead(_5); // StorageDead(_3); // return; @@ -36,10 +35,9 @@ fn main() {} // _3 = [move _4]; // _2 = &_3; // _1 = move _2 as &[&'static i32] (Pointer(Unsize)); -// _0 = const core::slice::::as_ptr(move _1) -> [return: bb2, unwind: bb1]; +// _0 = const core::slice::::as_ptr(move _1) -> bb1; // } -// ... -// bb2: { +// bb1: { // StorageDead(_5); // StorageDead(_3); // return; @@ -51,10 +49,9 @@ fn main() {} // _6 = const BAR::promoted[0]; // _2 = &(*_6); // _1 = move _2 as &[&'static i32] (Pointer(Unsize)); -// _0 = const core::slice::::as_ptr(move _1) -> [return: bb2, unwind: bb1]; +// _0 = const core::slice::::as_ptr(move _1) -> bb1; // } -// ... -// bb2: { +// bb1: { // return; // } // END rustc.BAR.PromoteTemps.after.mir @@ -64,10 +61,9 @@ fn main() {} // _6 = const FOO::promoted[0]; // _2 = &(*_6); // _1 = move _2 as &[&'static i32] (Pointer(Unsize)); -// _0 = const core::slice::::as_ptr(move _1) -> [return: bb2, unwind: bb1]; +// _0 = const core::slice::::as_ptr(move _1) -> bb1; // } -// ... -// bb2: { +// bb1: { // return; // } // END rustc.FOO.PromoteTemps.after.mir diff --git a/src/test/mir-opt/generator-storage-dead-unwind.rs b/src/test/mir-opt/generator-storage-dead-unwind.rs index fd91945e09660..7aea985be999f 100644 --- a/src/test/mir-opt/generator-storage-dead-unwind.rs +++ b/src/test/mir-opt/generator-storage-dead-unwind.rs @@ -53,16 +53,17 @@ fn main() { // } // bb1: { // ... 
-// StorageLive(_7); -// _7 = move _2; -// _6 = const take::(move _7) -> [return: bb2, unwind: bb9]; +// StorageLive(_8); +// _8 = move _3; +// _7 = const take::(move _8) -> [return: bb2, unwind: bb9]; // } // bb2: { // StorageDead(_8); // StorageDead(_7); // StorageLive(_9); -// _9 = move _3; -// _8 = const take::(move _9) -> [return: bb3, unwind: bb8]; +// StorageLive(_10); +// _10 = move _4; +// _9 = const take::(move _10) -> [return: bb3, unwind: bb8]; // } // bb3: { // StorageDead(_10); @@ -70,7 +71,6 @@ fn main() { // ... // StorageDead(_4); // StorageDead(_3); -// StorageDead(_2); // drop(_1) -> [return: bb4, unwind: bb11]; // } // bb4: { @@ -78,36 +78,36 @@ fn main() { // } // bb5: { // ... -// StorageDead(_3); -// drop(_2) -> [return: bb6, unwind: bb12]; +// StorageDead(_4); +// drop(_3) -> [return: bb6, unwind: bb12]; // } // bb6: { -// StorageDead(_2); +// StorageDead(_3); // drop(_1) -> [return: bb7, unwind: bb11]; // } // bb7: { // generator_drop; // } // bb8 (cleanup): { +// StorageDead(_10); // StorageDead(_9); -// StorageDead(_8); // goto -> bb10; // } // bb9 (cleanup): { +// StorageDead(_8); // StorageDead(_7); -// StorageDead(_6); // goto -> bb10; // } // bb10 (cleanup): { +// StorageDead(_4); // StorageDead(_3); -// StorageDead(_2); // drop(_1) -> bb11; // } // bb11 (cleanup): { // resume; // } // bb12 (cleanup): { -// StorageDead(_2); +// StorageDead(_3); // drop(_1) -> bb11; // } diff --git a/src/test/mir-opt/inline/inline-into-box-place.rs b/src/test/mir-opt/inline/inline-into-box-place.rs index f368bdef6f8e2..462f1138ed734 100644 --- a/src/test/mir-opt/inline/inline-into-box-place.rs +++ b/src/test/mir-opt/inline/inline-into-box-place.rs @@ -20,23 +20,23 @@ fn main() { // StorageLive(_1); // StorageLive(_2); // _2 = Box(std::vec::Vec); -// (*_2) = const std::vec::Vec::::new() -> [return: bb2, unwind: bb4]; +// (*_2) = const std::vec::Vec::::new() -> [return: bb1, unwind: bb4]; // } -// bb1 (cleanup): { -// resume; -// } -// bb2: { +// bb1: { // _1 = move _2; // StorageDead(_2); // _0 = (); -// drop(_1) -> [return: bb3, unwind: bb1]; +// drop(_1) -> [return: bb2, unwind: bb3]; // } -// bb3: { +// bb2: { // StorageDead(_1); // return; // } +// bb3 (cleanup): { +// resume; +// } // bb4 (cleanup): { -// _3 = const alloc::alloc::box_free::>(move (_2.0: std::ptr::Unique>)) -> bb1; +// _3 = const alloc::alloc::box_free::>(move (_2.0: std::ptr::Unique>)) -> bb3; // } // END rustc.main.Inline.before.mir // START rustc.main.Inline.after.mir @@ -60,13 +60,13 @@ fn main() { // _1 = move _2; // StorageDead(_2); // _0 = (); -// drop(_1) -> [return: bb2, unwind: bb1]; +// drop(_1) -> [return: bb1, unwind: bb2]; // } -// bb1 (cleanup): { -// resume; -// } -// bb2: { +// bb1: { // StorageDead(_1); // return; // } +// bb2 (cleanup): { +// resume; +// } // END rustc.main.Inline.after.mir diff --git a/src/test/mir-opt/issue-62289.rs b/src/test/mir-opt/issue-62289.rs index aa8d57b701997..494834e6ff8a4 100644 --- a/src/test/mir-opt/issue-62289.rs +++ b/src/test/mir-opt/issue-62289.rs @@ -29,36 +29,36 @@ fn main() { // bb1: { // StorageDead(_4); // _5 = discriminant(_3); -// switchInt(move _5) -> [0isize: bb6, 1isize: bb3, otherwise: bb2]; +// switchInt(move _5) -> [0isize: bb2, 1isize: bb4, otherwise: bb3]; // } // bb2: { -// unreachable; +// StorageLive(_10); +// _10 = ((_3 as Ok).0: u32); +// (*_2) = _10; +// StorageDead(_10); +// _1 = move _2; +// drop(_2) -> [return: bb7, unwind: bb12]; // } // bb3: { +// unreachable; +// } +// bb4: { // StorageLive(_6); // _6 = ((_3 as Err).0: 
std::option::NoneError); // StorageLive(_8); // StorageLive(_9); // _9 = _6; -// _8 = const >::from(move _9) -> [return: bb4, unwind: bb13]; +// _8 = const >::from(move _9) -> [return: bb5, unwind: bb13]; // } -// bb4: { +// bb5: { // StorageDead(_9); -// _0 = const > as std::ops::Try>::from_error(move _8) -> [return: bb5, unwind: bb13]; +// _0 = const > as std::ops::Try>::from_error(move _8) -> [return: bb6, unwind: bb13]; // } -// bb5: { +// bb6: { // StorageDead(_8); // StorageDead(_6); // drop(_2) -> [return: bb9, unwind: bb11]; // } -// bb6: { -// StorageLive(_10); -// _10 = ((_3 as Ok).0: u32); -// (*_2) = _10; -// StorageDead(_10); -// _1 = move _2; -// drop(_2) -> [return: bb7, unwind: bb12]; -// } // bb7: { // StorageDead(_2); // _0 = std::option::Option::>::Some(move _1,); diff --git a/src/test/mir-opt/match-arm-scopes.rs b/src/test/mir-opt/match-arm-scopes.rs index 27feb4ffb354a..12b82b455f84e 100644 --- a/src/test/mir-opt/match-arm-scopes.rs +++ b/src/test/mir-opt/match-arm-scopes.rs @@ -61,28 +61,35 @@ fn main() { // } // bb0: { // FakeRead(ForMatchedPlace, _2); -// switchInt((_2.0: bool)) -> [false: bb1, otherwise: bb4]; +// switchInt((_2.0: bool)) -> [false: bb1, otherwise: bb2]; // } // bb1: { -// falseEdges -> [real: bb7, imaginary: bb2]; +// falseEdges -> [real: bb8, imaginary: bb3]; // } // bb2: { -// falseEdges -> [real: bb13, imaginary: bb3]; +// switchInt((_2.1: bool)) -> [false: bb3, otherwise: bb4]; // } // bb3: { -// falseEdges -> [real: bb21, imaginary: bb22]; +// falseEdges -> [real: bb14, imaginary: bb5]; // } // bb4: { -// switchInt((_2.1: bool)) -> [false: bb2, otherwise: bb5]; +// switchInt((_2.0: bool)) -> [false: bb6, otherwise: bb5]; // } // bb5: { -// switchInt((_2.0: bool)) -> [false: bb22, otherwise: bb3]; +// falseEdges -> [real: bb22, imaginary: bb6]; // } -// bb6: { // arm 1 +// bb6: { +// StorageLive(_15); +// _15 = (_2.1: bool); +// StorageLive(_16); +// _16 = move (_2.2: std::string::String); +// goto -> bb21; +// } +// bb7: { // _0 = const 1i32; -// drop(_7) -> [return: bb19, unwind: bb27]; +// drop(_7) -> [return: bb20, unwind: bb27]; // } -// bb7: { // guard - first time +// bb8: { // StorageLive(_6); // _6 = &(_2.1: bool); // StorageLive(_8); @@ -93,23 +100,23 @@ fn main() { // StorageLive(_10); // _10 = _1; // FakeRead(ForMatchedPlace, _10); -// switchInt(_10) -> [false: bb9, otherwise: bb8]; +// switchInt(_10) -> [false: bb10, otherwise: bb9]; // } -// bb8: { -// falseEdges -> [real: bb10, imaginary: bb9]; +// bb9: { +// falseEdges -> [real: bb11, imaginary: bb10]; // } -// bb9: { // `else` block - first time +// bb10: { // _9 = (*_6); // StorageDead(_10); -// switchInt(move _9) -> [false: bb12, otherwise: bb11]; +// switchInt(move _9) -> [false: bb13, otherwise: bb12]; // } -// bb10: { // `return 3` - first time +// bb11: { // _0 = const 3i32; // StorageDead(_10); // StorageDead(_9); // goto -> bb25; // } -// bb11: { +// bb12: { // StorageDead(_9); // FakeRead(ForMatchGuard, _3); // FakeRead(ForMatchGuard, _4); @@ -119,15 +126,15 @@ fn main() { // _5 = (_2.1: bool); // StorageLive(_7); // _7 = move (_2.2: std::string::String); -// goto -> bb6; +// goto -> bb7; // } -// bb12: { // guard otherwise case - first time +// bb13: { // StorageDead(_9); // StorageDead(_8); // StorageDead(_6); -// falseEdges -> [real: bb4, imaginary: bb2]; +// falseEdges -> [real: bb2, imaginary: bb3]; // } -// bb13: { // guard - second time +// bb14: { // StorageLive(_6); // _6 = &(_2.0: bool); // StorageLive(_8); @@ -138,23 +145,23 @@ fn main() { // 
StorageLive(_13); // _13 = _1; // FakeRead(ForMatchedPlace, _13); -// switchInt(_13) -> [false: bb15, otherwise: bb14]; +// switchInt(_13) -> [false: bb16, otherwise: bb15]; // } -// bb14: { -// falseEdges -> [real: bb16, imaginary: bb15]; +// bb15: { +// falseEdges -> [real: bb17, imaginary: bb16]; // } -// bb15: { // `else` block - second time +// bb16: { // _12 = (*_6); // StorageDead(_13); -// switchInt(move _12) -> [false: bb18, otherwise: bb17]; +// switchInt(move _12) -> [false: bb19, otherwise: bb18]; // } -// bb16: { // `return 3` - second time +// bb17: { // _0 = const 3i32; // StorageDead(_13); // StorageDead(_12); // goto -> bb25; // } -// bb17: { // bindings for arm 1 +// bb18: { // StorageDead(_12); // FakeRead(ForMatchGuard, _3); // FakeRead(ForMatchGuard, _4); @@ -164,40 +171,33 @@ fn main() { // _5 = (_2.0: bool); // StorageLive(_7); // _7 = move (_2.2: std::string::String); -// goto -> bb6; +// goto -> bb7; // } -// bb18: { // Guard otherwise case - second time +// bb19: { // StorageDead(_12); // StorageDead(_8); // StorageDead(_6); -// falseEdges -> [real: bb5, imaginary: bb3]; +// falseEdges -> [real: bb4, imaginary: bb5]; // } -// bb19: { // rest of arm 1 +// bb20: { // StorageDead(_7); // StorageDead(_5); // StorageDead(_8); // StorageDead(_6); // goto -> bb24; // } -// bb20: { // arm 2 +// bb21: { // _0 = const 2i32; // drop(_16) -> [return: bb23, unwind: bb27]; // } -// bb21: { // bindings for arm 2 - first pattern -// StorageLive(_15); -// _15 = (_2.1: bool); -// StorageLive(_16); -// _16 = move (_2.2: std::string::String); -// goto -> bb20; -// } -// bb22: { // bindings for arm 2 - second pattern +// bb22: { // StorageLive(_15); // _15 = (_2.1: bool); // StorageLive(_16); // _16 = move (_2.2: std::string::String); -// goto -> bb20; +// goto -> bb21; // } -// bb23: { // rest of arm 2 +// bb23: { // StorageDead(_16); // StorageDead(_15); // goto -> bb24; diff --git a/src/test/mir-opt/match_false_edges.rs b/src/test/mir-opt/match_false_edges.rs index afc49c48a7f05..367a467272916 100644 --- a/src/test/mir-opt/match_false_edges.rs +++ b/src/test/mir-opt/match_false_edges.rs @@ -4,7 +4,7 @@ fn guard() -> bool { false } -fn guard2(_:i32) -> bool { +fn guard2(_: i32) -> bool { true } @@ -45,24 +45,25 @@ fn main() { // _2 = std::option::Option::::Some(const 42i32,); // FakeRead(ForMatchedPlace, _2); // _3 = discriminant(_2); -// switchInt(move _3) -> [0isize: bb3, 1isize: bb1, otherwise: bb4]; +// switchInt(move _3) -> [0isize: bb1, 1isize: bb2, otherwise: bb4]; // } -// bb1: { -// falseEdges -> [real: bb5, imaginary: bb2]; //pre_binding1 +// bb1: { // pre_binding3 and arm3 +// _1 = (const 3i32, const 3i32); +// goto -> bb10; // } // bb2: { -// falseEdges -> [real: bb9, imaginary: bb3]; //pre_binding2 +// falseEdges -> [real: bb5, imaginary: bb3]; //pre_binding1 // } -// bb3: { // pre_binding3 and arm3 -// _1 = (const 3i32, const 3i32); -// goto -> bb10; +// bb3: { +// falseEdges -> [real: bb9, imaginary: bb1]; //pre_binding2 // } // bb4: { // unreachable; // } // bb5: { // binding1 and guard // StorageLive(_6); -// _6 = &(((promoted[0]: std::option::Option) as Some).0: i32); +// _11 = const full_tested_match::promoted[0]; +// _6 = &(((*_11) as Some).0: i32); // _4 = &shallow _2; // StorageLive(_7); // _7 = const guard() -> [return: bb6, unwind: bb11]; @@ -87,7 +88,7 @@ fn main() { // bb8: { // to pre_binding2 // StorageDead(_7); // StorageDead(_6); -// goto -> bb2; +// goto -> bb3; // } // bb9: { // arm2 // StorageLive(_9); @@ -116,28 +117,38 @@ fn main() { // _2 = 
std::option::Option::::Some(const 42i32,); // FakeRead(ForMatchedPlace, _2); // _3 = discriminant(_2); -// switchInt(move _3) -> [0isize: bb2, 1isize: bb1, otherwise: bb3]; +// switchInt(move _3) -> [0isize: bb1, 1isize: bb2, otherwise: bb4]; // } // bb1: { -// falseEdges -> [real: bb4, imaginary: bb2]; +// falseEdges -> [real: bb9, imaginary: bb3]; // } // bb2: { -// falseEdges -> [real: bb8, imaginary: bb9]; +// falseEdges -> [real: bb5, imaginary: bb1]; // } -// bb3: { +// bb3: { // binding3 and arm3 +// StorageLive(_9); +// _9 = ((_2 as Some).0: i32); +// StorageLive(_10); +// _10 = _9; +// _1 = (const 2i32, move _10); +// StorageDead(_10); +// StorageDead(_9); +// goto -> bb10; +// } +// bb4: { // unreachable; // } -// bb4: { // binding1 and guard +// bb5: { // binding1 and guard // StorageLive(_6); // _6 = &((_2 as Some).0: i32); // _4 = &shallow _2; // StorageLive(_7); -// _7 = const guard() -> [return: bb5, unwind: bb11]; +// _7 = const guard() -> [return: bb6, unwind: bb11]; // } -// bb5: { // end of guard -// switchInt(move _7) -> [false: bb7, otherwise: bb6]; +// bb6: { // end of guard +// switchInt(move _7) -> [false: bb8, otherwise: bb7]; // } -// bb6: { +// bb7: { // StorageDead(_7); // FakeRead(ForMatchGuard, _4); // FakeRead(ForGuardBinding, _6); @@ -151,25 +162,15 @@ fn main() { // StorageDead(_6); // goto -> bb10; // } -// bb7: { // to pre_binding3 (can skip 2 since this is `Some`) +// bb8: { // to pre_binding3 (can skip 2 since this is `Some`) // StorageDead(_7); // StorageDead(_6); -// falseEdges -> [real: bb9, imaginary: bb2]; +// falseEdges -> [real: bb3, imaginary: bb1]; // } -// bb8: { // arm2 +// bb9: { // arm2 // _1 = (const 3i32, const 3i32); // goto -> bb10; // } -// bb9: { // binding3 and arm3 -// StorageLive(_9); -// _9 = ((_2 as Some).0: i32); -// StorageLive(_10); -// _10 = _9; -// _1 = (const 2i32, move _10); -// StorageDead(_10); -// StorageDead(_9); -// goto -> bb10; -// } // bb10: { // StorageDead(_2); // StorageDead(_1); @@ -187,28 +188,35 @@ fn main() { // _2 = std::option::Option::::Some(const 1i32,); // FakeRead(ForMatchedPlace, _2); // _4 = discriminant(_2); -// switchInt(move _4) -> [1isize: bb1, otherwise: bb2]; +// switchInt(move _4) -> [1isize: bb2, otherwise: bb1]; // } // bb1: { -// falseEdges -> [real: bb4, imaginary: bb2]; +// falseEdges -> [real: bb9, imaginary: bb4]; // } // bb2: { -// falseEdges -> [real: bb8, imaginary: bb3]; +// falseEdges -> [real: bb5, imaginary: bb1]; // } // bb3: { -// falseEdges -> [real: bb9, imaginary: bb13]; +// StorageLive(_14); +// _14 = _2; +// _1 = const 4i32; +// StorageDead(_14); +// goto -> bb14; // } // bb4: { +// falseEdges -> [real: bb10, imaginary: bb3]; +// } +// bb5: { // StorageLive(_7); // _7 = &((_2 as Some).0: i32); // _5 = &shallow _2; // StorageLive(_8); -// _8 = const guard() -> [return: bb5, unwind: bb15]; +// _8 = const guard() -> [return: bb6, unwind: bb15]; // } -// bb5: { //end of guard1 -// switchInt(move _8) -> [false: bb7, otherwise: bb6]; +// bb6: { //end of guard1 +// switchInt(move _8) -> [false: bb8, otherwise: bb7]; // } -// bb6: { +// bb7: { // StorageDead(_8); // FakeRead(ForMatchGuard, _5); // FakeRead(ForGuardBinding, _7); @@ -219,32 +227,32 @@ fn main() { // StorageDead(_7); // goto -> bb14; // } -// bb7: { +// bb8: { // StorageDead(_8); // StorageDead(_7); -// falseEdges -> [real: bb2, imaginary: bb2]; +// falseEdges -> [real: bb1, imaginary: bb1]; // } -// bb8: { // binding2 & arm2 +// bb9: { // binding2 & arm2 // StorageLive(_9); // _9 = _2; // _1 = const 2i32; // 
StorageDead(_9); // goto -> bb14; // } -// bb9: { // binding3: Some(y) if guard2(y) +// bb10: { // binding3: Some(y) if guard2(y) // StorageLive(_11); // _11 = &((_2 as Some).0: i32); // _5 = &shallow _2; // StorageLive(_12); // StorageLive(_13); // _13 = (*_11); -// _12 = const guard2(move _13) -> [return: bb10, unwind: bb15]; +// _12 = const guard2(move _13) -> [return: bb11, unwind: bb15]; // } -// bb10: { // end of guard2 +// bb11: { // end of guard2 // StorageDead(_13); -// switchInt(move _12) -> [false: bb12, otherwise: bb11]; +// switchInt(move _12) -> [false: bb13, otherwise: bb12]; // } -// bb11: { // binding4 & arm4 +// bb12: { // binding4 & arm4 // StorageDead(_12); // FakeRead(ForMatchGuard, _5); // FakeRead(ForGuardBinding, _11); @@ -255,17 +263,10 @@ fn main() { // StorageDead(_11); // goto -> bb14; // } -// bb12: { +// bb13: { // StorageDead(_12); // StorageDead(_11); -// falseEdges -> [real: bb13, imaginary: bb13]; -// } -// bb13: { -// StorageLive(_14); -// _14 = _2; -// _1 = const 4i32; -// StorageDead(_14); -// goto -> bb14; +// falseEdges -> [real: bb3, imaginary: bb3]; // } // bb14: { // StorageDead(_2); diff --git a/src/test/mir-opt/retag.rs b/src/test/mir-opt/retag.rs index 00795dc56df4b..bb7fc3b9ad0ed 100644 --- a/src/test/mir-opt/retag.rs +++ b/src/test/mir-opt/retag.rs @@ -8,8 +8,12 @@ struct Test(i32); impl Test { // Make sure we run the pass on a method, not just on bare functions. - fn foo<'x>(&self, x: &'x mut i32) -> &'x mut i32 { x } - fn foo_shr<'x>(&self, x: &'x i32) -> &'x i32 { x } + fn foo<'x>(&self, x: &'x mut i32) -> &'x mut i32 { + x + } + fn foo_shr<'x>(&self, x: &'x i32) -> &'x i32 { + x + } } impl Drop for Test { @@ -27,7 +31,10 @@ fn main() { } // Also test closures - let c: fn(&i32) -> &i32 = |x: &i32| -> &i32 { let _y = x; x }; + let c: fn(&i32) -> &i32 = |x: &i32| -> &i32 { + let _y = x; + x + }; let _w = c(&x); // need to call `foo_shr` or it doesn't even get generated @@ -82,25 +89,23 @@ fn main() { // _10 = move _8; // Retag(_10); // ... -// _13 = &mut (*_10); -// Retag(_13); -// _12 = move _13 as *mut i32 (Misc); +// _12 = &raw mut (*_10); // Retag([raw] _12); // ... -// _16 = move _17(move _18) -> bb3; +// _15 = move _16(move _17) -> bb3; // } // // bb3: { -// Retag(_16); +// Retag(_15); // ... -// _20 = const Test::foo_shr(move _21, move _23) -> [return: bb4, unwind: bb6]; +// _19 = const Test::foo_shr(move _20, move _22) -> [return: bb4, unwind: bb6]; // } // // ... // } // END rustc.main.EraseRegions.after.mir // START rustc.main-{{closure}}.EraseRegions.after.mir -// fn main::{{closure}}#0(_1: &[closure@HirId { owner: DefIndex(13), local_id: 72 }], _2: &i32) -> &i32 { +// fn main::{{closure}}#0(_1: &[closure@main::{{closure}}#0], _2: &i32) -> &i32 { // ... // bb0: { // Retag([fn entry] _1); @@ -115,8 +120,8 @@ fn main() { // } // } // END rustc.main-{{closure}}.EraseRegions.after.mir -// START rustc.ptr-real_drop_in_place.Test.SimplifyCfg-make_shim.after.mir -// fn std::ptr::real_drop_in_place(_1: &mut Test) -> () { +// START rustc.ptr-drop_in_place.Test.SimplifyCfg-make_shim.after.mir +// fn std::intrinsics::drop_in_place(_1: *mut Test) -> () { // ... 
// bb0: { // Retag([raw] _1); @@ -128,4 +133,4 @@ fn main() { // return; // } // } -// END rustc.ptr-real_drop_in_place.Test.SimplifyCfg-make_shim.after.mir +// END rustc.ptr-drop_in_place.Test.SimplifyCfg-make_shim.after.mir From ba989b2b8a0b10575f985e5743434d93d91e5ebf Mon Sep 17 00:00:00 2001 From: Matthew Jasper Date: Sat, 29 Feb 2020 17:48:09 +0000 Subject: [PATCH 16/16] Add some more comments --- src/librustc_mir_build/build/block.rs | 2 +- src/librustc_mir_build/build/into.rs | 2 +- src/librustc_mir_build/build/matches/test.rs | 2 +- src/librustc_mir_build/build/scope.rs | 31 ++++++++++++++------ 4 files changed, 25 insertions(+), 12 deletions(-) diff --git a/src/librustc_mir_build/build/block.rs b/src/librustc_mir_build/build/block.rs index 9303c811df45f..2f16693c61efb 100644 --- a/src/librustc_mir_build/build/block.rs +++ b/src/librustc_mir_build/build/block.rs @@ -8,7 +8,7 @@ use rustc_hir as hir; use rustc_span::Span; impl<'a, 'tcx> Builder<'a, 'tcx> { - pub fn ast_block( + crate fn ast_block( &mut self, destination: &Place<'tcx>, scope: Option, diff --git a/src/librustc_mir_build/build/into.rs b/src/librustc_mir_build/build/into.rs index e57f10f0b14e9..117e445fac367 100644 --- a/src/librustc_mir_build/build/into.rs +++ b/src/librustc_mir_build/build/into.rs @@ -20,7 +20,7 @@ pub(in crate::build) trait EvalInto<'tcx> { } impl<'a, 'tcx> Builder<'a, 'tcx> { - pub fn into( + crate fn into( &mut self, destination: &Place<'tcx>, scope: Option, diff --git a/src/librustc_mir_build/build/matches/test.rs b/src/librustc_mir_build/build/matches/test.rs index 6b47937a89e2a..c8a63cb25f862 100644 --- a/src/librustc_mir_build/build/matches/test.rs +++ b/src/librustc_mir_build/build/matches/test.rs @@ -443,7 +443,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { literal: method, }), args: vec![val, expect], - destination: Some((eq_result.clone(), eq_block)), + destination: Some((eq_result, eq_block)), cleanup: None, from_hir_call: false, }, diff --git a/src/librustc_mir_build/build/scope.rs b/src/librustc_mir_build/build/scope.rs index 0971f8bdb59ec..94668d1e5f37b 100644 --- a/src/librustc_mir_build/build/scope.rs +++ b/src/librustc_mir_build/build/scope.rs @@ -235,6 +235,9 @@ trait DropTreeBuilder<'tcx> { impl DropTree { fn new() -> Self { + // The root node of the tree doesn't represent a drop, but instead + // represents the block in the tree that should be jumped to once all + // of the required drops have been performed. let fake_source_info = SourceInfo { span: DUMMY_SP, scope: OUTERMOST_SOURCE_SCOPE }; let fake_data = DropData { source_info: fake_source_info, local: Local::MAX, kind: DropKind::Storage }; @@ -256,12 +259,17 @@ impl DropTree { self.entry_points.push((to, from)); } + /// Builds the MIR for a given drop tree. + /// + /// `blocks` should have the same length as `self.drops`, and may have its + /// first value set to some already existing block. 
fn build_mir<'tcx, T: DropTreeBuilder<'tcx>>( &mut self, cfg: &mut CFG<'tcx>, blocks: &mut IndexVec>, ) { debug!("DropTree::lower_to_mir(drops = {:#?})", self); + debug_assert_eq!(blocks.len(), self.drops.len()); self.assign_blocks::(cfg, blocks); self.link_blocks(cfg, blocks) @@ -1105,7 +1113,6 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { return self.cfg.start_new_block().unit(); } let mut first_arm = true; - let cached_unwind_block = self.diverge_cleanup(); let arm_end_blocks: Vec<_> = arm_candidates .into_iter() .map(|(arm, candidate)| { @@ -1115,8 +1122,6 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { destination_scope.map(|scope| { self.unschedule_drop(scope, destination.as_local().unwrap()); }); - let top_scope = &mut self.scopes.scopes.last_mut().unwrap(); - top_scope.cached_unwind_block = Some(cached_unwind_block); } let arm_source_info = self.source_info(arm.span); @@ -1394,10 +1399,16 @@ impl<'tcx> DropTreeBuilder<'tcx> for GeneratorDrop { cfg.start_new_block() } fn add_entry(cfg: &mut CFG<'tcx>, from: BasicBlock, to: BasicBlock) { - let kind = &mut cfg.block_data_mut(from).terminator_mut().kind; - if let TerminatorKind::Yield { drop, .. } = kind { + let term = cfg.block_data_mut(from).terminator_mut(); + if let TerminatorKind::Yield { ref mut drop, .. } = term.kind { *drop = Some(to); - }; + } else { + span_bug!( + term.source_info.span, + "cannot enter generator drop tree from {:?}", + term.kind + ) + } } } @@ -1408,8 +1419,8 @@ impl<'tcx> DropTreeBuilder<'tcx> for Unwind { cfg.start_new_cleanup_block() } fn add_entry(cfg: &mut CFG<'tcx>, from: BasicBlock, to: BasicBlock) { - let term = &mut cfg.block_data_mut(from).terminator_mut().kind; - match term { + let term = &mut cfg.block_data_mut(from).terminator_mut(); + match &mut term.kind { TerminatorKind::Drop { unwind, .. } | TerminatorKind::DropAndReplace { unwind, .. } | TerminatorKind::FalseUnwind { unwind, .. } @@ -1425,7 +1436,9 @@ impl<'tcx> DropTreeBuilder<'tcx> for Unwind { | TerminatorKind::Unreachable | TerminatorKind::Yield { .. } | TerminatorKind::GeneratorDrop - | TerminatorKind::FalseEdges { .. } => bug!("cannot unwind from {:?}", term), + | TerminatorKind::FalseEdges { .. } => { + span_bug!(term.source_info.span, "cannot unwind from {:?}", term.kind) + } } } }
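Patch 16's `GeneratorDrop` and `Unwind` changes both hang off the same `DropTreeBuilder` trait: `make_block` decides what kind of block an exit tree gets, and `add_entry` wires an existing terminator's edge into it, with `span_bug!` now catching terminators that cannot legally enter that tree. A minimal sketch of the pattern, with hypothetical `Cfg`/`Block` stand-ins rather than the compiler's real CFG types:

// Minimal sketch of the `DropTreeBuilder` pattern: the drop-tree lowering
// is generic over how exit blocks are created and how entry edges are
// wired to them. `Cfg` and `Block` are simplified stand-ins.
struct Cfg { blocks: Vec<&'static str> }
type Block = usize;

trait DropTreeBuilder {
    fn make_block(cfg: &mut Cfg) -> Block;
    fn add_entry(cfg: &mut Cfg, from: Block, to: Block);
}

struct Unwind;
impl DropTreeBuilder for Unwind {
    fn make_block(cfg: &mut Cfg) -> Block {
        // Unwind paths get cleanup blocks.
        cfg.blocks.push("cleanup");
        cfg.blocks.len() - 1
    }
    fn add_entry(_cfg: &mut Cfg, from: Block, to: Block) {
        println!("unwind edge: bb{} -> bb{}", from, to);
    }
}

// One routine serves every kind of exit tree.
fn build_exit<T: DropTreeBuilder>(cfg: &mut Cfg, entry: Block) -> Block {
    let block = T::make_block(cfg);
    T::add_entry(cfg, entry, block);
    block
}

fn main() {
    let mut cfg = Cfg { blocks: vec!["start"] };
    let cleanup = build_exit::<Unwind>(&mut cfg, 0);
    assert_eq!(cfg.blocks[cleanup], "cleanup");
}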
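Both drop tests now label every `Allocator` with the source text of the test closure via a `run_test!` macro wrapping `stringify!`, so a missing-free or double-free panic names the exact failing case instead of just its allocation bitmap. A standalone sketch of that trick, with a deliberately simplified `run_test` signature:

// Standalone sketch of the naming trick: `stringify!` captures the source
// text of the test closure, so the allocator (and any panic message) can
// identify which case was running.
fn run_test<F: FnMut(u32)>(mut f: F, name: &'static str) {
    println!("running {}", name);
    f(0);
}

macro_rules! run_test {
    ($e:expr) => {
        run_test($e, stringify!($e));
    };
}

fn main() {
    // Prints the closure's source text, e.g. `running |a| drop(a)`.
    run_test!(|a| drop(a));
}

The one call that cannot use the macro, `run_test_nopanic(|a| union1(a), "|a| union1(a)")`, passes the equivalent string by hand.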