Skip to content
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.

Commit b397c11

Browse files
committed Jun 22, 2019
Auto merge of #62038 - Zoxc:pre-gc-dep-graph, r=<try>
[WIP] Make dep node indices persistent between sessions. This makes marking dep nodes green faster (and lock-free in the case with no diagnostics). This change is split out from #60035. Unlike #60035, this makes loading the dep graph slower because it loads 2 copies of the dep graph, one immutable and one mutable. Based on #61845, #61779 and #61923.
2 parents e562b24 + 290912a commit b397c11

File tree

19 files changed

+906
-512
lines changed

19 files changed

+906
-512
lines changed
 

‎Cargo.lock

+1
Original file line numberDiff line numberDiff line change
@@ -2893,6 +2893,7 @@ name = "rustc_data_structures"
28932893
version = "0.0.0"
28942894
dependencies = [
28952895
"cfg-if 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)",
2896+
"crossbeam-utils 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)",
28962897
"ena 0.13.0 (registry+https://github.com/rust-lang/crates.io-index)",
28972898
"graphviz 0.0.0",
28982899
"indexmap 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)",

‎src/librustc/dep_graph/graph.rs

+512-300
Large diffs are not rendered by default.

‎src/librustc/dep_graph/mod.rs

+2-2
Original file line numberDiff line numberDiff line change
@@ -11,9 +11,9 @@ pub mod cgu_reuse_tracker;
1111
pub use self::dep_tracking_map::{DepTrackingMap, DepTrackingMapConfig};
1212
pub use self::dep_node::{DepNode, DepKind, DepConstructor, WorkProductId, RecoverKey, label_strs};
1313
pub use self::graph::{DepGraph, WorkProduct, DepNodeIndex, DepNodeColor, TaskDeps, hash_result};
14-
pub use self::graph::WorkProductFileKind;
14+
pub use self::graph::{DepGraphArgs, WorkProductFileKind, CurrentDepGraph};
1515
pub use self::prev::PreviousDepGraph;
1616
pub use self::query::DepGraphQuery;
1717
pub use self::safe::AssertDepGraphSafe;
1818
pub use self::safe::DepGraphSafe;
19-
pub use self::serialized::{SerializedDepGraph, SerializedDepNodeIndex};
19+
pub use self::serialized::SerializedDepGraph;

‎src/librustc/dep_graph/prev.rs

+41-14
Original file line numberDiff line numberDiff line change
@@ -1,43 +1,70 @@
11
use crate::ich::Fingerprint;
22
use rustc_data_structures::fx::FxHashMap;
3-
use super::dep_node::DepNode;
4-
use super::serialized::{SerializedDepGraph, SerializedDepNodeIndex};
3+
use rustc_data_structures::indexed_vec::IndexVec;
4+
use rustc_data_structures::sync::AtomicCell;
5+
use super::dep_node::{DepNode, DepKind};
6+
use super::graph::{DepNodeIndex, DepNodeState};
7+
use super::serialized::SerializedDepGraph;
58

6-
#[derive(Debug, RustcEncodable, RustcDecodable, Default)]
9+
#[derive(Debug, Default)]
710
pub struct PreviousDepGraph {
811
data: SerializedDepGraph,
9-
index: FxHashMap<DepNode, SerializedDepNodeIndex>,
12+
pub(super) index: FxHashMap<DepNode, DepNodeIndex>,
13+
pub(super) unused: Vec<DepNodeIndex>,
1014
}
1115

1216
impl PreviousDepGraph {
13-
pub fn new(data: SerializedDepGraph) -> PreviousDepGraph {
17+
pub fn new_and_state(
18+
data: SerializedDepGraph
19+
) -> (PreviousDepGraph, IndexVec<DepNodeIndex, AtomicCell<DepNodeState>>) {
20+
let mut unused = Vec::new();
21+
22+
let state: IndexVec<_, _> = data.nodes.iter_enumerated().map(|(index, node)| {
23+
if node.kind == DepKind::Null {
24+
// There might be `DepKind::Null` nodes due to thread-local dep node indices
25+
// that didn't get assigned anything.
26+
// We also changed outdated nodes to `DepKind::Null`.
27+
unused.push(index);
28+
AtomicCell::new(DepNodeState::Invalid)
29+
} else {
30+
AtomicCell::new(DepNodeState::Unknown)
31+
}
32+
}).collect();
33+
1434
let index: FxHashMap<_, _> = data.nodes
1535
.iter_enumerated()
16-
.map(|(idx, &dep_node)| (dep_node, idx))
36+
.filter_map(|(idx, &dep_node)| {
37+
if dep_node.kind == DepKind::Null {
38+
None
39+
} else {
40+
Some((dep_node, idx))
41+
}
42+
})
1743
.collect();
18-
PreviousDepGraph { data, index }
44+
45+
(PreviousDepGraph { data, index, unused }, state)
1946
}
2047

2148
#[inline]
2249
pub fn edge_targets_from(
2350
&self,
24-
dep_node_index: SerializedDepNodeIndex
25-
) -> &[SerializedDepNodeIndex] {
51+
dep_node_index: DepNodeIndex
52+
) -> &[DepNodeIndex] {
2653
self.data.edge_targets_from(dep_node_index)
2754
}
2855

2956
#[inline]
30-
pub fn index_to_node(&self, dep_node_index: SerializedDepNodeIndex) -> DepNode {
57+
pub fn index_to_node(&self, dep_node_index: DepNodeIndex) -> DepNode {
3158
self.data.nodes[dep_node_index]
3259
}
3360

3461
#[inline]
35-
pub fn node_to_index(&self, dep_node: &DepNode) -> SerializedDepNodeIndex {
62+
pub fn node_to_index(&self, dep_node: &DepNode) -> DepNodeIndex {
3663
self.index[dep_node]
3764
}
3865

3966
#[inline]
40-
pub fn node_to_index_opt(&self, dep_node: &DepNode) -> Option<SerializedDepNodeIndex> {
67+
pub fn node_to_index_opt(&self, dep_node: &DepNode) -> Option<DepNodeIndex> {
4168
self.index.get(dep_node).cloned()
4269
}
4370

@@ -50,12 +77,12 @@ impl PreviousDepGraph {
5077

5178
#[inline]
5279
pub fn fingerprint_by_index(&self,
53-
dep_node_index: SerializedDepNodeIndex)
80+
dep_node_index: DepNodeIndex)
5481
-> Fingerprint {
5582
self.data.fingerprints[dep_node_index]
5683
}
5784

5885
pub fn node_count(&self) -> usize {
59-
self.index.len()
86+
self.data.nodes.len()
6087
}
6188
}

‎src/librustc/dep_graph/serialized.rs

+8-11
Original file line numberDiff line numberDiff line change
@@ -2,34 +2,31 @@
22
33
use crate::dep_graph::DepNode;
44
use crate::ich::Fingerprint;
5-
use rustc_data_structures::indexed_vec::{IndexVec, Idx};
6-
7-
newtype_index! {
8-
pub struct SerializedDepNodeIndex { .. }
9-
}
5+
use rustc_data_structures::indexed_vec::IndexVec;
6+
use super::graph::DepNodeIndex;
107

118
/// Data for use when recompiling the **current crate**.
129
#[derive(Debug, RustcEncodable, RustcDecodable, Default)]
1310
pub struct SerializedDepGraph {
1411
/// The set of all DepNodes in the graph
15-
pub nodes: IndexVec<SerializedDepNodeIndex, DepNode>,
12+
pub nodes: IndexVec<DepNodeIndex, DepNode>,
1613
/// The set of all Fingerprints in the graph. Each Fingerprint corresponds to
1714
/// the DepNode at the same index in the nodes vector.
18-
pub fingerprints: IndexVec<SerializedDepNodeIndex, Fingerprint>,
15+
pub fingerprints: IndexVec<DepNodeIndex, Fingerprint>,
1916
/// For each DepNode, stores the list of edges originating from that
2017
/// DepNode. Encoded as a [start, end) pair indexing into edge_list_data,
2118
/// which holds the actual DepNodeIndices of the target nodes.
22-
pub edge_list_indices: IndexVec<SerializedDepNodeIndex, (u32, u32)>,
19+
pub edge_list_indices: IndexVec<DepNodeIndex, (u32, u32)>,
2320
/// A flattened list of all edge targets in the graph. Edge sources are
2421
/// implicit in edge_list_indices.
25-
pub edge_list_data: Vec<SerializedDepNodeIndex>,
22+
pub edge_list_data: Vec<DepNodeIndex>,
2623
}
2724

2825
impl SerializedDepGraph {
2926
#[inline]
3027
pub fn edge_targets_from(&self,
31-
source: SerializedDepNodeIndex)
32-
-> &[SerializedDepNodeIndex] {
28+
source: DepNodeIndex)
29+
-> &[DepNodeIndex] {
3330
let targets = self.edge_list_indices[source];
3431
&self.edge_list_data[targets.0 as usize..targets.1 as usize]
3532
}

‎src/librustc/query/mod.rs

+1-1
Original file line numberDiff line numberDiff line change
@@ -2,10 +2,10 @@ use crate::ty::query::QueryDescription;
22
use crate::ty::query::queries;
33
use crate::ty::{self, ParamEnvAnd, Ty, TyCtxt};
44
use crate::ty::subst::SubstsRef;
5-
use crate::dep_graph::SerializedDepNodeIndex;
65
use crate::hir::def_id::{CrateNum, DefId, DefIndex};
76
use crate::mir;
87
use crate::mir::interpret::GlobalId;
8+
use crate::dep_graph::DepNodeIndex;
99
use crate::traits;
1010
use crate::traits::query::{
1111
CanonicalPredicateGoal, CanonicalProjectionGoal,

‎src/librustc/ty/context.rs

+22-20
Original file line numberDiff line numberDiff line change
@@ -48,14 +48,14 @@ use crate::util::common::ErrorReported;
4848
use crate::util::nodemap::{DefIdMap, DefIdSet, ItemLocalMap, ItemLocalSet};
4949
use crate::util::nodemap::{FxHashMap, FxHashSet};
5050
use errors::DiagnosticBuilder;
51-
use rustc_data_structures::interner::HashInterner;
5251
use smallvec::SmallVec;
5352
use rustc_data_structures::stable_hasher::{HashStable, hash_stable_hashmap,
5453
StableHasher, StableHasherResult,
5554
StableVec};
5655
use arena::SyncDroplessArena;
5756
use rustc_data_structures::indexed_vec::{Idx, IndexVec};
5857
use rustc_data_structures::sync::{Lrc, Lock, WorkerLocal};
58+
use rustc_data_structures::sharded::ShardedHashMap;
5959
use std::any::Any;
6060
use std::borrow::Borrow;
6161
use std::cmp::Ordering;
@@ -93,7 +93,7 @@ impl AllArenas {
9393
}
9494
}
9595

96-
type InternedSet<'tcx, T> = Lock<FxHashMap<Interned<'tcx, T>, ()>>;
96+
type InternedSet<'tcx, T> = ShardedHashMap<Interned<'tcx, T>, ()>;
9797

9898
pub struct CtxtInterners<'tcx> {
9999
/// The arena that types, regions, etc are allocated from
@@ -147,7 +147,7 @@ impl<'tcx> CtxtInterners<'tcx> {
147147
// determine that all contents are in the global tcx.
148148
// See comments on Lift for why we can't use that.
149149
if flags.flags.intersects(ty::TypeFlags::KEEP_IN_LOCAL_TCX) {
150-
local.type_.borrow_mut().intern(st, |st| {
150+
local.type_.intern(st, |st| {
151151
let ty_struct = TyS {
152152
sty: st,
153153
flags: flags.flags,
@@ -171,7 +171,7 @@ impl<'tcx> CtxtInterners<'tcx> {
171171
Interned(local.arena.alloc(ty_struct))
172172
}).0
173173
} else {
174-
global.type_.borrow_mut().intern(st, |st| {
174+
global.type_.intern(st, |st| {
175175
let ty_struct = TyS {
176176
sty: st,
177177
flags: flags.flags,
@@ -964,7 +964,7 @@ impl<'tcx> CommonTypes<'tcx> {
964964
impl<'tcx> CommonLifetimes<'tcx> {
965965
fn new(interners: &CtxtInterners<'tcx>) -> CommonLifetimes<'tcx> {
966966
let mk = |r| {
967-
interners.region.borrow_mut().intern(r, |r| {
967+
interners.region.intern(r, |r| {
968968
Interned(interners.arena.alloc(r))
969969
}).0
970970
};
@@ -980,7 +980,7 @@ impl<'tcx> CommonLifetimes<'tcx> {
980980
impl<'tcx> CommonConsts<'tcx> {
981981
fn new(interners: &CtxtInterners<'tcx>, types: &CommonTypes<'tcx>) -> CommonConsts<'tcx> {
982982
let mk_const = |c| {
983-
interners.const_.borrow_mut().intern(c, |c| {
983+
interners.const_.intern(c, |c| {
984984
Interned(interners.arena.alloc(c))
985985
}).0
986986
};
@@ -1096,14 +1096,14 @@ pub struct GlobalCtxt<'tcx> {
10961096
/// Data layout specification for the current target.
10971097
pub data_layout: TargetDataLayout,
10981098

1099-
stability_interner: Lock<FxHashMap<&'tcx attr::Stability, ()>>,
1099+
stability_interner: ShardedHashMap<&'tcx attr::Stability, ()>,
11001100

11011101
/// Stores the value of constants (and deduplicates the actual memory)
1102-
allocation_interner: Lock<FxHashMap<&'tcx Allocation, ()>>,
1102+
allocation_interner: ShardedHashMap<&'tcx Allocation, ()>,
11031103

11041104
pub alloc_map: Lock<interpret::AllocMap<'tcx>>,
11051105

1106-
layout_interner: Lock<FxHashMap<&'tcx LayoutDetails, ()>>,
1106+
layout_interner: ShardedHashMap<&'tcx LayoutDetails, ()>,
11071107

11081108
/// A general purpose channel to throw data out the back towards LLVM worker
11091109
/// threads.
@@ -1148,7 +1148,7 @@ impl<'tcx> TyCtxt<'tcx> {
11481148
}
11491149

11501150
pub fn intern_const_alloc(self, alloc: Allocation) -> &'tcx Allocation {
1151-
self.allocation_interner.borrow_mut().intern(alloc, |alloc| {
1151+
self.allocation_interner.intern(alloc, |alloc| {
11521152
self.arena.alloc(alloc)
11531153
})
11541154
}
@@ -1162,13 +1162,13 @@ impl<'tcx> TyCtxt<'tcx> {
11621162
}
11631163

11641164
pub fn intern_stability(self, stab: attr::Stability) -> &'tcx attr::Stability {
1165-
self.stability_interner.borrow_mut().intern(stab, |stab| {
1165+
self.stability_interner.intern(stab, |stab| {
11661166
self.arena.alloc(stab)
11671167
})
11681168
}
11691169

11701170
pub fn intern_layout(self, layout: LayoutDetails) -> &'tcx LayoutDetails {
1171-
self.layout_interner.borrow_mut().intern(layout, |layout| {
1171+
self.layout_interner.intern(layout, |layout| {
11721172
self.arena.alloc(layout)
11731173
})
11741174
}
@@ -2110,7 +2110,9 @@ macro_rules! sty_debug_print {
21102110
};
21112111
$(let mut $variant = total;)*
21122112

2113-
for &Interned(t) in tcx.interners.type_.borrow().keys() {
2113+
let shards = tcx.interners.type_.lock_shards();
2114+
let types = shards.iter().flat_map(|shard| shard.keys());
2115+
for &Interned(t) in types {
21142116
let variant = match t.sty {
21152117
ty::Bool | ty::Char | ty::Int(..) | ty::Uint(..) |
21162118
ty::Float(..) | ty::Str | ty::Never => continue,
@@ -2161,11 +2163,11 @@ impl<'tcx> TyCtxt<'tcx> {
21612163
Generator, GeneratorWitness, Dynamic, Closure, Tuple, Bound,
21622164
Param, Infer, UnnormalizedProjection, Projection, Opaque, Foreign);
21632165

2164-
println!("InternalSubsts interner: #{}", self.interners.substs.borrow().len());
2165-
println!("Region interner: #{}", self.interners.region.borrow().len());
2166-
println!("Stability interner: #{}", self.stability_interner.borrow().len());
2167-
println!("Allocation interner: #{}", self.allocation_interner.borrow().len());
2168-
println!("Layout interner: #{}", self.layout_interner.borrow().len());
2166+
println!("InternalSubsts interner: #{}", self.interners.substs.len());
2167+
println!("Region interner: #{}", self.interners.region.len());
2168+
println!("Stability interner: #{}", self.stability_interner.len());
2169+
println!("Allocation interner: #{}", self.allocation_interner.len());
2170+
println!("Layout interner: #{}", self.layout_interner.len());
21692171
}
21702172
}
21712173

@@ -2298,7 +2300,7 @@ macro_rules! intern_method {
22982300
// determine that all contents are in the global tcx.
22992301
// See comments on Lift for why we can't use that.
23002302
if ($keep_in_local_tcx)(&v) {
2301-
self.interners.$name.borrow_mut().intern_ref(key, || {
2303+
self.interners.$name.intern_ref(key, || {
23022304
// Make sure we don't end up with inference
23032305
// types/regions in the global tcx.
23042306
if self.is_global() {
@@ -2310,7 +2312,7 @@ macro_rules! intern_method {
23102312
Interned($alloc_method(&self.interners.arena, v))
23112313
}).0
23122314
} else {
2313-
self.global_interners.$name.borrow_mut().intern_ref(key, || {
2315+
self.global_interners.$name.intern_ref(key, || {
23142316
Interned($alloc_method(&self.global_interners.arena, v))
23152317
}).0
23162318
}

‎src/librustc/ty/query/config.rs

+5-6
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,4 @@
1-
use crate::dep_graph::SerializedDepNodeIndex;
2-
use crate::dep_graph::DepNode;
1+
use crate::dep_graph::{DepNode, DepNodeIndex};
32
use crate::hir::def_id::{CrateNum, DefId};
43
use crate::ty::TyCtxt;
54
use crate::ty::query::queries;
@@ -11,7 +10,7 @@ use crate::util::profiling::ProfileCategory;
1110
use std::borrow::Cow;
1211
use std::hash::Hash;
1312
use std::fmt::Debug;
14-
use rustc_data_structures::sync::Lock;
13+
use rustc_data_structures::sharded::Sharded;
1514
use rustc_data_structures::fingerprint::Fingerprint;
1615
use crate::ich::StableHashingContext;
1716

@@ -31,7 +30,7 @@ pub(crate) trait QueryAccessors<'tcx>: QueryConfig<'tcx> {
3130
fn query(key: Self::Key) -> Query<'tcx>;
3231

3332
// Don't use this method to access query results, instead use the methods on TyCtxt
34-
fn query_cache<'a>(tcx: TyCtxt<'tcx>) -> &'a Lock<QueryCache<'tcx, Self>>;
33+
fn query_cache<'a>(tcx: TyCtxt<'tcx>) -> &'a Sharded<QueryCache<'tcx, Self>>;
3534

3635
fn to_dep_node(tcx: TyCtxt<'tcx>, key: &Self::Key) -> DepNode;
3736

@@ -54,7 +53,7 @@ pub(crate) trait QueryDescription<'tcx>: QueryAccessors<'tcx> {
5453
false
5554
}
5655

57-
fn try_load_from_disk(_: TyCtxt<'tcx>, _: SerializedDepNodeIndex) -> Option<Self::Value> {
56+
fn try_load_from_disk(_: TyCtxt<'tcx>, _: DepNodeIndex) -> Option<Self::Value> {
5857
bug!("QueryDescription::load_from_disk() called for an unsupported query.")
5958
}
6059
}
@@ -86,7 +85,7 @@ macro_rules! impl_disk_cacheable_query(
8685

8786
#[inline]
8887
fn try_load_from_disk(tcx: TyCtxt<'tcx>,
89-
id: SerializedDepNodeIndex)
88+
id: DepNodeIndex)
9089
-> Option<Self::Value> {
9190
tcx.queries.on_disk_cache.try_load_query_result(tcx, id)
9291
}

‎src/librustc/ty/query/on_disk_cache.rs

+21-21
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
use crate::dep_graph::{DepNodeIndex, SerializedDepNodeIndex};
1+
use crate::dep_graph::DepNodeIndex;
22
use crate::hir;
33
use crate::hir::def_id::{CrateNum, DefIndex, DefId, LocalDefId, LOCAL_CRATE};
44
use crate::hir::map::definitions::DefPathHash;
@@ -62,11 +62,11 @@ pub struct OnDiskCache<'sess> {
6262

6363
// A map from dep-node to the position of the cached query result in
6464
// `serialized_data`.
65-
query_result_index: FxHashMap<SerializedDepNodeIndex, AbsoluteBytePos>,
65+
query_result_index: FxHashMap<DepNodeIndex, AbsoluteBytePos>,
6666

6767
// A map from dep-node to the position of any associated diagnostics in
6868
// `serialized_data`.
69-
prev_diagnostics_index: FxHashMap<SerializedDepNodeIndex, AbsoluteBytePos>,
69+
prev_diagnostics_index: FxHashMap<DepNodeIndex, AbsoluteBytePos>,
7070

7171
alloc_decoding_state: AllocDecodingState,
7272
}
@@ -82,8 +82,8 @@ struct Footer {
8282
interpret_alloc_index: Vec<u32>,
8383
}
8484

85-
type EncodedQueryResultIndex = Vec<(SerializedDepNodeIndex, AbsoluteBytePos)>;
86-
type EncodedDiagnosticsIndex = Vec<(SerializedDepNodeIndex, AbsoluteBytePos)>;
85+
type EncodedQueryResultIndex = Vec<(DepNodeIndex, AbsoluteBytePos)>;
86+
type EncodedDiagnosticsIndex = Vec<(DepNodeIndex, AbsoluteBytePos)>;
8787
type EncodedDiagnostics = Vec<Diagnostic>;
8888

8989
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug, RustcEncodable, RustcDecodable)]
@@ -224,18 +224,18 @@ impl<'sess> OnDiskCache<'sess> {
224224

225225
// const eval is special, it only encodes successfully evaluated constants
226226
use crate::ty::query::QueryAccessors;
227-
let cache = const_eval::query_cache(tcx).borrow();
228-
assert!(cache.active.is_empty());
229-
for (key, entry) in cache.results.iter() {
227+
let shards = const_eval::query_cache(tcx).lock_shards();
228+
assert!(shards.iter().all(|shard| shard.active.is_empty()));
229+
for (key, entry) in shards.iter().flat_map(|shard| shard.results.iter()) {
230230
use crate::ty::query::config::QueryDescription;
231231
if const_eval::cache_on_disk(tcx, key.clone()) {
232232
if let Ok(ref value) = entry.value {
233-
let dep_node = SerializedDepNodeIndex::new(entry.index.index());
233+
let dep_node = DepNodeIndex::new(entry.index.index());
234234

235235
// Record position of the cache entry
236236
qri.push((dep_node, AbsoluteBytePos::new(enc.position())));
237237

238-
// Encode the type check tables with the SerializedDepNodeIndex
238+
// Encode the type check tables with the DepNodeIndex
239239
// as tag.
240240
enc.encode_tagged(dep_node, value)?;
241241
}
@@ -253,7 +253,7 @@ impl<'sess> OnDiskCache<'sess> {
253253
let pos = AbsoluteBytePos::new(encoder.position());
254254
// Let's make sure we get the expected type here:
255255
let diagnostics: &EncodedDiagnostics = diagnostics;
256-
let dep_node_index = SerializedDepNodeIndex::new(dep_node_index.index());
256+
let dep_node_index = DepNodeIndex::new(dep_node_index.index());
257257
encoder.encode_tagged(dep_node_index, diagnostics)?;
258258

259259
Ok((dep_node_index, pos))
@@ -327,7 +327,7 @@ impl<'sess> OnDiskCache<'sess> {
327327
pub fn load_diagnostics<'tcx>(
328328
&self,
329329
tcx: TyCtxt<'tcx>,
330-
dep_node_index: SerializedDepNodeIndex,
330+
dep_node_index: DepNodeIndex,
331331
) -> Vec<Diagnostic> {
332332
let diagnostics: Option<EncodedDiagnostics> = self.load_indexed(
333333
tcx,
@@ -352,11 +352,11 @@ impl<'sess> OnDiskCache<'sess> {
352352
}
353353

354354
/// Returns the cached query result if there is something in the cache for
355-
/// the given `SerializedDepNodeIndex`; otherwise returns `None`.
355+
/// the given `DepNodeIndex`; otherwise returns `None`.
356356
pub fn try_load_query_result<'tcx, T>(
357357
&self,
358358
tcx: TyCtxt<'tcx>,
359-
dep_node_index: SerializedDepNodeIndex,
359+
dep_node_index: DepNodeIndex,
360360
) -> Option<T>
361361
where
362362
T: Decodable,
@@ -386,8 +386,8 @@ impl<'sess> OnDiskCache<'sess> {
386386
fn load_indexed<'tcx, T>(
387387
&self,
388388
tcx: TyCtxt<'tcx>,
389-
dep_node_index: SerializedDepNodeIndex,
390-
index: &FxHashMap<SerializedDepNodeIndex, AbsoluteBytePos>,
389+
dep_node_index: DepNodeIndex,
390+
index: &FxHashMap<DepNodeIndex, AbsoluteBytePos>,
391391
debug_tag: &'static str,
392392
) -> Option<T>
393393
where
@@ -1087,16 +1087,16 @@ where
10871087
unsafe { ::std::intrinsics::type_name::<Q>() });
10881088

10891089
time_ext(tcx.sess.time_extended(), Some(tcx.sess), desc, || {
1090-
let map = Q::query_cache(tcx).borrow();
1091-
assert!(map.active.is_empty());
1092-
for (key, entry) in map.results.iter() {
1090+
let shards = Q::query_cache(tcx).lock_shards();
1091+
assert!(shards.iter().all(|shard| shard.active.is_empty()));
1092+
for (key, entry) in shards.iter().flat_map(|shard| shard.results.iter()) {
10931093
if Q::cache_on_disk(tcx, key.clone()) {
1094-
let dep_node = SerializedDepNodeIndex::new(entry.index.index());
1094+
let dep_node = DepNodeIndex::new(entry.index.index());
10951095

10961096
// Record position of the cache entry
10971097
query_result_index.push((dep_node, AbsoluteBytePos::new(encoder.position())));
10981098

1099-
// Encode the type check tables with the SerializedDepNodeIndex
1099+
// Encode the type check tables with the DepNodeIndex
11001100
// as tag.
11011101
encoder.encode_tagged(dep_node, &entry.value)?;
11021102
}

‎src/librustc/ty/query/plumbing.rs

+31-37
Original file line numberDiff line numberDiff line change
@@ -2,7 +2,7 @@
22
//! generate the actual methods on tcx which find and execute the provider,
33
//! manage the caches, and so forth.
44
5-
use crate::dep_graph::{DepNodeIndex, DepNode, DepKind, SerializedDepNodeIndex};
5+
use crate::dep_graph::{DepNodeIndex, DepNode, DepKind};
66
use crate::ty::tls;
77
use crate::ty::{self, TyCtxt};
88
use crate::ty::query::Query;
@@ -17,6 +17,7 @@ use errors::Diagnostic;
1717
use errors::FatalError;
1818
use rustc_data_structures::fx::{FxHashMap};
1919
use rustc_data_structures::sync::{Lrc, Lock};
20+
use rustc_data_structures::sharded::Sharded;
2021
use rustc_data_structures::thin_vec::ThinVec;
2122
#[cfg(not(parallel_compiler))]
2223
use rustc_data_structures::cold_path;
@@ -90,7 +91,7 @@ macro_rules! profq_query_msg {
9091
/// A type representing the responsibility to execute the job in the `job` field.
9192
/// This will poison the relevant query if dropped.
9293
pub(super) struct JobOwner<'a, 'tcx, Q: QueryDescription<'tcx> + 'a> {
93-
cache: &'a Lock<QueryCache<'tcx, Q>>,
94+
cache: &'a Sharded<QueryCache<'tcx, Q>>,
9495
key: Q::Key,
9596
job: Lrc<QueryJob<'tcx>>,
9697
}
@@ -107,7 +108,7 @@ impl<'a, 'tcx, Q: QueryDescription<'tcx>> JobOwner<'a, 'tcx, Q> {
107108
pub(super) fn try_get(tcx: TyCtxt<'tcx>, span: Span, key: &Q::Key) -> TryGetJob<'a, 'tcx, Q> {
108109
let cache = Q::query_cache(tcx);
109110
loop {
110-
let mut lock = cache.borrow_mut();
111+
let mut lock = cache.get_shard_by_value(key).lock();
111112
if let Some(value) = lock.results.get(key) {
112113
profq_msg!(tcx, ProfileQueriesMsg::CacheHit);
113114
tcx.sess.profiler(|p| p.record_query_hit(Q::NAME));
@@ -191,7 +192,7 @@ impl<'a, 'tcx, Q: QueryDescription<'tcx>> JobOwner<'a, 'tcx, Q> {
191192

192193
let value = QueryValue::new(result.clone(), dep_node_index);
193194
{
194-
let mut lock = cache.borrow_mut();
195+
let mut lock = cache.get_shard_by_value(&key).lock();
195196
lock.active.remove(&key);
196197
lock.results.insert(key, value);
197198
}
@@ -215,7 +216,8 @@ impl<'a, 'tcx, Q: QueryDescription<'tcx>> Drop for JobOwner<'a, 'tcx, Q> {
215216
#[cold]
216217
fn drop(&mut self) {
217218
// Poison the query so jobs waiting on it panic
218-
self.cache.borrow_mut().active.insert(self.key.clone(), QueryResult::Poisoned);
219+
let shard = self.cache.get_shard_by_value(&self.key);
220+
shard.lock().active.insert(self.key.clone(), QueryResult::Poisoned);
219221
// Also signal the completion of the job, so waiters
220222
// will continue execution
221223
self.job.signal_complete();
@@ -411,10 +413,9 @@ impl<'tcx> TyCtxt<'tcx> {
411413
// try_mark_green(), so we can ignore them here.
412414
let loaded = self.start_query(job.job.clone(), None, |tcx| {
413415
let marked = tcx.dep_graph.try_mark_green_and_read(tcx, &dep_node);
414-
marked.map(|(prev_dep_node_index, dep_node_index)| {
416+
marked.map(|dep_node_index| {
415417
(tcx.load_from_disk_and_cache_in_memory::<Q>(
416418
key.clone(),
417-
prev_dep_node_index,
418419
dep_node_index,
419420
&dep_node
420421
), dep_node_index)
@@ -434,7 +435,6 @@ impl<'tcx> TyCtxt<'tcx> {
434435
fn load_from_disk_and_cache_in_memory<Q: QueryDescription<'tcx>>(
435436
self,
436437
key: Q::Key,
437-
prev_dep_node_index: SerializedDepNodeIndex,
438438
dep_node_index: DepNodeIndex,
439439
dep_node: &DepNode,
440440
) -> Q::Value {
@@ -447,7 +447,7 @@ impl<'tcx> TyCtxt<'tcx> {
447447
let result = if Q::cache_on_disk(self.global_tcx(), key.clone()) &&
448448
self.sess.opts.debugging_opts.incremental_queries {
449449
self.sess.profiler(|p| p.incremental_load_result_start(Q::NAME));
450-
let result = Q::try_load_from_disk(self.global_tcx(), prev_dep_node_index);
450+
let result = Q::try_load_from_disk(self.global_tcx(), dep_node_index);
451451
self.sess.profiler(|p| p.incremental_load_result_end(Q::NAME));
452452

453453
// We always expect to find a cached result for things that
@@ -486,11 +486,11 @@ impl<'tcx> TyCtxt<'tcx> {
486486
// If -Zincremental-verify-ich is specified, re-hash results from
487487
// the cache and make sure that they have the expected fingerprint.
488488
if unlikely!(self.sess.opts.debugging_opts.incremental_verify_ich) {
489-
self.incremental_verify_ich::<Q>(&result, dep_node, dep_node_index);
489+
self.incremental_verify_ich::<Q>(&result, dep_node);
490490
}
491491

492492
if unlikely!(self.sess.opts.debugging_opts.query_dep_graph) {
493-
self.dep_graph.mark_loaded_from_cache(dep_node_index, true);
493+
self.dep_graph.mark_loaded_from_cache(*dep_node, true);
494494
}
495495

496496
result
@@ -502,24 +502,18 @@ impl<'tcx> TyCtxt<'tcx> {
502502
self,
503503
result: &Q::Value,
504504
dep_node: &DepNode,
505-
dep_node_index: DepNodeIndex,
506505
) {
507506
use crate::ich::Fingerprint;
508507

509-
assert!(Some(self.dep_graph.fingerprint_of(dep_node_index)) ==
510-
self.dep_graph.prev_fingerprint_of(dep_node),
511-
"Fingerprint for green query instance not loaded \
512-
from cache: {:?}", dep_node);
513-
514508
debug!("BEGIN verify_ich({:?})", dep_node);
515509
let mut hcx = self.create_stable_hashing_context();
516510

517511
let new_hash = Q::hash_result(&mut hcx, result).unwrap_or(Fingerprint::ZERO);
518512
debug!("END verify_ich({:?})", dep_node);
519513

520-
let old_hash = self.dep_graph.fingerprint_of(dep_node_index);
514+
let old_hash = self.dep_graph.prev_fingerprint_of(dep_node);
521515

522-
assert!(new_hash == old_hash, "Found unstable fingerprints \
516+
assert!(Some(new_hash) == old_hash, "Found unstable fingerprints \
523517
for {:?}", dep_node);
524518
}
525519

@@ -566,7 +560,7 @@ impl<'tcx> TyCtxt<'tcx> {
566560
profq_msg!(self, ProfileQueriesMsg::ProviderEnd);
567561

568562
if unlikely!(self.sess.opts.debugging_opts.query_dep_graph) {
569-
self.dep_graph.mark_loaded_from_cache(dep_node_index, false);
563+
self.dep_graph.mark_loaded_from_cache(dep_node, false);
570564
}
571565

572566
if dep_node.kind != crate::dep_graph::DepKind::Null {
@@ -683,7 +677,7 @@ macro_rules! define_queries_inner {
683677
use std::mem;
684678
#[cfg(parallel_compiler)]
685679
use ty::query::job::QueryResult;
686-
use rustc_data_structures::sync::Lock;
680+
use rustc_data_structures::sharded::Sharded;
687681
use crate::{
688682
rustc_data_structures::stable_hasher::HashStable,
689683
rustc_data_structures::stable_hasher::StableHasherResult,
@@ -715,18 +709,17 @@ macro_rules! define_queries_inner {
715709
pub fn collect_active_jobs(&self) -> Vec<Lrc<QueryJob<$tcx>>> {
716710
let mut jobs = Vec::new();
717711

718-
// We use try_lock here since we are only called from the
712+
// We use try_lock_shards here since we are only called from the
719713
// deadlock handler, and this shouldn't be locked.
720714
$(
721-
jobs.extend(
722-
self.$name.try_lock().unwrap().active.values().filter_map(|v|
723-
if let QueryResult::Started(ref job) = *v {
724-
Some(job.clone())
725-
} else {
726-
None
727-
}
728-
)
729-
);
715+
let shards = self.$name.try_lock_shards().unwrap();
716+
jobs.extend(shards.iter().flat_map(|shard| shard.active.values().filter_map(|v|
717+
if let QueryResult::Started(ref job) = *v {
718+
Some(job.clone())
719+
} else {
720+
None
721+
}
722+
)));
730723
)*
731724

732725
jobs
@@ -748,26 +741,27 @@ macro_rules! define_queries_inner {
748741

749742
fn stats<'tcx, Q: QueryConfig<'tcx>>(
750743
name: &'static str,
751-
map: &QueryCache<'tcx, Q>
744+
map: &Sharded<QueryCache<'tcx, Q>>,
752745
) -> QueryStats {
746+
let map = map.lock_shards();
753747
QueryStats {
754748
name,
755749
#[cfg(debug_assertions)]
756-
cache_hits: map.cache_hits,
750+
cache_hits: map.iter().map(|shard| shard.cache_hits).sum(),
757751
#[cfg(not(debug_assertions))]
758752
cache_hits: 0,
759753
key_size: mem::size_of::<Q::Key>(),
760754
key_type: unsafe { type_name::<Q::Key>() },
761755
value_size: mem::size_of::<Q::Value>(),
762756
value_type: unsafe { type_name::<Q::Value>() },
763-
entry_count: map.results.len(),
757+
entry_count: map.iter().map(|shard| shard.results.len()).sum(),
764758
}
765759
}
766760

767761
$(
768762
queries.push(stats::<queries::$name<'_>>(
769763
stringify!($name),
770-
&*self.$name.lock()
764+
&self.$name,
771765
));
772766
)*
773767

@@ -939,7 +933,7 @@ macro_rules! define_queries_inner {
939933
}
940934

941935
#[inline(always)]
942-
fn query_cache<'a>(tcx: TyCtxt<$tcx>) -> &'a Lock<QueryCache<$tcx, Self>> {
936+
fn query_cache<'a>(tcx: TyCtxt<$tcx>) -> &'a Sharded<QueryCache<$tcx, Self>> {
943937
&tcx.queries.$name
944938
}
945939

@@ -1066,7 +1060,7 @@ macro_rules! define_queries_struct {
10661060
providers: IndexVec<CrateNum, Providers<$tcx>>,
10671061
fallback_extern_providers: Box<Providers<$tcx>>,
10681062

1069-
$($(#[$attr])* $name: Lock<QueryCache<$tcx, queries::$name<$tcx>>>,)*
1063+
$($(#[$attr])* $name: Sharded<QueryCache<$tcx, queries::$name<$tcx>>>,)*
10701064
}
10711065
};
10721066
}

‎src/librustc_data_structures/Cargo.toml

+1
Original file line numberDiff line numberDiff line change
@@ -19,6 +19,7 @@ rustc_cratesio_shim = { path = "../librustc_cratesio_shim" }
1919
serialize = { path = "../libserialize" }
2020
graphviz = { path = "../libgraphviz" }
2121
cfg-if = "0.1.2"
22+
crossbeam-utils = { version = "0.6.5", features = ["nightly"] }
2223
stable_deref_trait = "1.0.0"
2324
rayon = { version = "0.2.0", package = "rustc-rayon" }
2425
rayon-core = { version = "0.2.0", package = "rustc-rayon-core" }

‎src/librustc_data_structures/interner.rs

-58
This file was deleted.

‎src/librustc_data_structures/lib.rs

+1-1
Original file line numberDiff line numberDiff line change
@@ -79,7 +79,6 @@ pub mod flock;
7979
pub mod fx;
8080
pub mod graph;
8181
pub mod indexed_vec;
82-
pub mod interner;
8382
pub mod jobserver;
8483
pub mod obligation_forest;
8584
pub mod owning_ref;
@@ -91,6 +90,7 @@ pub use ena::snapshot_vec;
9190
pub mod sorted_map;
9291
#[macro_use] pub mod stable_hasher;
9392
pub mod sync;
93+
pub mod sharded;
9494
pub mod tiny_list;
9595
pub mod thin_vec;
9696
pub mod transitive_relation;
+163
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,163 @@
1+
use std::hash::{Hasher, Hash};
2+
use std::mem;
3+
use std::borrow::Borrow;
4+
use std::collections::hash_map::RawEntryMut;
5+
use smallvec::SmallVec;
6+
use crate::fx::{FxHasher, FxHashMap};
7+
use crate::sync::{Lock, LockGuard};
8+
9+
/// Pads/aligns its contents to a cache line (64 bytes) when the parallel
/// compiler is enabled, so each shard's lock sits on its own cache line and
/// lock traffic on one shard does not false-share with its neighbors.
#[derive(Clone, Default)]
#[cfg_attr(parallel_compiler, repr(align(64)))]
struct CacheAligned<T>(T);

// 2^5 = 32 shards when the compiler is built for parallel execution...
#[cfg(parallel_compiler)]
const SHARD_BITS: usize = 5;

// ...and a single shard (degenerating to one plain lock) otherwise.
#[cfg(not(parallel_compiler))]
const SHARD_BITS: usize = 0;

/// Number of shards in a `Sharded<T>`; always a power of two so that
/// `hash % SHARDS` / bit masking stays cheap.
pub const SHARDS: usize = 1 << SHARD_BITS;

/// An array of cache-line aligned inner locked structures with convenience methods.
#[derive(Clone)]
pub struct Sharded<T> {
    shards: [CacheAligned<Lock<T>>; SHARDS],
}
26+
27+
impl<T: Default> Default for Sharded<T> {
28+
#[inline]
29+
fn default() -> Self {
30+
Self::new(|_| T::default())
31+
}
32+
}
33+
34+
impl<T> Sharded<T> {
35+
#[inline]
36+
pub fn new(mut value: impl FnMut(usize) -> T) -> Self {
37+
// Create a vector of the values we want
38+
let mut values: SmallVec<[_; SHARDS]> = (0..SHARDS).map(|i| {
39+
CacheAligned(Lock::new(value(i)))
40+
}).collect();
41+
42+
// Create an unintialized array
43+
let mut shards: mem::MaybeUninit<[CacheAligned<Lock<T>>; SHARDS]> =
44+
mem::MaybeUninit::uninit();
45+
46+
unsafe {
47+
// Copy the values into our array
48+
let first = shards.as_mut_ptr() as *mut CacheAligned<Lock<T>>;
49+
values.as_ptr().copy_to_nonoverlapping(first, SHARDS);
50+
51+
// Ignore the content of the vector
52+
values.set_len(0);
53+
54+
Sharded {
55+
shards: shards.assume_init(),
56+
}
57+
}
58+
}
59+
60+
#[inline]
61+
pub fn get_shard_by_index(&self, index: usize) -> &Lock<T> {
62+
&self.shards[index % SHARDS].0
63+
}
64+
65+
#[inline]
66+
pub fn get_shard_by_value<K: Hash + ?Sized>(&self, val: &K) -> &Lock<T> {
67+
if SHARDS == 1 {
68+
&self.shards[0].0
69+
} else {
70+
self.get_shard_by_hash(make_hash(val))
71+
}
72+
}
73+
74+
#[inline]
75+
pub fn get_shard_by_value_mut<K: Hash + ?Sized>(&mut self, val: &K) -> &mut T {
76+
if SHARDS == 1 {
77+
self.shards[0].0.get_mut()
78+
} else {
79+
self.shards[Self::get_shard_index_by_hash(make_hash(val))].0.get_mut()
80+
}
81+
}
82+
83+
#[inline]
84+
fn get_shard_index_by_hash(hash: u64) -> usize {
85+
let hash_len = mem::size_of::<usize>();
86+
// Ignore the top 7 bits as hashbrown uses these and get the next SHARD_BITS highest bits.
87+
// hashbrown also uses the lowest bits, so we can't use those
88+
let bits = (hash >> (hash_len * 8 - 7 - SHARD_BITS)) as usize;
89+
bits % SHARDS
90+
}
91+
92+
#[inline]
93+
pub fn get_shard_by_hash(&self, hash: u64) -> &Lock<T> {
94+
&self.shards[Self::get_shard_index_by_hash(hash)].0
95+
}
96+
97+
pub fn lock_shards(&self) -> Vec<LockGuard<'_, T>> {
98+
(0..SHARDS).map(|i| self.shards[i].0.lock()).collect()
99+
}
100+
101+
pub fn try_lock_shards(&self) -> Option<Vec<LockGuard<'_, T>>> {
102+
let mut result = Vec::with_capacity(SHARDS);
103+
for i in 0..SHARDS {
104+
result.push(self.shards[i].0.try_lock()?);
105+
}
106+
Some(result)
107+
}
108+
}
109+
110+
pub type ShardedHashMap<K, V> = Sharded<FxHashMap<K, V>>;
111+
112+
impl<K: Eq + Hash, V> ShardedHashMap<K, V> {
113+
pub fn len(&self) -> usize{
114+
self.lock_shards().iter().map(|shard| shard.len()).sum()
115+
}
116+
}
117+
118+
impl<K: Eq + Hash + Copy> ShardedHashMap<K, ()> {
    /// Interns by borrowed lookup key: returns the existing `K` equal to
    /// `value` if present, otherwise inserts `make()` and returns it.
    ///
    /// The hash is computed once and reused for both shard selection and the
    /// raw-entry lookup; the `_nocheck` variants are sound because the map's
    /// hasher is the same `FxHasher` used by `make_hash`.
    #[inline]
    pub fn intern_ref<Q: ?Sized>(&self, value: &Q, make: impl FnOnce() -> K) -> K
        where K: Borrow<Q>,
              Q: Hash + Eq
    {
        let hash = make_hash(value);
        let mut shard = self.get_shard_by_hash(hash).lock();
        let entry = shard.raw_entry_mut().from_key_hashed_nocheck(hash, value);

        match entry {
            RawEntryMut::Occupied(e) => *e.key(),
            RawEntryMut::Vacant(e) => {
                // Not present: build the interned key and insert it under the
                // precomputed hash. The inserted `v` must hash equal to `value`
                // (callers uphold `K: Borrow<Q>` coherence).
                let v = make();
                e.insert_hashed_nocheck(hash, v, ());
                v
            }
        }
    }

    /// Interns by owned lookup key: like `intern_ref`, but `value` is moved
    /// into `make` on a miss so the caller can convert it into `K` without
    /// re-cloning.
    #[inline]
    pub fn intern<Q>(&self, value: Q, make: impl FnOnce(Q) -> K) -> K
        where K: Borrow<Q>,
              Q: Hash + Eq
    {
        let hash = make_hash(&value);
        let mut shard = self.get_shard_by_hash(hash).lock();
        let entry = shard.raw_entry_mut().from_key_hashed_nocheck(hash, &value);

        match entry {
            RawEntryMut::Occupied(e) => *e.key(),
            RawEntryMut::Vacant(e) => {
                // Miss: consume `value` to build the key, insert under the
                // hash computed before `value` was moved.
                let v = make(value);
                e.insert_hashed_nocheck(hash, v, ());
                v
            }
        }
    }
}
157+
158+
#[inline]
159+
fn make_hash<K: Hash + ?Sized>(val: &K) -> u64 {
160+
let mut state = FxHasher::default();
161+
val.hash(&mut state);
162+
state.finish()
163+
}

‎src/librustc_data_structures/sync.rs

+70-1
Original file line numberDiff line numberDiff line change
@@ -67,6 +67,71 @@ cfg_if! {
6767
use std::ops::Add;
6868
use std::panic::{resume_unwind, catch_unwind, AssertUnwindSafe};
6969

70+
// NOTE(review): this appears to be the non-`parallel_compiler` branch of the
// surrounding `cfg_if!` — the other branch re-exports
// `crossbeam_utils::atomic::AtomicCell` instead. These operations are NOT
// actually atomic; they are plain `Cell` accesses and rely on the compiler
// running single-threaded in this configuration. Confirm against the full
// `cfg_if!` block.
#[derive(Debug)]
pub struct AtomicCell<T: Copy>(Cell<T>);

impl<T: Copy> AtomicCell<T> {
    /// Wraps `v` in a new cell.
    #[inline]
    pub fn new(v: T) -> Self {
        AtomicCell(Cell::new(v))
    }

    /// Exclusive access needs no synchronization even in the parallel API,
    /// so this mirrors `Cell::get_mut` directly.
    #[inline]
    pub fn get_mut(&mut self) -> &mut T {
        self.0.get_mut()
    }
}

impl<T: Copy> AtomicCell<T> {
    /// Consumes the cell, returning the contained value.
    pub fn into_inner(self) -> T {
        self.0.into_inner()
    }

    /// Reads the current value (plain `Cell::get`, not an atomic load).
    #[inline]
    pub fn load(&self) -> T {
        self.0.get()
    }

    /// Writes `val` (plain `Cell::set`, not an atomic store).
    #[inline]
    pub fn store(&self, val: T) {
        self.0.set(val)
    }

    /// Replaces the value, returning the previous one.
    pub fn swap(&self, val: T) -> T {
        self.0.replace(val)
    }
}

impl<T: Copy + PartialEq> AtomicCell<T> {
    /// CAS that returns the previous value whether or not the swap happened,
    /// matching the `std` atomic `compare_and_swap` signature.
    pub fn compare_and_swap(&self, current: T, new: T) -> T {
        match self.compare_exchange(current, new) {
            Ok(v) => v,
            Err(v) => v,
        }
    }

    /// Stores `new` only if the current value equals `current`.
    /// Returns `Ok(previous)` on success, `Err(actual)` on mismatch.
    /// Non-atomic: read and write are two separate `Cell` operations.
    pub fn compare_exchange(&self,
                            current: T,
                            new: T)
                            -> Result<T, T> {
        let read = self.0.get();
        if read == current {
            self.0.set(new);
            Ok(read)
        } else {
            Err(read)
        }
    }
}

impl<T: Add<Output=T> + Copy> AtomicCell<T> {
    /// Adds `val` to the current value and returns the previous value
    /// (read-modify-write as two plain `Cell` operations).
    pub fn fetch_add(&self, val: T) -> T {
        let old = self.0.get();
        self.0.set(old + val);
        old
    }
}
134+
70135
#[derive(Debug)]
71136
pub struct Atomic<T: Copy>(Cell<T>);
72137

@@ -77,7 +142,7 @@ cfg_if! {
77142
}
78143
}
79144

80-
impl<T: Copy + PartialEq> Atomic<T> {
145+
impl<T: Copy> Atomic<T> {
81146
pub fn into_inner(self) -> T {
82147
self.0.into_inner()
83148
}
@@ -95,7 +160,9 @@ cfg_if! {
95160
pub fn swap(&self, val: T, _: Ordering) -> T {
96161
self.0.replace(val)
97162
}
163+
}
98164

165+
impl<T: Copy + PartialEq> Atomic<T> {
99166
pub fn compare_exchange(&self,
100167
current: T,
101168
new: T,
@@ -271,6 +338,8 @@ cfg_if! {
271338

272339
pub use std::sync::atomic::{AtomicBool, AtomicUsize, AtomicU32, AtomicU64};
273340

341+
pub use crossbeam_utils::atomic::AtomicCell;
342+
274343
pub use std::sync::Arc as Lrc;
275344
pub use std::sync::Weak as Weak;
276345

‎src/librustc_incremental/persist/load.rs

+19-7
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,7 @@
11
//! Code to save/load the dep-graph from files.
22
33
use rustc_data_structures::fx::FxHashMap;
4-
use rustc::dep_graph::{PreviousDepGraph, SerializedDepGraph, WorkProduct, WorkProductId};
4+
use rustc::dep_graph::{PreviousDepGraph, DepGraphArgs, SerializedDepGraph, CurrentDepGraph};
55
use rustc::session::Session;
66
use rustc::ty::TyCtxt;
77
use rustc::ty::query::OnDiskCache;
@@ -23,16 +23,14 @@ pub fn dep_graph_tcx_init<'tcx>(tcx: TyCtxt<'tcx>) {
2323
tcx.allocate_metadata_dep_nodes();
2424
}
2525

26-
type WorkProductMap = FxHashMap<WorkProductId, WorkProduct>;
27-
2826
pub enum LoadResult<T> {
2927
Ok { data: T },
3028
DataOutOfDate,
3129
Error { message: String },
3230
}
3331

34-
impl LoadResult<(PreviousDepGraph, WorkProductMap)> {
35-
pub fn open(self, sess: &Session) -> (PreviousDepGraph, WorkProductMap) {
32+
impl LoadResult<DepGraphArgs> {
33+
pub fn open(self, sess: &Session) -> DepGraphArgs {
3634
match self {
3735
LoadResult::Error { message } => {
3836
sess.warn(&message);
@@ -93,7 +91,7 @@ impl<T> MaybeAsync<T> {
9391
}
9492
}
9593

96-
pub type DepGraphFuture = MaybeAsync<LoadResult<(PreviousDepGraph, WorkProductMap)>>;
94+
pub type DepGraphFuture = MaybeAsync<LoadResult<DepGraphArgs>>;
9795

9896
/// Launch a thread and load the dependency graph in the background.
9997
pub fn load_dep_graph(sess: &Session) -> DepGraphFuture {
@@ -185,7 +183,21 @@ pub fn load_dep_graph(sess: &Session) -> DepGraphFuture {
185183
let dep_graph = SerializedDepGraph::decode(&mut decoder)
186184
.expect("Error reading cached dep-graph");
187185

188-
LoadResult::Ok { data: (PreviousDepGraph::new(dep_graph), prev_work_products) }
186+
let (prev_graph, state) = PreviousDepGraph::new_and_state(dep_graph);
187+
let current = time_ext(
188+
time_passes,
189+
None,
190+
"background setup current dep-graph",
191+
|| CurrentDepGraph::new(&prev_graph),
192+
);
193+
LoadResult::Ok {
194+
data: DepGraphArgs {
195+
state,
196+
prev_graph,
197+
prev_work_products,
198+
current,
199+
}
200+
}
189201
}
190202
}
191203
})

‎src/librustc_interface/queries.rs

+2-2
Original file line numberDiff line numberDiff line change
@@ -173,13 +173,13 @@ impl Compiler {
173173
Ok(match self.dep_graph_future()?.take() {
174174
None => DepGraph::new_disabled(),
175175
Some(future) => {
176-
let (prev_graph, prev_work_products) =
176+
let args =
177177
time(self.session(), "blocked while dep-graph loading finishes", || {
178178
future.open().unwrap_or_else(|e| rustc_incremental::LoadResult::Error {
179179
message: format!("could not decode incremental cache: {:?}", e),
180180
}).open(self.session())
181181
});
182-
DepGraph::new(prev_graph, prev_work_products)
182+
DepGraph::new(args)
183183
}
184184
})
185185
})

‎src/librustc_macros/src/query.rs

+2-2
Original file line numberDiff line numberDiff line change
@@ -328,7 +328,7 @@ fn add_query_description_impl(
328328
#[inline]
329329
fn try_load_from_disk(
330330
#tcx: TyCtxt<'tcx>,
331-
#id: SerializedDepNodeIndex
331+
#id: DepNodeIndex
332332
) -> Option<Self::Value> {
333333
#block
334334
}
@@ -339,7 +339,7 @@ fn add_query_description_impl(
339339
#[inline]
340340
fn try_load_from_disk(
341341
tcx: TyCtxt<'tcx>,
342-
id: SerializedDepNodeIndex
342+
id: DepNodeIndex
343343
) -> Option<Self::Value> {
344344
tcx.queries.on_disk_cache.try_load_query_result(tcx, id)
345345
}

‎src/librustc_typeck/coherence/inherent_impls.rs

+4-29
Original file line numberDiff line numberDiff line change
@@ -7,7 +7,6 @@
77
//! `tcx.inherent_impls(def_id)`). That value, however,
88
//! is computed by selecting an idea from this table.
99
10-
use rustc::dep_graph::DepKind;
1110
use rustc::hir::def_id::{CrateNum, DefId, LOCAL_CRATE};
1211
use rustc::hir;
1312
use rustc::hir::itemlikevisit::ItemLikeVisitor;
@@ -36,35 +35,11 @@ pub fn crate_inherent_impls<'tcx>(
3635
pub fn inherent_impls<'tcx>(tcx: TyCtxt<'tcx>, ty_def_id: DefId) -> &'tcx [DefId] {
3736
assert!(ty_def_id.is_local());
3837

39-
// NB. Until we adopt the red-green dep-tracking algorithm (see
40-
// [the plan] for details on that), we do some hackery here to get
41-
// the dependencies correct. Basically, we use a `with_ignore` to
42-
// read the result we want. If we didn't have the `with_ignore`,
43-
// we would wind up with a dependency on the entire crate, which
44-
// we don't want. Then we go and add dependencies on all the impls
45-
// in the result (which is what we wanted).
46-
//
47-
// The result is a graph with an edge from `Hir(I)` for every impl
48-
// `I` defined on some type `T` to `CoherentInherentImpls(T)`,
49-
// thus ensuring that if any of those impls change, the set of
50-
// inherent impls is considered dirty.
51-
//
52-
// [the plan]: https://github.com/rust-lang/rust-roadmap/issues/4
53-
54-
let result = tcx.dep_graph.with_ignore(|| {
55-
let crate_map = tcx.crate_inherent_impls(ty_def_id.krate);
56-
match crate_map.inherent_impls.get(&ty_def_id) {
57-
Some(v) => &v[..],
58-
None => &[],
59-
}
60-
});
61-
62-
for &impl_def_id in &result[..] {
63-
let def_path_hash = tcx.def_path_hash(impl_def_id);
64-
tcx.dep_graph.read(def_path_hash.to_dep_node(DepKind::Hir));
38+
let crate_map = tcx.crate_inherent_impls(ty_def_id.krate);
39+
match crate_map.inherent_impls.get(&ty_def_id) {
40+
Some(v) => &v[..],
41+
None => &[],
6542
}
66-
67-
result
6843
}
6944

7045
struct InherentCollect<'tcx> {

0 commit comments

Comments
 (0)
Please sign in to comment.