@@ -133,7 +133,7 @@ impl<D: Deps> DepGraph<D> {
         let colors = DepNodeColorMap::new(prev_graph_node_count);
 
         // Instantiate a dependency-less node only once for anonymous queries.
-        let _green_node_index = current.intern_new_node(
+        let _green_node_index = current.alloc_new_node(
             profiler,
             DepNode { kind: D::DEP_KIND_NULL, hash: current.anon_id_seed.into() },
             EdgesVec::new(),
@@ -272,6 +272,7 @@ impl<D: Deps> DepGraph<D> {
         D::with_deps(TaskDepsRef::Forbid, op)
     }
 
+    // FIXME(sparse_fps): Document
     #[inline(always)]
     pub fn with_task<Ctxt: HasDepContext<Deps = D>, A: Debug, R>(
         &self,
@@ -287,6 +288,7 @@ impl<D: Deps> DepGraph<D> {
         }
     }
 
+    // FIXME(sparse_fps): Document
     pub fn with_anon_task<Tcx: DepContext<Deps = D>, OP, R>(
         &self,
         cx: Tcx,
@@ -297,7 +299,7 @@ impl<D: Deps> DepGraph<D> {
         OP: FnOnce() -> R,
     {
         match self.data() {
-            Some(data) => data.with_anon_task(cx, dep_kind, op),
+            Some(data) => data.with_anon_task(cx, dep_kind, true, op),
             None => (op(), self.next_virtual_depnode_index()),
         }
     }
@@ -395,61 +397,71 @@ impl<D: Deps> DepGraphData<D> {
         (result, dep_node_index)
     }
 
+    // FIXME(sparse_fps): Document
     /// Executes something within an "anonymous" task, that is, a task the
     /// `DepNode` of which is determined by the list of inputs it read from.
     pub(crate) fn with_anon_task<Tcx: DepContext<Deps = D>, OP, R>(
         &self,
         cx: Tcx,
         dep_kind: DepKind,
+        intern: bool,
         op: OP,
     ) -> (R, DepNodeIndex)
     where
         OP: FnOnce() -> R,
     {
-        debug_assert!(!cx.is_eval_always(dep_kind));
-
         let task_deps = Lock::new(TaskDeps::default());
         let result = D::with_deps(TaskDepsRef::Allow(&task_deps), op);
         let task_deps = task_deps.into_inner();
         let task_deps = task_deps.reads;
 
-        let dep_node_index = match task_deps.len() {
-            0 => {
-                // Because the dep-node id of anon nodes is computed from the sets of its
-                // dependencies we already know what the ID of this dependency-less node is
-                // going to be (i.e. equal to the precomputed
-                // `SINGLETON_DEPENDENCYLESS_ANON_NODE`). As a consequence we can skip creating
-                // a `StableHasher` and sending the node through interning.
-                DepNodeIndex::SINGLETON_DEPENDENCYLESS_ANON_NODE
-            }
-            1 => {
-                // When there is only one dependency, don't bother creating a node.
-                task_deps[0]
-            }
-            _ => {
-                // The dep node indices are hashed here instead of hashing the dep nodes of the
-                // dependencies. These indices may refer to different nodes per session, but this isn't
-                // a problem here because we ensure that the final dep node hash is per session only by
-                // combining it with the per session random number `anon_id_seed`. This hash only needs
-                // to map the dependencies to a single value on a per session basis.
-                let mut hasher = StableHasher::new();
-                task_deps.hash(&mut hasher);
-
-                let target_dep_node = DepNode {
-                    kind: dep_kind,
-                    // Fingerprint::combine() is faster than sending Fingerprint
-                    // through the StableHasher (at least as long as StableHasher
-                    // is so slow).
-                    hash: self.current.anon_id_seed.combine(hasher.finish()).into(),
-                };
-
-                self.current.intern_new_node(
-                    cx.profiler(),
-                    target_dep_node,
-                    task_deps,
-                    Fingerprint::ZERO,
-                )
+        let dep_node_index = if intern {
+            // FIXME(sparse_fp): what is this assertion about?
+            debug_assert!(!cx.is_eval_always(dep_kind));
+
+            match task_deps.len() {
+                0 => {
+                    // Because the dep-node id of anon nodes is computed from the sets of its
+                    // dependencies we already know what the ID of this dependency-less node is
+                    // going to be (i.e. equal to the precomputed
+                    // `SINGLETON_DEPENDENCYLESS_ANON_NODE`). As a consequence we can skip creating
+                    // a `StableHasher` and sending the node through interning.
+                    DepNodeIndex::SINGLETON_DEPENDENCYLESS_ANON_NODE
+                }
+                1 => {
+                    // When there is only one dependency, don't bother creating a node.
+                    task_deps[0]
+                }
+                _ => {
+                    // The dep node indices are hashed here instead of hashing the dep nodes of the
+                    // dependencies. These indices may refer to different nodes per session, but this isn't
+                    // a problem here because we ensure that the final dep node hash is per session only by
+                    // combining it with the per session random number `anon_id_seed`. This hash only needs
+                    // to map the dependencies to a single value on a per session basis.
+                    let mut hasher = StableHasher::new();
+                    task_deps.hash(&mut hasher);
+                    dep_kind.hash(&mut hasher);
+
+                    let dedup_fingerprint: Fingerprint = hasher.finish();
+
+                    match self
+                        .current
+                        .interned_node_to_index
+                        .lock_shard_by_value(&dedup_fingerprint)
+                        .entry(dedup_fingerprint)
+                    {
+                        Entry::Occupied(entry) => *entry.get(),
+                        Entry::Vacant(entry) => {
+                            let dep_node_index =
+                                self.current.alloc_anon_node(cx.profiler(), dep_kind, task_deps);
+                            entry.insert(dep_node_index);
+                            dep_node_index
+                        }
+                    }
+                }
             }
+        } else {
+            self.current.alloc_anon_node(cx.profiler(), dep_kind, task_deps)
         };
 
         (result, dep_node_index)
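
For illustration, here is a minimal standalone sketch of the dedup-by-fingerprint scheme used in the hunk above. It is not part of the patch and uses assumed stand-in types (a plain HashMap and DefaultHasher, u32 indices) in place of rustc's sharded `interned_node_to_index` map, `StableHasher`, and `Fingerprint`: identical (dep kind, dependency edges) pairs hash to the same fingerprint and therefore reuse one allocated index, while the non-interning path always allocates a fresh one.

// Standalone sketch (assumed simplified types, not rustc's actual API):
// anonymous nodes are deduplicated by hashing their dependency edges plus the
// dep kind into a fingerprint and reusing the index stored for that fingerprint.
use std::collections::hash_map::{DefaultHasher, Entry};
use std::collections::HashMap;
use std::hash::{Hash, Hasher};

type DepNodeIndex = u32;
type DepKind = u16;

struct AnonNodeTable {
    // Stand-in for the sharded `interned_node_to_index` map keyed by fingerprint.
    by_fingerprint: HashMap<u64, DepNodeIndex>,
    next_index: DepNodeIndex,
}

impl AnonNodeTable {
    fn new() -> Self {
        AnonNodeTable { by_fingerprint: HashMap::new(), next_index: 0 }
    }

    // Non-interning path (`intern == false`): always allocate a fresh index.
    fn alloc(&mut self) -> DepNodeIndex {
        let idx = self.next_index;
        self.next_index += 1;
        idx
    }

    // Interning path (`intern == true`): identical (kind, edges) reuse one index.
    fn intern(&mut self, kind: DepKind, edges: &[DepNodeIndex]) -> DepNodeIndex {
        let mut hasher = DefaultHasher::new();
        edges.hash(&mut hasher);
        kind.hash(&mut hasher);
        let dedup_fingerprint = hasher.finish();

        match self.by_fingerprint.entry(dedup_fingerprint) {
            Entry::Occupied(entry) => *entry.get(),
            Entry::Vacant(entry) => {
                // Allocate only on a miss, then remember the index for this fingerprint.
                let idx = self.next_index;
                self.next_index += 1;
                *entry.insert(idx)
            }
        }
    }
}

fn main() {
    let mut table = AnonNodeTable::new();
    let a = table.intern(1, &[3, 5, 8]);
    let b = table.intern(1, &[3, 5, 8]); // same kind and edges: index is reused
    let c = table.intern(2, &[3, 5, 8]); // different kind: new index
    let d = table.alloc(); // non-interned: always a fresh index
    assert_eq!(a, b);
    assert_ne!(a, c);
    assert_ne!(c, d);
    println!("a={a} b={b} c={c} d={d}");
}

The real patch keys a `Sharded<FxHashMap<Fingerprint, DepNodeIndex>>` by a fingerprint built with `StableHasher` and calls `alloc_anon_node` only on a vacant entry.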
@@ -616,18 +628,20 @@ impl<D: Deps> DepGraph<D> {
 }
 
 impl<D: Deps> DepGraphData<D> {
-    #[inline]
-    fn dep_node_index_of_opt(&self, dep_node: &DepNode) -> Option<DepNodeIndex> {
-        if let Some(prev_index) = self.previous.node_to_index_opt(dep_node) {
-            self.current.prev_index_to_index.lock()[prev_index]
-        } else {
-            self.current.new_node_to_index.lock_shard_by_value(dep_node).get(dep_node).copied()
-        }
-    }
+    // #[inline]
+    // fn dep_node_index_of_opt(&self, dep_node: &DepNode) -> Option<DepNodeIndex> {
+    //     if let Some(prev_index) = self.previous.node_to_index_opt(dep_node) {
+    //         self.current.prev_index_to_index.lock()[prev_index]
+    //     } else {
+    //         self.current.interned_node_to_index.lock_shard_by_value(dep_node).get(dep_node).copied()
+    //     }
+    // }
 
     #[inline]
-    fn dep_node_exists(&self, dep_node: &DepNode) -> bool {
-        self.dep_node_index_of_opt(dep_node).is_some()
+    fn dep_node_exists(&self, _dep_node: &DepNode) -> bool {
+        // FIXME(sparse_fps): bring back assertions
+        //self.dep_node_index_of_opt(dep_node).is_some()
+        false
     }
 
     fn node_color(&self, dep_node: &DepNode) -> Option<DepNodeColor> {
@@ -1071,7 +1085,7 @@ rustc_index::newtype_index! {
 /// first, and `data` second.
 pub(super) struct CurrentDepGraph<D: Deps> {
     encoder: Steal<GraphEncoder<D>>,
-    new_node_to_index: Sharded<FxHashMap<DepNode, DepNodeIndex>>,
+    interned_node_to_index: Sharded<FxHashMap<Fingerprint, DepNodeIndex>>,
     prev_index_to_index: Lock<IndexVec<SerializedDepNodeIndex, Option<DepNodeIndex>>>,
 
     /// This is used to verify that fingerprints do not change between the creation of a node
@@ -1152,7 +1166,7 @@ impl<D: Deps> CurrentDepGraph<D> {
                 record_graph,
                 record_stats,
             )),
-            new_node_to_index: Sharded::new(|| {
+            interned_node_to_index: Sharded::new(|| {
                 FxHashMap::with_capacity_and_hasher(
                     new_node_count_estimate / sharded::shards(),
                     Default::default(),
@@ -1182,29 +1196,30 @@ impl<D: Deps> CurrentDepGraph<D> {
     /// Writes the node to the current dep-graph and allocates a `DepNodeIndex` for it.
     /// Assumes that this is a node that has no equivalent in the previous dep-graph.
     #[inline(always)]
-    fn intern_new_node(
+    fn alloc_new_node(
         &self,
         profiler: &SelfProfilerRef,
         key: DepNode,
         edges: EdgesVec,
         current_fingerprint: Fingerprint,
     ) -> DepNodeIndex {
-        let dep_node_index = match self.new_node_to_index.lock_shard_by_value(&key).entry(key) {
-            Entry::Occupied(entry) => *entry.get(),
-            Entry::Vacant(entry) => {
-                let dep_node_index =
-                    self.encoder.borrow().send(profiler, key, current_fingerprint, edges);
-                entry.insert(dep_node_index);
-                dep_node_index
-            }
-        };
-
+        let dep_node_index = self.encoder.borrow().send(profiler, key, current_fingerprint, edges);
         #[cfg(debug_assertions)]
         self.record_edge(dep_node_index, key, current_fingerprint);
 
         dep_node_index
     }
 
+    #[inline(always)]
+    fn alloc_anon_node(
+        &self,
+        profiler: &SelfProfilerRef,
+        dep_kind: DepKind,
+        edges: EdgesVec,
+    ) -> DepNodeIndex {
+        self.encoder.borrow().send_anon_node(profiler, dep_kind, edges)
+    }
+
     fn intern_node(
         &self,
         profiler: &SelfProfilerRef,
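
For context, a small sketch (assumed toy types, not the real `GraphEncoder`) of the behavioral consequence of this hunk: `alloc_new_node` no longer consults a dedup map before calling `send`, so every call allocates a fresh index, and any deduplication has to happen in the caller, as `with_anon_task` now does through the fingerprint map.

// Toy stand-in for the encoder: `send` unconditionally hands out the next index,
// mirroring how `alloc_new_node` now forwards straight to the encoder.
struct ToyEncoder {
    next_index: u32,
}

impl ToyEncoder {
    fn new() -> Self {
        ToyEncoder { next_index: 0 }
    }

    fn send(&mut self, _key: &str) -> u32 {
        let idx = self.next_index;
        self.next_index += 1;
        idx
    }
}

fn main() {
    let mut encoder = ToyEncoder::new();
    let first = encoder.send("same-node");
    let second = encoder.send("same-node");
    // Without a caller-side lookup, identical keys now receive distinct indices.
    assert_ne!(first, second);
    println!("first={first} second={second}");
}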
@@ -1262,7 +1277,7 @@ impl<D: Deps> CurrentDepGraph<D> {
         let fingerprint = fingerprint.unwrap_or(Fingerprint::ZERO);
 
         // This is a new node: it didn't exist in the previous compilation session.
-        let dep_node_index = self.intern_new_node(profiler, key, edges, fingerprint);
+        let dep_node_index = self.alloc_new_node(profiler, key, edges, fingerprint);
 
         (dep_node_index, None)
     }
@@ -1274,7 +1289,8 @@ impl<D: Deps> CurrentDepGraph<D> {
         prev_graph: &SerializedDepGraph,
         prev_index: SerializedDepNodeIndex,
     ) -> DepNodeIndex {
-        self.debug_assert_not_in_new_nodes(prev_graph, prev_index);
+        // FIXME(sparse_fp): restore assertions
+        // self.debug_assert_not_in_new_nodes(prev_graph, prev_index);
 
         let mut prev_index_to_index = self.prev_index_to_index.lock();
 
@@ -1296,18 +1312,19 @@ impl<D: Deps> CurrentDepGraph<D> {
         }
     }
 
-    #[inline]
-    fn debug_assert_not_in_new_nodes(
-        &self,
-        prev_graph: &SerializedDepGraph,
-        prev_index: SerializedDepNodeIndex,
-    ) {
-        let node = &prev_graph.index_to_node(prev_index);
-        debug_assert!(
-            !self.new_node_to_index.lock_shard_by_value(node).contains_key(node),
-            "node from previous graph present in new node collection"
-        );
-    }
+    // FIXME(sparse_fp): restore assertions
+    // #[inline]
+    // fn debug_assert_not_in_new_nodes(
+    //     &self,
+    //     prev_graph: &SerializedDepGraph,
+    //     prev_index: SerializedDepNodeIndex,
+    // ) {
+    //     let node = &prev_graph.index_to_node(prev_index);
+    //     debug_assert!(
+    //         !self.interned_node_to_index.lock_shard_by_value(node).contains_key(node),
+    //         "node from previous graph present in new node collection"
+    //     );
+    // }
 }
 
 #[derive(Debug, Clone, Copy)]