@@ -2,7 +2,7 @@
 //! generate the actual methods on tcx which find and execute the provider,
 //! manage the caches, and so forth.
 
-use crate::dep_graph::{DepNodeIndex, DepNode, DepKind, SerializedDepNodeIndex};
+use crate::dep_graph::{DepNodeIndex, DepNode, DepKind};
 use crate::ty::tls;
 use crate::ty::{self, TyCtxt};
 use crate::ty::query::Query;
@@ -17,6 +17,7 @@ use errors::Diagnostic;
 use errors::FatalError;
 use rustc_data_structures::fx::{FxHashMap};
 use rustc_data_structures::sync::{Lrc, Lock};
+use rustc_data_structures::sharded::Sharded;
 use rustc_data_structures::thin_vec::ThinVec;
 #[cfg(not(parallel_compiler))]
 use rustc_data_structures::cold_path;
@@ -90,7 +91,7 @@ macro_rules! profq_query_msg {
 /// A type representing the responsibility to execute the job in the `job` field.
 /// This will poison the relevant query if dropped.
 pub(super) struct JobOwner<'a, 'tcx, Q: QueryDescription<'tcx> + 'a> {
-    cache: &'a Lock<QueryCache<'tcx, Q>>,
+    cache: &'a Sharded<QueryCache<'tcx, Q>>,
     key: Q::Key,
     job: Lrc<QueryJob<'tcx>>,
 }
@@ -107,7 +108,7 @@ impl<'a, 'tcx, Q: QueryDescription<'tcx>> JobOwner<'a, 'tcx, Q> {
     pub(super) fn try_get(tcx: TyCtxt<'tcx>, span: Span, key: &Q::Key) -> TryGetJob<'a, 'tcx, Q> {
         let cache = Q::query_cache(tcx);
         loop {
-            let mut lock = cache.borrow_mut();
+            let mut lock = cache.get_shard_by_value(key).lock();
             if let Some(value) = lock.results.get(key) {
                 profq_msg!(tcx, ProfileQueriesMsg::CacheHit);
                 tcx.sess.profiler(|p| p.record_query_hit(Q::NAME));
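Note: the `cache.get_shard_by_value(key).lock()` call above hashes the key, picks one of a fixed number of shards, and locks only that shard, so concurrent queries with unrelated keys no longer contend on a single `Lock`. The following is a minimal sketch of that idea using only std types; the `ShardedMap` name, shard count, and internals are illustrative and not the actual `rustc_data_structures::sharded::Sharded` implementation.

```rust
use std::collections::HashMap;
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};
use std::sync::Mutex;

const SHARDS: usize = 32; // illustrative shard count

/// Minimal stand-in for a sharded, lock-protected map.
struct ShardedMap<K, V> {
    shards: Vec<Mutex<HashMap<K, V>>>,
}

impl<K: Hash + Eq, V: Clone> ShardedMap<K, V> {
    fn new() -> Self {
        ShardedMap { shards: (0..SHARDS).map(|_| Mutex::new(HashMap::new())).collect() }
    }

    /// Pick a shard from the key's hash, so unrelated keys rarely contend
    /// on the same mutex.
    fn shard_for(&self, key: &K) -> &Mutex<HashMap<K, V>> {
        let mut hasher = DefaultHasher::new();
        key.hash(&mut hasher);
        &self.shards[(hasher.finish() as usize) % SHARDS]
    }

    fn get(&self, key: &K) -> Option<V> {
        self.shard_for(key).lock().unwrap().get(key).cloned()
    }

    fn insert(&self, key: K, value: V) {
        self.shard_for(&key).lock().unwrap().insert(key, value);
    }
}

fn main() {
    let map: ShardedMap<String, u32> = ShardedMap::new();
    map.insert("typeck_tables_of".to_string(), 1);
    assert_eq!(map.get(&"typeck_tables_of".to_string()), Some(1));
}
```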
@@ -191,7 +192,7 @@ impl<'a, 'tcx, Q: QueryDescription<'tcx>> JobOwner<'a, 'tcx, Q> {
 
         let value = QueryValue::new(result.clone(), dep_node_index);
         {
-            let mut lock = cache.borrow_mut();
+            let mut lock = cache.get_shard_by_value(&key).lock();
             lock.active.remove(&key);
             lock.results.insert(key, value);
         }
@@ -215,7 +216,8 @@ impl<'a, 'tcx, Q: QueryDescription<'tcx>> Drop for JobOwner<'a, 'tcx, Q> {
     #[cold]
     fn drop(&mut self) {
         // Poison the query so jobs waiting on it panic
-        self.cache.borrow_mut().active.insert(self.key.clone(), QueryResult::Poisoned);
+        let shard = self.cache.get_shard_by_value(&self.key);
+        shard.lock().active.insert(self.key.clone(), QueryResult::Poisoned);
         // Also signal the completion of the job, so waiters
         // will continue execution
         self.job.signal_complete();
@@ -411,10 +413,9 @@ impl<'tcx> TyCtxt<'tcx> {
         // try_mark_green(), so we can ignore them here.
         let loaded = self.start_query(job.job.clone(), None, |tcx| {
             let marked = tcx.dep_graph.try_mark_green_and_read(tcx, &dep_node);
-            marked.map(|(prev_dep_node_index, dep_node_index)| {
+            marked.map(|dep_node_index| {
                 (tcx.load_from_disk_and_cache_in_memory::<Q>(
                     key.clone(),
-                    prev_dep_node_index,
                     dep_node_index,
                     &dep_node
                 ), dep_node_index)
@@ -434,7 +435,6 @@ impl<'tcx> TyCtxt<'tcx> {
     fn load_from_disk_and_cache_in_memory<Q: QueryDescription<'tcx>>(
         self,
         key: Q::Key,
-        prev_dep_node_index: SerializedDepNodeIndex,
         dep_node_index: DepNodeIndex,
         dep_node: &DepNode,
     ) -> Q::Value {
@@ -447,7 +447,7 @@ impl<'tcx> TyCtxt<'tcx> {
         let result = if Q::cache_on_disk(self.global_tcx(), key.clone()) &&
                         self.sess.opts.debugging_opts.incremental_queries {
             self.sess.profiler(|p| p.incremental_load_result_start(Q::NAME));
-            let result = Q::try_load_from_disk(self.global_tcx(), prev_dep_node_index);
+            let result = Q::try_load_from_disk(self.global_tcx(), dep_node_index);
             self.sess.profiler(|p| p.incremental_load_result_end(Q::NAME));
 
             // We always expect to find a cached result for things that
@@ -486,11 +486,11 @@ impl<'tcx> TyCtxt<'tcx> {
         // If -Zincremental-verify-ich is specified, re-hash results from
         // the cache and make sure that they have the expected fingerprint.
         if unlikely!(self.sess.opts.debugging_opts.incremental_verify_ich) {
-            self.incremental_verify_ich::<Q>(&result, dep_node, dep_node_index);
+            self.incremental_verify_ich::<Q>(&result, dep_node);
         }
 
         if unlikely!(self.sess.opts.debugging_opts.query_dep_graph) {
-            self.dep_graph.mark_loaded_from_cache(dep_node_index, true);
+            self.dep_graph.mark_loaded_from_cache(*dep_node, true);
         }
 
         result
@@ -502,24 +502,18 @@ impl<'tcx> TyCtxt<'tcx> {
         self,
         result: &Q::Value,
         dep_node: &DepNode,
-        dep_node_index: DepNodeIndex,
     ) {
         use crate::ich::Fingerprint;
 
-        assert!(Some(self.dep_graph.fingerprint_of(dep_node_index)) ==
-                self.dep_graph.prev_fingerprint_of(dep_node),
-                "Fingerprint for green query instance not loaded \
-                from cache: {:?}", dep_node);
-
         debug!("BEGIN verify_ich({:?})", dep_node);
         let mut hcx = self.create_stable_hashing_context();
 
         let new_hash = Q::hash_result(&mut hcx, result).unwrap_or(Fingerprint::ZERO);
         debug!("END verify_ich({:?})", dep_node);
 
-        let old_hash = self.dep_graph.fingerprint_of(dep_node_index);
+        let old_hash = self.dep_graph.prev_fingerprint_of(dep_node);
 
-        assert!(new_hash == old_hash, "Found unstable fingerprints \
+        assert!(Some(new_hash) == old_hash, "Found unstable fingerprints \
             for {:?}", dep_node);
     }
 
@@ -566,7 +560,7 @@ impl<'tcx> TyCtxt<'tcx> {
             profq_msg!(self, ProfileQueriesMsg::ProviderEnd);
 
             if unlikely!(self.sess.opts.debugging_opts.query_dep_graph) {
-                self.dep_graph.mark_loaded_from_cache(dep_node_index, false);
+                self.dep_graph.mark_loaded_from_cache(dep_node, false);
             }
 
             if dep_node.kind != crate::dep_graph::DepKind::Null {
@@ -683,7 +677,7 @@ macro_rules! define_queries_inner {
         use std::mem;
         #[cfg(parallel_compiler)]
         use ty::query::job::QueryResult;
-        use rustc_data_structures::sync::Lock;
+        use rustc_data_structures::sharded::Sharded;
         use crate::{
             rustc_data_structures::stable_hasher::HashStable,
             rustc_data_structures::stable_hasher::StableHasherResult,
@@ -715,18 +709,17 @@ macro_rules! define_queries_inner {
             pub fn collect_active_jobs(&self) -> Vec<Lrc<QueryJob<$tcx>>> {
                 let mut jobs = Vec::new();
 
-                // We use try_lock here since we are only called from the
+                // We use try_lock_shards here since we are only called from the
                 // deadlock handler, and this shouldn't be locked.
                 $(
-                    jobs.extend(
-                        self.$name.try_lock().unwrap().active.values().filter_map(|v|
-                            if let QueryResult::Started(ref job) = *v {
-                                Some(job.clone())
-                            } else {
-                                None
-                            }
-                        )
-                    );
+                    let shards = self.$name.try_lock_shards().unwrap();
+                    jobs.extend(shards.iter().flat_map(|shard| shard.active.values().filter_map(|v|
+                        if let QueryResult::Started(ref job) = *v {
+                            Some(job.clone())
+                        } else {
+                            None
+                        }
+                    )));
                 )*
 
                 jobs
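Note: the comment in this hunk explains why the `unwrap()` is acceptable: only the deadlock handler calls `collect_active_jobs`, so no shard should currently be held. A rough sketch of the assumed semantics of `try_lock_shards` (lock every shard without blocking, give up if any is already held) and of flat-mapping over the resulting guards; the names and details are illustrative, not the real `Sharded` API.

```rust
use std::collections::HashMap;
use std::sync::{Mutex, MutexGuard};

/// Try to lock every shard without blocking; return None if any shard is
/// already held (assumed semantics for `try_lock_shards`).
fn try_lock_all<'a, K, V>(
    shards: &'a [Mutex<HashMap<K, V>>],
) -> Option<Vec<MutexGuard<'a, HashMap<K, V>>>> {
    shards.iter().map(|shard| shard.try_lock().ok()).collect()
}

fn main() {
    let shards: Vec<Mutex<HashMap<u32, &str>>> =
        (0..4).map(|_| Mutex::new(HashMap::new())).collect();
    shards[1].lock().unwrap().insert(7, "started");

    // Gather values from every shard, mirroring how collect_active_jobs
    // flat_maps over the locked shards.
    let guards = try_lock_all(&shards).unwrap();
    let values: Vec<&str> = guards.iter().flat_map(|g| g.values().copied()).collect();
    assert_eq!(values, vec!["started"]);
}
```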
@@ -748,26 +741,27 @@ macro_rules! define_queries_inner {
 
             fn stats<'tcx, Q: QueryConfig<'tcx>>(
                 name: &'static str,
-                map: &QueryCache<'tcx, Q>
+                map: &Sharded<QueryCache<'tcx, Q>>,
             ) -> QueryStats {
+                let map = map.lock_shards();
                 QueryStats {
                     name,
                     #[cfg(debug_assertions)]
-                    cache_hits: map.cache_hits,
+                    cache_hits: map.iter().map(|shard| shard.cache_hits).sum(),
                     #[cfg(not(debug_assertions))]
                     cache_hits: 0,
                     key_size: mem::size_of::<Q::Key>(),
                     key_type: unsafe { type_name::<Q::Key>() },
                     value_size: mem::size_of::<Q::Value>(),
                     value_type: unsafe { type_name::<Q::Value>() },
-                    entry_count: map.results.len(),
+                    entry_count: map.iter().map(|shard| shard.results.len()).sum(),
                 }
             }
 
             $(
                 queries.push(stats::<queries::$name<'_>>(
                     stringify!($name),
-                    &*self.$name.lock()
+                    &self.$name,
                 ));
             )*
 
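Note: `stats` now locks all shards up front via `lock_shards()` and sums the per-shard counters (`cache_hits`, `results.len()`), since no single shard holds the whole cache any more. A small self-contained sketch of that aggregation, assuming one mutex-protected map per shard (illustrative only):

```rust
use std::collections::HashMap;
use std::sync::Mutex;

// Lock each shard in turn and sum the per-shard entry counts,
// mirroring how `stats` sums `results.len()` over the locked shards.
fn total_entries<K, V>(shards: &[Mutex<HashMap<K, V>>]) -> usize {
    shards.iter().map(|shard| shard.lock().unwrap().len()).sum()
}

fn main() {
    let shards: Vec<Mutex<HashMap<u32, u32>>> =
        (0..4).map(|_| Mutex::new(HashMap::new())).collect();
    shards[0].lock().unwrap().insert(1, 10);
    shards[2].lock().unwrap().insert(2, 20);
    assert_eq!(total_entries(&shards), 2);
}
```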
@@ -939,7 +933,7 @@ macro_rules! define_queries_inner {
             }
 
             #[inline(always)]
-            fn query_cache<'a>(tcx: TyCtxt<$tcx>) -> &'a Lock<QueryCache<$tcx, Self>> {
+            fn query_cache<'a>(tcx: TyCtxt<$tcx>) -> &'a Sharded<QueryCache<$tcx, Self>> {
                 &tcx.queries.$name
             }
 
@@ -1066,7 +1060,7 @@ macro_rules! define_queries_struct {
            providers: IndexVec<CrateNum, Providers<$tcx>>,
            fallback_extern_providers: Box<Providers<$tcx>>,
 
-            $($(#[$attr])* $name: Lock<QueryCache<$tcx, queries::$name<$tcx>>>,)*
+            $($(#[$attr])* $name: Sharded<QueryCache<$tcx, queries::$name<$tcx>>>,)*
        }
    };
}