Commit 618fc02 (parents ea62f9b + 29e7bfd)

Rollup merge of rust-lang#61923 - Zoxc:dep-stream-prefix-2, r=pnkfelix

Prerequisites from dep graph refactoring #2. Split out from rust-lang#60035 and overlaps with rust-lang#60559.
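The substance of the change: when several threads race to mark the same dep node green and that node has diagnostics stored in the on-disk cache, the thread that allocated the DepNodeIndex no longer gets special status (`did_allocation` is gone). Instead, whichever thread first inserts the node into the `emitting_diagnostics` set emits the diagnostics, and the node's green entry in the color map doubles as the 'already emitted' signal that waiting threads check. A minimal standalone sketch of that scheme, using std's Mutex/Condvar in place of the parking_lot types rustc uses, with illustrative names throughout:

use std::collections::HashSet;
use std::sync::{Arc, Condvar, Mutex};
use std::thread;

type NodeIndex = u32;

#[derive(Default)]
struct Shared {
    // Nodes whose diagnostics some thread is currently emitting.
    emitting: Mutex<HashSet<NodeIndex>>,
    // Stand-in for the color map: a node is present once it is green.
    green: Mutex<HashSet<NodeIndex>>,
    cond: Condvar,
}

fn emit_diagnostics_once(shared: &Shared, node: NodeIndex) {
    let mut emitting = shared.emitting.lock().unwrap();

    if shared.green.lock().unwrap().contains(&node) {
        // Already green, so the diagnostics were emitted earlier.
        return;
    }

    if emitting.insert(node) {
        // First thread in: emit without holding the lock, then mark the
        // node green, clear the in-progress entry, and wake the waiters.
        drop(emitting);
        println!("{:?} emits diagnostics for node {}", thread::current().id(), node);
        shared.green.lock().unwrap().insert(node);
        shared.emitting.lock().unwrap().remove(&node);
        shared.cond.notify_all();
    } else {
        // Someone else is emitting: block until the node turns green,
        // re-checking after every wakeup since wakeups can be spurious.
        while !shared.green.lock().unwrap().contains(&node) {
            emitting = shared.cond.wait(emitting).unwrap();
        }
    }
}

fn main() {
    let shared = Arc::new(Shared::default());
    let threads: Vec<_> = (0..4)
        .map(|_| {
            let shared = Arc::clone(&shared);
            thread::spawn(move || emit_diagnostics_once(&shared, 7))
        })
        .collect();
    for t in threads {
        t.join().unwrap();
    }
    // Exactly one "emits diagnostics" line prints, regardless of interleaving.
}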

File tree: 1 file changed (+51 -35 lines)

src/librustc/dep_graph/graph.rs (+51 -35)
@@ -7,6 +7,7 @@ use rustc_data_structures::sync::{Lrc, Lock, AtomicU32, Ordering};
 use std::env;
 use std::hash::Hash;
 use std::collections::hash_map::Entry;
+use std::mem;
 use crate::ty::{self, TyCtxt};
 use crate::util::common::{ProfileQueriesMsg, profq_msg};
 use parking_lot::{Mutex, Condvar};
@@ -61,11 +62,11 @@ struct DepGraphData {

     colors: DepNodeColorMap,

-    /// A set of loaded diagnostics that have been emitted.
-    emitted_diagnostics: Mutex<FxHashSet<DepNodeIndex>>,
+    /// A set of loaded diagnostics that is in the progress of being emitted.
+    emitting_diagnostics: Mutex<FxHashSet<DepNodeIndex>>,

     /// Used to wait for diagnostics to be emitted.
-    emitted_diagnostics_cond_var: Condvar,
+    emitting_diagnostics_cond_var: Condvar,

     /// When we load, there may be `.o` files, cached MIR, or other such
     /// things available to us. If we find that they are not dirty, we
@@ -99,8 +100,8 @@ impl DepGraph {
             previous_work_products: prev_work_products,
             dep_node_debug: Default::default(),
             current: Lock::new(CurrentDepGraph::new(prev_graph_node_count)),
-            emitted_diagnostics: Default::default(),
-            emitted_diagnostics_cond_var: Condvar::new(),
+            emitting_diagnostics: Default::default(),
+            emitting_diagnostics_cond_var: Condvar::new(),
             previous: prev_graph,
             colors: DepNodeColorMap::new(prev_graph_node_count),
             loaded_from_cache: Default::default(),
@@ -744,7 +745,7 @@ impl DepGraph {

         // There may be multiple threads trying to mark the same dep node green concurrently

-        let (dep_node_index, did_allocation) = {
+        let dep_node_index = {
             let mut current = data.current.borrow_mut();

             // Copy the fingerprint from the previous graph,
@@ -758,71 +759,86 @@ impl DepGraph {

         // ... emitting any stored diagnostic ...

+        // FIXME: Store the fact that a node has diagnostics in a bit in the dep graph somewhere
+        // Maybe store a list on disk and encode this fact in the DepNodeState
         let diagnostics = tcx.queries.on_disk_cache
-                             .load_diagnostics(tcx, prev_dep_node_index);
+            .load_diagnostics(tcx, prev_dep_node_index);
+
+        #[cfg(not(parallel_compiler))]
+        debug_assert!(data.colors.get(prev_dep_node_index).is_none(),
+                      "DepGraph::try_mark_previous_green() - Duplicate DepNodeColor \
+                      insertion for {:?}", dep_node);

         if unlikely!(diagnostics.len() > 0) {
             self.emit_diagnostics(
                 tcx,
                 data,
                 dep_node_index,
-                did_allocation,
+                prev_dep_node_index,
                 diagnostics
             );
         }

         // ... and finally storing a "Green" entry in the color map.
         // Multiple threads can all write the same color here
-        #[cfg(not(parallel_compiler))]
-        debug_assert!(data.colors.get(prev_dep_node_index).is_none(),
-                      "DepGraph::try_mark_previous_green() - Duplicate DepNodeColor \
-                      insertion for {:?}", dep_node);
-
         data.colors.insert(prev_dep_node_index, DepNodeColor::Green(dep_node_index));

         debug!("try_mark_previous_green({:?}) - END - successfully marked as green", dep_node);
         Some(dep_node_index)
     }

-    /// Atomically emits some loaded diagnotics, assuming that this only gets called with
-    /// `did_allocation` set to `true` on a single thread.
+    /// Atomically emits some loaded diagnostics.
+    /// This may be called concurrently on multiple threads for the same dep node.
     #[cold]
     #[inline(never)]
     fn emit_diagnostics<'tcx>(
         &self,
         tcx: TyCtxt<'tcx>,
         data: &DepGraphData,
         dep_node_index: DepNodeIndex,
-        did_allocation: bool,
+        prev_dep_node_index: SerializedDepNodeIndex,
         diagnostics: Vec<Diagnostic>,
     ) {
-        if did_allocation || !cfg!(parallel_compiler) {
-            // Only the thread which did the allocation emits the error messages
-            let handle = tcx.sess.diagnostic();
+        let mut emitting = data.emitting_diagnostics.lock();
+
+        if data.colors.get(prev_dep_node_index) == Some(DepNodeColor::Green(dep_node_index)) {
+            // The node is already green so diagnostics must have been emitted already
+            return;
+        }
+
+        if emitting.insert(dep_node_index) {
+            // We were the first to insert the node in the set so this thread
+            // must emit the diagnostics and signal other potentially waiting
+            // threads after.
+            mem::drop(emitting);

             // Promote the previous diagnostics to the current session.
             tcx.queries.on_disk_cache
-               .store_diagnostics(dep_node_index, diagnostics.clone().into());
+                .store_diagnostics(dep_node_index, diagnostics.clone().into());
+
+            let handle = tcx.sess.diagnostic();

             for diagnostic in diagnostics {
                 DiagnosticBuilder::new_diagnostic(handle, diagnostic).emit();
             }

-            #[cfg(parallel_compiler)]
-            {
-                // Mark the diagnostics and emitted and wake up waiters
-                data.emitted_diagnostics.lock().insert(dep_node_index);
-                data.emitted_diagnostics_cond_var.notify_all();
-            }
+            // Mark the node as green now that diagnostics are emitted
+            data.colors.insert(prev_dep_node_index, DepNodeColor::Green(dep_node_index));
+
+            // Remove the node from the set
+            data.emitting_diagnostics.lock().remove(&dep_node_index);
+
+            // Wake up waiters
+            data.emitting_diagnostics_cond_var.notify_all();
         } else {
-            // The other threads will wait for the diagnostics to be emitted
+            // We must wait for the other thread to finish emitting the diagnostic

-            let mut emitted_diagnostics = data.emitted_diagnostics.lock();
             loop {
-                if emitted_diagnostics.contains(&dep_node_index) {
+                data.emitting_diagnostics_cond_var.wait(&mut emitting);
+                if data.colors
+                       .get(prev_dep_node_index) == Some(DepNodeColor::Green(dep_node_index)) {
                     break;
                 }
-                data.emitted_diagnostics_cond_var.wait(&mut emitted_diagnostics);
             }
         }
     }
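Two details of the rewritten waiter loop deserve a note. It re-checks the color map after every wakeup because condvar wakeups can be spurious and notify_all may fire for a different node, and it cannot miss the notification because every waiter holds the `emitting_diagnostics` lock from its first check until it blocks in wait, while the emitter must reacquire that same lock (to remove the node from the set) before it calls notify_all. A compact sketch of that handshake, assuming parking_lot as a dependency (its Condvar::wait borrows the guard as &mut rather than consuming it as std's does); `green` here is a hypothetical stand-in for the color map:

use parking_lot::{Condvar, Mutex};
use std::sync::Arc;
use std::thread;

fn main() {
    // Handshake lock + condvar, as in DepGraphData; a unit payload is
    // enough here because the condition itself lives in `green`.
    let handshake = Arc::new((Mutex::new(()), Condvar::new()));
    let green = Arc::new(Mutex::new(false));

    let waiter = {
        let (handshake, green) = (handshake.clone(), green.clone());
        thread::spawn(move || {
            let (lock, cond) = &*handshake;
            let mut guard = lock.lock();
            // Re-check after every wakeup: wakeups can be spurious.
            while !*green.lock() {
                cond.wait(&mut guard); // atomically unlocks while blocked
            }
            println!("node turned green; diagnostics were emitted elsewhere");
        })
    };

    // Emitter side: set the condition first, then take the handshake lock
    // before notifying. A waiter holds that lock from its check until it
    // blocks in wait(), so the notification cannot be lost in between.
    *green.lock() = true;
    let (lock, cond) = &*handshake;
    let guard = lock.lock();
    cond.notify_all();
    drop(guard);

    waiter.join().unwrap();
}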
@@ -1027,7 +1043,7 @@ impl CurrentDepGraph {
             hash: self.anon_id_seed.combine(hasher.finish()),
         };

-        self.intern_node(target_dep_node, task_deps.reads, Fingerprint::ZERO).0
+        self.intern_node(target_dep_node, task_deps.reads, Fingerprint::ZERO)
     }

     fn alloc_node(
@@ -1037,19 +1053,19 @@ impl CurrentDepGraph {
         fingerprint: Fingerprint
     ) -> DepNodeIndex {
         debug_assert!(!self.node_to_node_index.contains_key(&dep_node));
-        self.intern_node(dep_node, edges, fingerprint).0
+        self.intern_node(dep_node, edges, fingerprint)
     }

     fn intern_node(
         &mut self,
         dep_node: DepNode,
         edges: SmallVec<[DepNodeIndex; 8]>,
         fingerprint: Fingerprint
-    ) -> (DepNodeIndex, bool) {
+    ) -> DepNodeIndex {
         debug_assert_eq!(self.node_to_node_index.len(), self.data.len());

         match self.node_to_node_index.entry(dep_node) {
-            Entry::Occupied(entry) => (*entry.get(), false),
+            Entry::Occupied(entry) => *entry.get(),
             Entry::Vacant(entry) => {
                 let dep_node_index = DepNodeIndex::new(self.data.len());
                 self.data.push(DepNodeData {
@@ -1058,7 +1074,7 @@ impl CurrentDepGraph {
                     fingerprint
                 });
                 entry.insert(dep_node_index);
-                (dep_node_index, true)
+                dep_node_index
             }
         }
     }
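With the 'who emits' decision now keyed off the color map, intern_node no longer needs to tell its caller whether it allocated a fresh index, so the (DepNodeIndex, bool) return type above collapses to a plain DepNodeIndex. A minimal sketch of the same entry-API interning pattern, with illustrative names standing in for the real DepGraph types:

use std::collections::hash_map::{Entry, HashMap};

struct Interner {
    node_to_index: HashMap<String, u32>,
    data: Vec<String>,
}

impl Interner {
    /// Returns the existing index for `node`, or allocates the next one.
    /// Callers can no longer tell the two cases apart, and no longer need to.
    fn intern_node(&mut self, node: String) -> u32 {
        // Invariant mirrored from the real code: map and data stay in sync.
        debug_assert_eq!(self.node_to_index.len(), self.data.len());
        match self.node_to_index.entry(node) {
            Entry::Occupied(entry) => *entry.get(),
            Entry::Vacant(entry) => {
                let index = self.data.len() as u32;
                self.data.push(entry.key().clone());
                entry.insert(index);
                index
            }
        }
    }
}

fn main() {
    let mut interner = Interner { node_to_index: HashMap::new(), data: Vec::new() };
    let first = interner.intern_node("dep_node_a".to_string());
    let again = interner.intern_node("dep_node_a".to_string());
    assert_eq!(first, again); // same node, same index, no allocation flag
}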

0 commit comments

Comments
 (0)