Rollup merge of rust-lang#61923 - Zoxc:dep-stream-prefix-2, r=pnkfelix
Prerequisites from dep graph refactoring #2

Split out from rust-lang#60035 and overlaps with rust-lang#60559.
Centril committed Jul 11, 2019
2 parents ea62f9b + 29e7bfd commit 618fc02
Showing 1 changed file with 51 additions and 35 deletions.
src/librustc/dep_graph/graph.rs (86 changes: 51 additions & 35 deletions)
@@ -7,6 +7,7 @@ use rustc_data_structures::sync::{Lrc, Lock, AtomicU32, Ordering};
 use std::env;
 use std::hash::Hash;
 use std::collections::hash_map::Entry;
+use std::mem;
 use crate::ty::{self, TyCtxt};
 use crate::util::common::{ProfileQueriesMsg, profq_msg};
 use parking_lot::{Mutex, Condvar};
@@ -61,11 +62,11 @@ struct DepGraphData {
 
     colors: DepNodeColorMap,
 
-    /// A set of loaded diagnostics that have been emitted.
-    emitted_diagnostics: Mutex<FxHashSet<DepNodeIndex>>,
+    /// A set of loaded diagnostics that is in the process of being emitted.
+    emitting_diagnostics: Mutex<FxHashSet<DepNodeIndex>>,
 
     /// Used to wait for diagnostics to be emitted.
-    emitted_diagnostics_cond_var: Condvar,
+    emitting_diagnostics_cond_var: Condvar,
 
     /// When we load, there may be `.o` files, cached MIR, or other such
     /// things available to us. If we find that they are not dirty, we
@@ -99,8 +100,8 @@ impl DepGraph {
             previous_work_products: prev_work_products,
             dep_node_debug: Default::default(),
             current: Lock::new(CurrentDepGraph::new(prev_graph_node_count)),
-            emitted_diagnostics: Default::default(),
-            emitted_diagnostics_cond_var: Condvar::new(),
+            emitting_diagnostics: Default::default(),
+            emitting_diagnostics_cond_var: Condvar::new(),
             previous: prev_graph,
             colors: DepNodeColorMap::new(prev_graph_node_count),
             loaded_from_cache: Default::default(),
@@ -744,7 +745,7 @@ impl DepGraph {
 
         // There may be multiple threads trying to mark the same dep node green concurrently
 
-        let (dep_node_index, did_allocation) = {
+        let dep_node_index = {
             let mut current = data.current.borrow_mut();
 
             // Copy the fingerprint from the previous graph,
@@ -758,71 +759,86 @@
 
         // ... emitting any stored diagnostic ...
 
+        // FIXME: Store the fact that a node has diagnostics in a bit in the dep graph somewhere
+        // Maybe store a list on disk and encode this fact in the DepNodeState
         let diagnostics = tcx.queries.on_disk_cache
-                             .load_diagnostics(tcx, prev_dep_node_index);
+                                     .load_diagnostics(tcx, prev_dep_node_index);
 
+        #[cfg(not(parallel_compiler))]
+        debug_assert!(data.colors.get(prev_dep_node_index).is_none(),
+                      "DepGraph::try_mark_previous_green() - Duplicate DepNodeColor \
+                      insertion for {:?}", dep_node);
+
         if unlikely!(diagnostics.len() > 0) {
             self.emit_diagnostics(
                 tcx,
                 data,
                 dep_node_index,
-                did_allocation,
+                prev_dep_node_index,
                 diagnostics
             );
         }
 
         // ... and finally storing a "Green" entry in the color map.
         // Multiple threads can all write the same color here
-        #[cfg(not(parallel_compiler))]
-        debug_assert!(data.colors.get(prev_dep_node_index).is_none(),
-                      "DepGraph::try_mark_previous_green() - Duplicate DepNodeColor \
-                      insertion for {:?}", dep_node);
-
         data.colors.insert(prev_dep_node_index, DepNodeColor::Green(dep_node_index));
 
         debug!("try_mark_previous_green({:?}) - END - successfully marked as green", dep_node);
         Some(dep_node_index)
     }
 
-    /// Atomically emits some loaded diagnotics, assuming that this only gets called with
-    /// `did_allocation` set to `true` on a single thread.
+    /// Atomically emits some loaded diagnostics.
+    /// This may be called concurrently on multiple threads for the same dep node.
     #[cold]
     #[inline(never)]
     fn emit_diagnostics<'tcx>(
         &self,
         tcx: TyCtxt<'tcx>,
         data: &DepGraphData,
         dep_node_index: DepNodeIndex,
-        did_allocation: bool,
+        prev_dep_node_index: SerializedDepNodeIndex,
         diagnostics: Vec<Diagnostic>,
     ) {
-        if did_allocation || !cfg!(parallel_compiler) {
-            // Only the thread which did the allocation emits the error messages
-            let handle = tcx.sess.diagnostic();
+        let mut emitting = data.emitting_diagnostics.lock();
+
+        if data.colors.get(prev_dep_node_index) == Some(DepNodeColor::Green(dep_node_index)) {
+            // The node is already green so diagnostics must have been emitted already
+            return;
+        }
+
+        if emitting.insert(dep_node_index) {
+            // We were the first to insert the node in the set so this thread
+            // must emit the diagnostics and signal other potentially waiting
+            // threads after.
+            mem::drop(emitting);
 
             // Promote the previous diagnostics to the current session.
             tcx.queries.on_disk_cache
-               .store_diagnostics(dep_node_index, diagnostics.clone().into());
+                       .store_diagnostics(dep_node_index, diagnostics.clone().into());
+
+            let handle = tcx.sess.diagnostic();
 
             for diagnostic in diagnostics {
                 DiagnosticBuilder::new_diagnostic(handle, diagnostic).emit();
             }
 
-            #[cfg(parallel_compiler)]
-            {
-                // Mark the diagnostics and emitted and wake up waiters
-                data.emitted_diagnostics.lock().insert(dep_node_index);
-                data.emitted_diagnostics_cond_var.notify_all();
-            }
+            // Mark the node as green now that diagnostics are emitted
+            data.colors.insert(prev_dep_node_index, DepNodeColor::Green(dep_node_index));
+
+            // Remove the node from the set
+            data.emitting_diagnostics.lock().remove(&dep_node_index);
+
+            // Wake up waiters
+            data.emitting_diagnostics_cond_var.notify_all();
         } else {
-            // The other threads will wait for the diagnostics to be emitted
+            // We must wait for the other thread to finish emitting the diagnostic
 
-            let mut emitted_diagnostics = data.emitted_diagnostics.lock();
             loop {
-                if emitted_diagnostics.contains(&dep_node_index) {
+                data.emitting_diagnostics_cond_var.wait(&mut emitting);
+                if data.colors
+                       .get(prev_dep_node_index) == Some(DepNodeColor::Green(dep_node_index)) {
                     break;
                 }
-                data.emitted_diagnostics_cond_var.wait(&mut emitted_diagnostics);
             }
         }
     }
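
The rewritten `emit_diagnostics` drops the old `did_allocation` contract ("only the thread that allocated the node emits") in favor of a claim-and-wait protocol: whichever thread first inserts the node into `emitting_diagnostics` emits the diagnostics, marks the node green, and notifies; every other thread blocks on the condvar until the color map shows green. Below is a minimal standalone sketch of that pattern using the same `parking_lot` primitives the file imports; the `EmitOnce` type, `u32` node ids, and `emit_once` name are illustrative stand-ins, not rustc's API.

use std::collections::HashSet;
use std::mem;

use parking_lot::{Condvar, Mutex};

struct EmitOnce {
    // Nodes some thread is currently emitting for (cf. `emitting_diagnostics`).
    emitting: Mutex<HashSet<u32>>,
    // Nodes that are fully emitted (stands in for the green color map).
    done: Mutex<HashSet<u32>>,
    // Signalled whenever a node moves to `done` (cf. the cond var above).
    cond: Condvar,
}

impl EmitOnce {
    // Run `emit` exactly once per node; concurrent callers for the same
    // node block until the winning thread has finished.
    fn emit_once(&self, node: u32, emit: impl FnOnce()) {
        let mut emitting = self.emitting.lock();

        if self.done.lock().contains(&node) {
            return; // Already emitted earlier; nothing to do or wait for.
        }

        if emitting.insert(node) {
            // We claimed the node: release the lock, do the work,
            // then publish completion and wake all waiters.
            mem::drop(emitting);
            emit();
            self.done.lock().insert(node);
            self.emitting.lock().remove(&node);
            self.cond.notify_all();
        } else {
            // Another thread claimed the node: sleep until it is done.
            loop {
                self.cond.wait(&mut emitting);
                if self.done.lock().contains(&node) {
                    break;
                }
            }
        }
    }
}

Re-checking `done` after every wakeup is what makes the loop safe against spurious wakeups and against waiters woken for unrelated nodes; the patched code re-reads the color map inside its wait loop for the same reason.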
@@ -1027,7 +1043,7 @@ impl CurrentDepGraph {
             hash: self.anon_id_seed.combine(hasher.finish()),
         };
 
-        self.intern_node(target_dep_node, task_deps.reads, Fingerprint::ZERO).0
+        self.intern_node(target_dep_node, task_deps.reads, Fingerprint::ZERO)
     }
 
     fn alloc_node(
@@ -1037,19 +1053,19 @@
         fingerprint: Fingerprint
     ) -> DepNodeIndex {
         debug_assert!(!self.node_to_node_index.contains_key(&dep_node));
-        self.intern_node(dep_node, edges, fingerprint).0
+        self.intern_node(dep_node, edges, fingerprint)
     }
 
     fn intern_node(
         &mut self,
         dep_node: DepNode,
         edges: SmallVec<[DepNodeIndex; 8]>,
         fingerprint: Fingerprint
-    ) -> (DepNodeIndex, bool) {
+    ) -> DepNodeIndex {
         debug_assert_eq!(self.node_to_node_index.len(), self.data.len());
 
         match self.node_to_node_index.entry(dep_node) {
-            Entry::Occupied(entry) => (*entry.get(), false),
+            Entry::Occupied(entry) => *entry.get(),
             Entry::Vacant(entry) => {
                 let dep_node_index = DepNodeIndex::new(self.data.len());
                 self.data.push(DepNodeData {
@@ -1058,7 +1074,7 @@
                     fingerprint
                 });
                 entry.insert(dep_node_index);
-                (dep_node_index, true)
+                dep_node_index
             }
         }
     }
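
With `did_allocation` gone, `intern_node` no longer has to report whether it allocated, so its `(DepNodeIndex, bool)` return type collapses to a bare index and both callers shed a `.0`. A toy sketch of the same entry-API interning shape, under illustrative assumptions (`Interner` with `String` keys and `usize` indices standing in for `DepNode`, `DepNodeData`, and `DepNodeIndex`):

use std::collections::HashMap;
use std::collections::hash_map::Entry;

struct Interner {
    node_to_index: HashMap<String, usize>,
    data: Vec<String>,
}

impl Interner {
    // Return the existing index for `node`, or allocate the next one.
    fn intern(&mut self, node: String) -> usize {
        // The map and the dense data vector must stay in sync.
        debug_assert_eq!(self.node_to_index.len(), self.data.len());

        match self.node_to_index.entry(node.clone()) {
            // Seen before: interning again is just a lookup.
            Entry::Occupied(entry) => *entry.get(),
            // New node: its index is the next slot in the dense vector.
            Entry::Vacant(entry) => {
                let index = self.data.len();
                self.data.push(node);
                entry.insert(index);
                index
            }
        }
    }
}

The `Entry` API keeps the lookup-or-insert to a single hash of the key, and calling `intern` twice with the same string returns the same index both times.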