Skip to content

Commit eceec57

Browse files
committed
Auto merge of #64583 - tmandry:rollup-b793x81, r=tmandry
Rollup of 5 pull requests Successful merges: - #64207 (Make rustc_mir::dataflow module pub (for clippy)) - #64348 (PR: documentation spin loop hint) - #64532 (Replace `state_for_location` with `DataflowResultsCursor`) - #64578 (Fix issue22656 with LLDB 8) - #64580 (Update books) Failed merges: r? @ghost
2 parents dece573 + eeda313 commit eceec57

File tree

8 files changed

+50
-90
lines changed

8 files changed

+50
-90
lines changed

src/libcore/hint.rs

+5-17
Original file line numberDiff line numberDiff line change
@@ -49,28 +49,16 @@ pub unsafe fn unreachable_unchecked() -> ! {
4949
intrinsics::unreachable()
5050
}
5151

52-
/// Signals the processor that it is entering a busy-wait spin-loop.
52+
/// Emits a machine instruction hinting to the processor that it is running in a busy-wait
53+
/// spin-loop ("spin lock").
5354
///
54-
/// Upon receiving spin-loop signal the processor can optimize its behavior by, for example, saving
55-
/// power or switching hyper-threads.
56-
///
57-
/// This function is different than [`std::thread::yield_now`] which directly yields to the
58-
/// system's scheduler, whereas `spin_loop` only signals the processor that it is entering a
59-
/// busy-wait spin-loop without yielding control to the system's scheduler.
60-
///
61-
/// Using a busy-wait spin-loop with `spin_loop` is ideally used in situations where a
62-
/// contended lock is held by another thread executed on a different CPU and where the waiting
63-
/// times are relatively small. Because entering busy-wait spin-loop does not trigger the system's
64-
/// scheduler, no overhead for switching threads occurs. However, if the thread holding the
65-
/// contended lock is running on the same CPU, the spin-loop is likely to occupy an entire CPU slice
66-
/// before switching to the thread that holds the lock. If the contending lock is held by a thread
67-
/// on the same CPU or if the waiting times for acquiring the lock are longer, it is often better to
68-
/// use [`std::thread::yield_now`].
55+
/// For a discussion of different locking strategies and their trade-offs, see
56+
/// [`core::sync::atomic::spin_loop_hint`].
6957
///
7058
/// **Note**: On platforms that do not support receiving spin-loop hints this function does not
7159
/// do anything at all.
7260
///
73-
/// [`std::thread::yield_now`]: ../../std/thread/fn.yield_now.html
61+
/// [`core::sync::atomic::spin_loop_hint`]: ../sync/atomic/fn.spin_loop_hint.html
7462
#[inline]
7563
#[unstable(feature = "renamed_spin_loop", issue = "55002")]
7664
pub fn spin_loop() {

src/libcore/sync/atomic.rs

+15-12
Original file line numberDiff line numberDiff line change
@@ -124,28 +124,31 @@ use crate::fmt;
124124

125125
use crate::hint::spin_loop;
126126

127-
/// Signals the processor that it is entering a busy-wait spin-loop.
127+
/// Signals the processor that it is inside a busy-wait spin-loop ("spin lock").
128128
///
129129
/// Upon receiving the spin-loop signal, the processor can optimize its behavior by, for example, saving
130130
/// power or switching hyper-threads.
131131
///
132-
/// This function is different than [`std::thread::yield_now`] which directly yields to the
133-
/// system's scheduler, whereas `spin_loop_hint` only signals the processor that it is entering a
134-
/// busy-wait spin-loop without yielding control to the system's scheduler.
132+
/// This function is different from [`std::thread::yield_now`] which directly yields to the
133+
/// system's scheduler, whereas `spin_loop_hint` does not interact with the operating system.
135134
///
136-
/// Using a busy-wait spin-loop with `spin_loop_hint` is ideally used in situations where a
137-
/// contended lock is held by another thread executed on a different CPU and where the waiting
138-
/// times are relatively small. Because entering busy-wait spin-loop does not trigger the system's
139-
/// scheduler, no overhead for switching threads occurs. However, if the thread holding the
140-
/// contended lock is running on the same CPU, the spin-loop is likely to occupy an entire CPU slice
141-
/// before switching to the thread that holds the lock. If the contending lock is held by a thread
142-
/// on the same CPU or if the waiting times for acquiring the lock are longer, it is often better to
143-
/// use [`std::thread::yield_now`].
135+
/// Spin locks can be very efficient for short lock durations because they do not involve context
136+
/// switches or interaction with the operating system. For long lock durations, however, they become
137+
/// wasteful because they use CPU cycles for the entire lock duration, and using a
138+
/// [`std::sync::Mutex`] is likely the better approach. If actively spinning for a long time is
139+
/// required, e.g. because code polls a non-blocking API, calling [`std::thread::yield_now`]
140+
/// or [`std::thread::sleep`] may be the best option.
141+
///
142+
/// **Note**: Spin locks are based on the underlying assumption that another thread will release
143+
/// the lock 'soon'. In order for this to work, that other thread must run on a different CPU or
144+
/// core (at least potentially). Spin locks do not work efficiently on single CPU / core platforms.
144145
///
145146
/// **Note**: On platforms that do not support receiving spin-loop hints this function does not
146147
/// do anything at all.
147148
///
148149
/// [`std::thread::yield_now`]: ../../../std/thread/fn.yield_now.html
150+
/// [`std::thread::sleep`]: ../../../std/thread/fn.sleep.html
151+
/// [`std::sync::Mutex`]: ../../../std/sync/struct.Mutex.html
149152
#[inline]
150153
#[stable(feature = "spin_loop_hint", since = "1.24.0")]
151154
pub fn spin_loop_hint() {

src/librustc_mir/dataflow/mod.rs

+11-35
Original file line numberDiff line numberDiff line change
@@ -56,7 +56,7 @@ where
5656
/// string (as well as that of rendering up-front); in exchange, you
5757
/// don't have to hand over ownership of your value or deal with
5858
/// borrowing it.
59-
pub(crate) struct DebugFormatted(String);
59+
pub struct DebugFormatted(String);
6060

6161
impl DebugFormatted {
6262
pub fn new(input: &dyn fmt::Debug) -> DebugFormatted {
@@ -70,7 +70,7 @@ impl fmt::Debug for DebugFormatted {
7070
}
7171
}
7272

73-
pub(crate) trait Dataflow<'tcx, BD: BitDenotation<'tcx>> {
73+
pub trait Dataflow<'tcx, BD: BitDenotation<'tcx>> {
7474
/// Sets up and runs the dataflow problem, using `p` to render results if
7575
/// implementation so chooses.
7676
fn dataflow<P>(&mut self, p: P) where P: Fn(&BD, BD::Idx) -> DebugFormatted {
@@ -121,7 +121,7 @@ pub struct MoveDataParamEnv<'tcx> {
121121
pub(crate) param_env: ty::ParamEnv<'tcx>,
122122
}
123123

124-
pub(crate) fn do_dataflow<'a, 'tcx, BD, P>(
124+
pub fn do_dataflow<'a, 'tcx, BD, P>(
125125
tcx: TyCtxt<'tcx>,
126126
body: &'a Body<'tcx>,
127127
def_id: DefId,
@@ -453,34 +453,10 @@ where
453453
{
454454
self.flow_state.each_gen_bit(f)
455455
}
456-
}
457-
458-
pub fn state_for_location<'tcx, T: BitDenotation<'tcx>>(loc: Location,
459-
analysis: &T,
460-
result: &DataflowResults<'tcx, T>,
461-
body: &Body<'tcx>)
462-
-> BitSet<T::Idx> {
463-
let mut trans = GenKill::from_elem(HybridBitSet::new_empty(analysis.bits_per_block()));
464456

465-
for stmt in 0..loc.statement_index {
466-
let mut stmt_loc = loc;
467-
stmt_loc.statement_index = stmt;
468-
analysis.before_statement_effect(&mut trans, stmt_loc);
469-
analysis.statement_effect(&mut trans, stmt_loc);
457+
pub fn get(&self) -> &BitSet<BD::Idx> {
458+
self.flow_state.as_dense()
470459
}
471-
472-
// Apply the pre-statement effect of the statement we're evaluating.
473-
if loc.statement_index == body[loc.block].statements.len() {
474-
analysis.before_terminator_effect(&mut trans, loc);
475-
} else {
476-
analysis.before_statement_effect(&mut trans, loc);
477-
}
478-
479-
// Apply the transfer function for all preceding statements to the fixpoint
480-
// at the start of the block.
481-
let mut state = result.sets().entry_set_for(loc.block.index()).to_owned();
482-
trans.apply(&mut state);
483-
state
484460
}
485461

486462
pub struct DataflowAnalysis<'a, 'tcx, O>
@@ -565,7 +541,7 @@ pub struct GenKill<T> {
565541
pub(crate) kill_set: T,
566542
}
567543

568-
type GenKillSet<T> = GenKill<HybridBitSet<T>>;
544+
pub type GenKillSet<T> = GenKill<HybridBitSet<T>>;
569545

570546
impl<T> GenKill<T> {
571547
/// Creates a new tuple where `gen_set == kill_set == elem`.
@@ -580,28 +556,28 @@ impl<T> GenKill<T> {
580556
}
581557

582558
impl<E:Idx> GenKillSet<E> {
583-
pub(crate) fn clear(&mut self) {
559+
pub fn clear(&mut self) {
584560
self.gen_set.clear();
585561
self.kill_set.clear();
586562
}
587563

588-
fn gen(&mut self, e: E) {
564+
pub fn gen(&mut self, e: E) {
589565
self.gen_set.insert(e);
590566
self.kill_set.remove(e);
591567
}
592568

593-
fn gen_all(&mut self, i: impl IntoIterator<Item: Borrow<E>>) {
569+
pub fn gen_all(&mut self, i: impl IntoIterator<Item: Borrow<E>>) {
594570
for j in i {
595571
self.gen(*j.borrow());
596572
}
597573
}
598574

599-
fn kill(&mut self, e: E) {
575+
pub fn kill(&mut self, e: E) {
600576
self.gen_set.remove(e);
601577
self.kill_set.insert(e);
602578
}
603579

604-
fn kill_all(&mut self, i: impl IntoIterator<Item: Borrow<E>>) {
580+
pub fn kill_all(&mut self, i: impl IntoIterator<Item: Borrow<E>>) {
605581
for j in i {
606582
self.kill(*j.borrow());
607583
}

src/librustc_mir/lib.rs

+1-1
Original file line numberDiff line numberDiff line change
@@ -35,7 +35,7 @@ pub mod error_codes;
3535

3636
mod borrow_check;
3737
mod build;
38-
mod dataflow;
38+
pub mod dataflow;
3939
mod hair;
4040
mod lints;
4141
mod shim;

src/librustc_mir/transform/generator.rs

+15-22
Original file line numberDiff line numberDiff line change
@@ -67,7 +67,7 @@ use crate::transform::{MirPass, MirSource};
6767
use crate::transform::simplify;
6868
use crate::transform::no_landing_pads::no_landing_pads;
6969
use crate::dataflow::{DataflowResults, DataflowResultsConsumer, FlowAtLocation};
70-
use crate::dataflow::{do_dataflow, DebugFormatted, state_for_location};
70+
use crate::dataflow::{do_dataflow, DebugFormatted, DataflowResultsCursor};
7171
use crate::dataflow::{MaybeStorageLive, HaveBeenBorrowedLocals, RequiresStorage};
7272
use crate::util::dump_mir;
7373
use crate::util::liveness;
@@ -436,9 +436,10 @@ fn locals_live_across_suspend_points(
436436
// Calculate when MIR locals have live storage. This gives us an upper bound of their
437437
// lifetimes.
438438
let storage_live_analysis = MaybeStorageLive::new(body);
439-
let storage_live =
439+
let storage_live_results =
440440
do_dataflow(tcx, body, def_id, &[], &dead_unwinds, storage_live_analysis,
441441
|bd, p| DebugFormatted::new(&bd.body().local_decls[p]));
442+
let mut storage_live_cursor = DataflowResultsCursor::new(&storage_live_results, body);
442443

443444
// Find the MIR locals which do not use StorageLive/StorageDead statements.
444445
// The storage of these locals are always live.
@@ -448,17 +449,18 @@ fn locals_live_across_suspend_points(
448449
// Calculate the MIR locals which have been previously
449450
// borrowed (even if they are still active).
450451
let borrowed_locals_analysis = HaveBeenBorrowedLocals::new(body);
451-
let borrowed_locals_result =
452+
let borrowed_locals_results =
452453
do_dataflow(tcx, body, def_id, &[], &dead_unwinds, borrowed_locals_analysis,
453454
|bd, p| DebugFormatted::new(&bd.body().local_decls[p]));
455+
let mut borrowed_locals_cursor = DataflowResultsCursor::new(&borrowed_locals_results, body);
454456

455457
// Calculate the MIR locals that we actually need to keep storage around
456458
// for.
457-
let requires_storage_analysis = RequiresStorage::new(body, &borrowed_locals_result);
458-
let requires_storage =
459+
let requires_storage_analysis = RequiresStorage::new(body, &borrowed_locals_results);
460+
let requires_storage_results =
459461
do_dataflow(tcx, body, def_id, &[], &dead_unwinds, requires_storage_analysis,
460462
|bd, p| DebugFormatted::new(&bd.body().local_decls[p]));
461-
let requires_storage_analysis = RequiresStorage::new(body, &borrowed_locals_result);
463+
let mut requires_storage_cursor = DataflowResultsCursor::new(&requires_storage_results, body);
462464

463465
// Calculate the liveness of MIR locals ignoring borrows.
464466
let mut live_locals = liveness::LiveVarSet::new_empty(body.local_decls.len());
@@ -484,10 +486,6 @@ fn locals_live_across_suspend_points(
484486
};
485487

486488
if !movable {
487-
let borrowed_locals = state_for_location(loc,
488-
&borrowed_locals_analysis,
489-
&borrowed_locals_result,
490-
body);
491489
// The `liveness` variable contains the liveness of MIR locals ignoring borrows.
492490
// This is correct for movable generators since borrows cannot live across
493491
// suspension points. However for immovable generators we need to account for
@@ -498,22 +496,19 @@ fn locals_live_across_suspend_points(
498496
// If a borrow is converted to a raw reference, we must also assume that it lives
499497
// forever. Note that the final liveness is still bounded by the storage liveness
500498
// of the local, which happens using the `intersect` operation below.
501-
liveness.outs[block].union(&borrowed_locals);
499+
borrowed_locals_cursor.seek(loc);
500+
liveness.outs[block].union(borrowed_locals_cursor.get());
502501
}
503502

504-
let storage_liveness = state_for_location(loc,
505-
&storage_live_analysis,
506-
&storage_live,
507-
body);
503+
storage_live_cursor.seek(loc);
504+
let storage_liveness = storage_live_cursor.get();
508505

509506
// Store the storage liveness for later use so we can restore the state
510507
// after a suspension point
511508
storage_liveness_map.insert(block, storage_liveness.clone());
512509

513-
let mut storage_required = state_for_location(loc,
514-
&requires_storage_analysis,
515-
&requires_storage,
516-
body);
510+
requires_storage_cursor.seek(loc);
511+
let mut storage_required = requires_storage_cursor.get().clone();
517512

518513
// Mark locals without storage statements as always requiring storage
519514
storage_required.union(&ignored.0);
@@ -549,8 +544,7 @@ fn locals_live_across_suspend_points(
549544
body,
550545
&live_locals,
551546
&ignored,
552-
requires_storage,
553-
requires_storage_analysis);
547+
requires_storage_results);
554548

555549
LivenessInfo {
556550
live_locals,
@@ -588,7 +582,6 @@ fn compute_storage_conflicts(
588582
stored_locals: &liveness::LiveVarSet,
589583
ignored: &StorageIgnored,
590584
requires_storage: DataflowResults<'tcx, RequiresStorage<'mir, 'tcx>>,
591-
_requires_storage_analysis: RequiresStorage<'mir, 'tcx>,
592585
) -> BitMatrix<GeneratorSavedLocal, GeneratorSavedLocal> {
593586
assert_eq!(body.local_decls.len(), ignored.0.domain_size());
594587
assert_eq!(body.local_decls.len(), stored_locals.domain_size());

src/test/debuginfo/issue-22656.rs

+1-1
Original file line numberDiff line numberDiff line change
@@ -15,7 +15,7 @@
1515
// lldbg-check:[...]$0 = vec![1, 2, 3]
1616
// lldbr-check:(alloc::vec::Vec<i32>) v = vec![1, 2, 3]
1717
// lldb-command:print zs
18-
// lldbg-check:[...]$1 = StructWithZeroSizedField { x: ZeroSizedStruct, y: 123, z: ZeroSizedStruct, w: 456 }
18+
// lldbg-check:[...]$1 = StructWithZeroSizedField { x: ZeroSizedStruct[...], y: 123, z: ZeroSizedStruct[...], w: 456 }
1919
// lldbr-check:(issue_22656::StructWithZeroSizedField) zs = StructWithZeroSizedField { x: ZeroSizedStruct { }, y: 123, z: ZeroSizedStruct { }, w: 456 }
2020
// lldbr-command:continue
2121

0 commit comments

Comments
 (0)