Skip to content

Commit b8bea5a

Browse files
committed Oct 5, 2018
Auto merge of #54017 - alexcrichton:wasm-atomics2, r=sfackler
std: Start implementing wasm32 atomics This commit is an initial start at implementing the standard library for wasm32-unknown-unknown with the experimental `atomics` feature enabled. None of these changes will be visible to users of the wasm32-unknown-unknown target because they all require recompiling the standard library. The hope with this is that we can get this support into the standard library and start iterating on it in-tree to enable experimentation. Currently there's a few components in this PR: * Atomic fences are disabled on wasm as there's no corresponding atomic op and it's not clear yet what the convention should be, but this will change in the future! * Implementations of `Mutex`, `Condvar`, and `RwLock` were all added based on the atomic intrinsics that wasm has. * The `ReentrantMutex` and thread-local-storage implementations panic currently as there's no great way to get a handle on the current thread's "id" yet. Right now the wasm32 target with atomics is unfortunately pretty unusable, requiring a lot of manual things here and there to actually get it operational. This will likely continue to evolve as the story for atomics and wasm unfolds, but we also need more LLVM support for some operations like custom `global` directives for this to work best.
2 parents 8c4ad4e + b4877ed commit b8bea5a

10 files changed

+510
-5
lines changed
 

‎src/libcore/lib.rs

+1
Original file line numberDiff line numberDiff line change
@@ -117,6 +117,7 @@
117117
#![feature(powerpc_target_feature)]
118118
#![feature(mips_target_feature)]
119119
#![feature(aarch64_target_feature)]
120+
#![feature(wasm_target_feature)]
120121
#![feature(const_slice_len)]
121122
#![feature(const_str_as_bytes)]
122123
#![feature(const_str_len)]

‎src/libcore/sync/atomic.rs

+8
Original file line numberDiff line numberDiff line change
@@ -2251,7 +2251,15 @@ unsafe fn atomic_umin<T>(dst: *mut T, val: T, order: Ordering) -> T {
22512251
/// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
22522252
#[inline]
22532253
#[stable(feature = "rust1", since = "1.0.0")]
2254+
#[cfg_attr(target_arch = "wasm32", allow(unused_variables))]
22542255
pub fn fence(order: Ordering) {
2256+
// On wasm32 it looks like fences aren't implemented in LLVM yet in that
2257+
// they will cause LLVM to abort. The wasm instruction set doesn't have
2258+
// fences right now. There's discussion online about the best way for tools
2259+
// to conventionally implement fences at
2260+
// https://github.com/WebAssembly/tool-conventions/issues/59. We should
2261+
// follow that discussion and implement a solution when one comes about!
2262+
#[cfg(not(target_arch = "wasm32"))]
22552263
unsafe {
22562264
match order {
22572265
Acquire => intrinsics::atomic_fence_acq(),

‎src/libstd/lib.rs

+1
Original file line numberDiff line numberDiff line change
@@ -257,6 +257,7 @@
257257
#![feature(const_cstr_unchecked)]
258258
#![feature(core_intrinsics)]
259259
#![feature(dropck_eyepatch)]
260+
#![feature(duration_as_u128)]
260261
#![feature(exact_size_is_empty)]
261262
#![feature(external_doc)]
262263
#![feature(fixed_size_array)]
+104
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,104 @@
1+
// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
2+
// file at the top-level directory of this distribution and at
3+
// http://rust-lang.org/COPYRIGHT.
4+
//
5+
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6+
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7+
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8+
// option. This file may not be copied, modified, or distributed
9+
// except according to those terms.
10+
11+
use arch::wasm32::atomic;
12+
use cmp;
13+
use mem;
14+
use sync::atomic::{AtomicUsize, Ordering::SeqCst};
15+
use sys::mutex::Mutex;
16+
use time::Duration;
17+
18+
/// Condition variable for `wasm32` with the `atomics` feature, built directly
/// on the wasm `atomic.wait` / `atomic.wake` instructions.
pub struct Condvar {
    // Generation counter: bumped on every notification. Waiters block on this
    // word's address and only go to sleep while its value is unchanged.
    cnt: AtomicUsize,
}

// Condition variables are implemented with a simple counter internally that is
// likely to cause spurious wakeups. Blocking on a condition variable will first
// read the value of the internal counter, unlock the given mutex, and then
// block if and only if the counter's value is still the same. Notifying a
// condition variable will modify the counter (add one for now) and then wake up
// a thread waiting on the address of the counter.
//
// A thread waiting on the condition variable will as a result avoid going to
// sleep if it's notified after the lock is unlocked but before it fully goes to
// sleep. A sleeping thread is guaranteed to be woken up at some point as it can
// only be woken up with a call to `wake`.
//
// Note that it's possible for 2 or more threads to be woken up by a call to
// `notify_one` with this implementation. That can happen where the modification
// of `cnt` causes any threads in the middle of `wait` to avoid going to sleep,
// and the subsequent `wake` may wake up a thread that's actually blocking. We
// consider this a spurious wakeup, though, which all users of condition
// variables must already be prepared to handle. As a result, this source of
// spurious wakeups is currently thought to be ok, although it may be
// problematic later on if it causes too many spurious wakeups.

impl Condvar {
    /// Creates a new condition variable in the "no notifications yet" state.
    pub const fn new() -> Condvar {
        Condvar { cnt: AtomicUsize::new(0) }
    }

    #[inline]
    pub unsafe fn init(&mut self) {
        // nothing to do
    }

    pub unsafe fn notify_one(&self) {
        // Bump the generation first so any thread concurrently entering
        // `wait` sees a changed value and skips sleeping, then wake at most
        // one thread already blocked on `cnt`'s address.
        self.cnt.fetch_add(1, SeqCst);
        atomic::wake(self.ptr(), 1);
    }

    #[inline]
    pub unsafe fn notify_all(&self) {
        self.cnt.fetch_add(1, SeqCst);
        atomic::wake(self.ptr(), -1); // -1 == "wake everyone"
    }

    pub unsafe fn wait(&self, mutex: &Mutex) {
        // "atomically block and unlock" implemented by loading our current
        // counter's value, unlocking the mutex, and blocking if the counter
        // still has the same value.
        //
        // Notifications happen by incrementing the counter and then waking a
        // thread. Incrementing the counter after we unlock the mutex will
        // prevent us from sleeping and otherwise the call to `wake` will
        // wake us up once we're asleep.
        let ticket = self.cnt.load(SeqCst) as i32;
        mutex.unlock();
        // -1 timeout == block indefinitely.
        let val = atomic::wait_i32(self.ptr(), ticket, -1);
        // 0 == woken, 1 == not equal to `ticket`, 2 == timeout (shouldn't happen)
        debug_assert!(val == 0 || val == 1);
        mutex.lock();
    }

    /// Like `wait`, but gives up after `dur`. Returns `true` if (possibly
    /// spuriously) notified, `false` on timeout.
    pub unsafe fn wait_timeout(&self, mutex: &Mutex, dur: Duration) -> bool {
        let ticket = self.cnt.load(SeqCst) as i32;
        mutex.unlock();
        // Clamp the timeout to what fits in `wait_i32`'s signed 64-bit
        // nanosecond argument.
        let nanos = dur.as_nanos();
        let nanos = cmp::min(i64::max_value() as u128, nanos);

        // If the return value is 2 then a timeout happened, so we return
        // `false` as we weren't actually notified.
        let ret = atomic::wait_i32(self.ptr(), ticket, nanos as i64) != 2;
        mutex.lock();
        return ret
    }

    #[inline]
    pub unsafe fn destroy(&self) {
        // nothing to do
    }

    #[inline]
    fn ptr(&self) -> *mut i32 {
        // `wait_i32`/`wake` operate on an `i32` address; verify that
        // reinterpreting the `usize` counter is size-compatible on this
        // target before handing out the pointer.
        assert_eq!(mem::size_of::<usize>(), mem::size_of::<i32>());
        &self.cnt as *const AtomicUsize as *mut i32
    }
}

‎src/libstd/sys/wasm/mod.rs

+18-4
Original file line numberDiff line numberDiff line change
@@ -36,24 +36,38 @@ pub mod args;
3636
#[cfg(feature = "backtrace")]
3737
pub mod backtrace;
3838
pub mod cmath;
39-
pub mod condvar;
4039
pub mod env;
4140
pub mod fs;
4241
pub mod memchr;
43-
pub mod mutex;
4442
pub mod net;
4543
pub mod os;
4644
pub mod os_str;
4745
pub mod path;
4846
pub mod pipe;
4947
pub mod process;
50-
pub mod rwlock;
5148
pub mod stack_overflow;
5249
pub mod thread;
53-
pub mod thread_local;
5450
pub mod time;
5551
pub mod stdio;
5652

53+
// Select the synchronization-primitive implementations for this target: when
// the experimental `atomics` target feature is enabled, route the standard
// module names to the wasm-atomics-based files; otherwise use the default
// single-threaded stub modules in this directory.
cfg_if! {
    if #[cfg(target_feature = "atomics")] {
        #[path = "condvar_atomics.rs"]
        pub mod condvar;
        #[path = "mutex_atomics.rs"]
        pub mod mutex;
        #[path = "rwlock_atomics.rs"]
        pub mod rwlock;
        #[path = "thread_local_atomics.rs"]
        pub mod thread_local;
    } else {
        pub mod condvar;
        pub mod mutex;
        pub mod rwlock;
        pub mod thread_local;
    }
}
70+
5771
// One-time platform runtime initialization; nothing is needed on wasm.
#[cfg(not(test))]
pub fn init() {
}

‎src/libstd/sys/wasm/mutex_atomics.rs

+163
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,163 @@
1+
// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
2+
// file at the top-level directory of this distribution and at
3+
// http://rust-lang.org/COPYRIGHT.
4+
//
5+
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6+
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7+
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8+
// option. This file may not be copied, modified, or distributed
9+
// except according to those terms.
10+
11+
use arch::wasm32::atomic;
12+
use cell::UnsafeCell;
13+
use mem;
14+
use sync::atomic::{AtomicUsize, AtomicU64, Ordering::SeqCst};
15+
16+
/// Mutex for `wasm32` with the `atomics` feature, built on the wasm
/// `atomic.wait` / `atomic.wake` instructions.
pub struct Mutex {
    // 0 == unlocked, 1 == locked.
    locked: AtomicUsize,
}

// Mutexes have a pretty simple implementation where they contain an `i32`
// internally that is 0 when unlocked and 1 when the mutex is locked.
// Acquisition has a fast path where it attempts to cmpxchg the 0 to a 1, and
// if it fails it then waits for a notification. Releasing a lock is then done
// by swapping in 0 and then notifying any waiters, if present.

impl Mutex {
    /// Creates a new, unlocked mutex.
    pub const fn new() -> Mutex {
        Mutex { locked: AtomicUsize::new(0) }
    }

    #[inline]
    pub unsafe fn init(&mut self) {
        // nothing to do
    }

    pub unsafe fn lock(&self) {
        while !self.try_lock() {
            // Block while the word still reads 1 (locked). A `wake` from
            // `unlock`, or observing a not-equal value due to a racing
            // unlock, sends us back around the loop to retry the cmpxchg.
            let val = atomic::wait_i32(
                self.ptr(),
                1, // we expect our mutex is locked
                -1, // wait infinitely
            );
            // we should have either woke up (0) or got a not-equal due to a
            // race (1). We should never time out (2)
            debug_assert!(val == 0 || val == 1);
        }
    }

    pub unsafe fn unlock(&self) {
        let prev = self.locked.swap(0, SeqCst);
        // We must have held the lock (value 1) to be unlocking it.
        debug_assert_eq!(prev, 1);
        atomic::wake(self.ptr(), 1); // wake up one waiter, if any
    }

    #[inline]
    pub unsafe fn try_lock(&self) -> bool {
        // Fast path: 0 -> 1 transition succeeds only if currently unlocked.
        self.locked.compare_exchange(0, 1, SeqCst, SeqCst).is_ok()
    }

    #[inline]
    pub unsafe fn destroy(&self) {
        // nothing to do
    }

    #[inline]
    fn ptr(&self) -> *mut i32 {
        // `wait_i32` needs an `i32` address; verify the reinterpretation of
        // the `usize` state word is size-compatible on this target.
        assert_eq!(mem::size_of::<usize>(), mem::size_of::<i32>());
        &self.locked as *const AtomicUsize as *mut isize as *mut i32
    }
}
71+
72+
/// Reentrant (recursive) mutex for `wasm32` with atomics. Currently unusable
/// in practice because `thread_id()` below is unimplemented and panics.
pub struct ReentrantMutex {
    // 0 == unlocked; otherwise holds (owner's thread id + 1).
    owner: AtomicU64,
    // Number of *extra* acquisitions by the owner; only touched while the
    // lock is held, hence the plain `UnsafeCell` rather than an atomic.
    recursions: UnsafeCell<u32>,
}

// Sound because `recursions` is only accessed by the thread that currently
// owns the lock (it won the `owner` cmpxchg or re-entered as the owner).
unsafe impl Send for ReentrantMutex {}
unsafe impl Sync for ReentrantMutex {}

// Reentrant mutexes are similarly implemented to mutexes above except that
// instead of "1" meaning unlocked we use the id of a thread to represent
// whether it has locked a mutex. That way we have an atomic counter which
// always holds the id of the thread that currently holds the lock (or 0 if the
// lock is unlocked).
//
// Once a thread acquires a lock recursively, which it detects by looking at
// the value that's already there, it will update a local `recursions` counter
// in a nonatomic fashion (as we hold the lock). The lock is then fully
// released when this recursion counter reaches 0.

impl ReentrantMutex {
    pub unsafe fn uninitialized() -> ReentrantMutex {
        ReentrantMutex {
            owner: AtomicU64::new(0),
            recursions: UnsafeCell::new(0),
        }
    }

    pub unsafe fn init(&mut self) {
        // nothing to do...
    }

    pub unsafe fn lock(&self) {
        let me = thread_id();
        while let Err(owner) = self._try_lock(me) {
            // Sleep while `owner` still holds the lock; waking (0) or a
            // not-equal race (1) loops back to retry. Timeouts (2) can't
            // happen with an infinite (-1) wait.
            let val = atomic::wait_i64(self.ptr(), owner as i64, -1);
            debug_assert!(val == 0 || val == 1);
        }
    }

    #[inline]
    pub unsafe fn try_lock(&self) -> bool {
        self._try_lock(thread_id()).is_ok()
    }

    /// Attempts one acquisition for thread `id`. `Ok(())` when the lock was
    /// taken (fresh or reentrant); `Err(owner)` with the current owner's
    /// stored value when someone else holds it.
    #[inline]
    unsafe fn _try_lock(&self, id: u64) -> Result<(), u64> {
        let id = id.checked_add(1).unwrap(); // make sure `id` isn't 0
        match self.owner.compare_exchange(0, id, SeqCst, SeqCst) {
            // we transitioned from unlocked to locked
            Ok(_) => {
                debug_assert_eq!(*self.recursions.get(), 0);
                Ok(())
            }

            // we currently own this lock, so let's update our count and return
            // true.
            Err(n) if n == id => {
                *self.recursions.get() += 1;
                Ok(())
            }

            // Someone else owns the lock, let our caller take care of it
            Err(other) => Err(other),
        }
    }

    pub unsafe fn unlock(&self) {
        // If we didn't ever recursively lock the lock then we fully unlock the
        // mutex and wake up a waiter, if any. Otherwise we decrement our
        // recursive counter and let someone else take care of the zero.
        match *self.recursions.get() {
            0 => {
                self.owner.swap(0, SeqCst);
                atomic::wake(self.ptr() as *mut i32, 1); // wake up one waiter, if any
            }
            ref mut n => *n -= 1,
        }
    }

    pub unsafe fn destroy(&self) {
        // nothing to do...
    }

    #[inline]
    fn ptr(&self) -> *mut i64 {
        // Address of the owner word, for the `wait_i64`/`wake` intrinsics.
        &self.owner as *const AtomicU64 as *mut i64
    }
}
160+
161+
// Identifier of the current thread, used for lock-ownership tracking above.
// There is no way to obtain a thread id on wasm yet, so this always panics,
// which makes `ReentrantMutex` unusable for now.
fn thread_id() -> u64 {
    panic!("thread ids not implemented on wasm with atomics yet")
}

‎src/libstd/sys/wasm/rwlock_atomics.rs

+161
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,161 @@
1+
// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
2+
// file at the top-level directory of this distribution and at
3+
// http://rust-lang.org/COPYRIGHT.
4+
//
5+
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6+
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7+
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8+
// option. This file may not be copied, modified, or distributed
9+
// except according to those terms.
10+
11+
use cell::UnsafeCell;
12+
use sys::mutex::Mutex;
13+
use sys::condvar::Condvar;
14+
15+
/// Reader-writer lock for `wasm32` with atomics, composed from this target's
/// `Mutex` and `Condvar` primitives.
pub struct RWLock {
    // Protects `state`; every method locks it before touching the cell.
    lock: Mutex,
    // Notified whenever `state` may have changed.
    cond: Condvar,
    // Current reader/writer accounting.
    state: UnsafeCell<State>,
}

// Lock state: free, held by `n` readers, or held by exactly one writer.
enum State {
    Unlocked,
    Reading(usize),
    Writing,
}

// `UnsafeCell` isn't `Sync` on its own; this is sound because `state` is only
// ever accessed with `lock` held (see the methods below).
unsafe impl Send for RWLock {}
unsafe impl Sync for RWLock {}

// This rwlock implementation is a relatively simple implementation which has a
// condition variable for readers/writers as well as a mutex protecting the
// internal state of the lock. A current downside of the implementation is that
// unlocking the lock will notify *all* waiters rather than just readers or just
// writers. This can cause lots of "thundering stampede" problems. While
// hopefully correct this implementation is very likely to want to be changed in
// the future.
37+
38+
impl RWLock {
    pub const fn new() -> RWLock {
        RWLock {
            lock: Mutex::new(),
            cond: Condvar::new(),
            state: UnsafeCell::new(State::Unlocked),
        }
    }

    #[inline]
    pub unsafe fn read(&self) {
        self.lock.lock();
        // Wait until no writer holds the lock, then register as a reader.
        while !(*self.state.get()).inc_readers() {
            self.cond.wait(&self.lock);
        }
        self.lock.unlock();
    }

    #[inline]
    pub unsafe fn try_read(&self) -> bool {
        self.lock.lock();
        let ok = (*self.state.get()).inc_readers();
        self.lock.unlock();
        return ok
    }

    #[inline]
    pub unsafe fn write(&self) {
        self.lock.lock();
        // Wait until the lock is fully free, then claim exclusive access.
        while !(*self.state.get()).inc_writers() {
            self.cond.wait(&self.lock);
        }
        self.lock.unlock();
    }

    #[inline]
    pub unsafe fn try_write(&self) -> bool {
        self.lock.lock();
        let ok = (*self.state.get()).inc_writers();
        self.lock.unlock();
        return ok
    }

    #[inline]
    pub unsafe fn read_unlock(&self) {
        self.lock.lock();
        // `dec_readers` reports whether we were the last reader; only then is
        // anyone unblocked.
        let notify = (*self.state.get()).dec_readers();
        self.lock.unlock();
        if notify {
            // FIXME: should only wake up one of these some of the time
            self.cond.notify_all();
        }
    }

    #[inline]
    pub unsafe fn write_unlock(&self) {
        self.lock.lock();
        (*self.state.get()).dec_writers();
        self.lock.unlock();
        // FIXME: should only wake up one of these some of the time
        self.cond.notify_all();
    }

    #[inline]
    pub unsafe fn destroy(&self) {
        self.lock.destroy();
        self.cond.destroy();
    }
}
107+
108+
impl State {
109+
fn inc_readers(&mut self) -> bool {
110+
match *self {
111+
State::Unlocked => {
112+
*self = State::Reading(1);
113+
true
114+
}
115+
State::Reading(ref mut cnt) => {
116+
*cnt += 1;
117+
true
118+
}
119+
State::Writing => false
120+
}
121+
}
122+
123+
fn inc_writers(&mut self) -> bool {
124+
match *self {
125+
State::Unlocked => {
126+
*self = State::Writing;
127+
true
128+
}
129+
State::Reading(_) |
130+
State::Writing => false
131+
}
132+
}
133+
134+
fn dec_readers(&mut self) -> bool {
135+
let zero = match *self {
136+
State::Reading(ref mut cnt) => {
137+
*cnt -= 1;
138+
*cnt == 0
139+
}
140+
State::Unlocked |
141+
State::Writing => invalid(),
142+
};
143+
if zero {
144+
*self = State::Unlocked;
145+
}
146+
zero
147+
}
148+
149+
fn dec_writers(&mut self) {
150+
match *self {
151+
State::Writing => {}
152+
State::Unlocked |
153+
State::Reading(_) => invalid(),
154+
}
155+
*self = State::Unlocked;
156+
}
157+
}
158+
159+
// Aborts on an impossible `State` transition (e.g. releasing a reader while
// unlocked); indicates lock misuse or an internal bug.
fn invalid() -> ! {
    panic!("inconsistent rwlock");
}

‎src/libstd/sys/wasm/thread.rs

+21
Original file line numberDiff line numberDiff line change
@@ -33,10 +33,31 @@ impl Thread {
3333
// nope
3434
}
3535

36+
// Without the `atomics` feature there is no way to block a wasm thread, so
// sleeping is unsupported and aborts.
#[cfg(not(target_feature = "atomics"))]
pub fn sleep(_dur: Duration) {
    panic!("can't sleep");
}
3940

41+
// Blocks the current thread for `dur` using a timed atomic wait on a dummy
// local that nobody ever notifies.
#[cfg(target_feature = "atomics")]
pub fn sleep(dur: Duration) {
    use arch::wasm32::atomic;
    use cmp;

    // Use an atomic wait to block the current thread artificially with a
    // timeout listed. Note that we should never be notified (return value
    // of 0) or our comparison should never fail (return value of 1) so we
    // should always only resume execution through a timeout (return value
    // 2).
    let mut nanos = dur.as_nanos();
    // Durations longer than `i64::MAX` nanoseconds are slept in chunks,
    // since the wait intrinsic's timeout argument is a signed 64-bit count.
    while nanos > 0 {
        let amt = cmp::min(i64::max_value() as u128, nanos);
        let mut x = 0;
        let val = unsafe { atomic::wait_i32(&mut x, 0, amt as i64) };
        debug_assert_eq!(val, 2);
        nanos -= amt;
    }
}
60+
4061
pub fn join(self) {
4162
match self.0 {}
4263
}
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,32 @@
1+
// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
2+
// file at the top-level directory of this distribution and at
3+
// http://rust-lang.org/COPYRIGHT.
4+
//
5+
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6+
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7+
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8+
// option. This file may not be copied, modified, or distributed
9+
// except according to those terms.
10+
11+
// TLS key handle. Unused in practice: every operation below panics because
// thread-local storage is not implemented for wasm with atomics yet.
pub type Key = usize;

pub unsafe fn create(_dtor: Option<unsafe extern fn(*mut u8)>) -> Key {
    panic!("TLS on wasm with atomics not implemented yet");
}

pub unsafe fn set(_key: Key, _value: *mut u8) {
    panic!("TLS on wasm with atomics not implemented yet");
}

pub unsafe fn get(_key: Key) -> *mut u8 {
    panic!("TLS on wasm with atomics not implemented yet");
}

pub unsafe fn destroy(_key: Key) {
    panic!("TLS on wasm with atomics not implemented yet");
}
28+
29+
/// Reports whether key creation must be synchronized across threads for this
/// platform's TLS implementation; it does not need to be here.
#[inline]
pub fn requires_synchronized_create() -> bool {
    return false;
}

‎src/stdsimd

0 commit comments

Comments
 (0)
Please sign in to comment.