// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

use arch::wasm32::atomic;
use cell::UnsafeCell;
use mem;
use sync::atomic::{AtomicUsize, AtomicU64, Ordering::SeqCst};
pub struct Mutex {
    locked: AtomicUsize,
}

// Mutexes have a pretty simple implementation where they contain an `i32`
// internally that is 0 when unlocked and 1 when the mutex is locked.
// Acquisition has a fast path where it attempts to `cmpxchg` the 0 to a 1,
// and if it fails it then waits for a notification. Releasing a lock is then
// done by swapping in 0 and then notifying any waiters, if present.
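//
// A sketch of the state transitions, under the assumption that `wait_i32`
// and `wake` map to wasm's `i32.atomic.wait` and `atomic.wake` instructions:
//
//   0 --compare_exchange(0, 1)--> 1          fast-path acquire
//   1 --wait_i32(ptr, 1, -1)----> blocked    slow-path acquire (retried)
//   1 --swap(0) + wake(ptr, 1)--> 0          release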

impl Mutex {
    pub const fn new() -> Mutex {
        Mutex { locked: AtomicUsize::new(0) }
    }

    #[inline]
    pub unsafe fn init(&mut self) {
        // nothing to do
    }
    pub unsafe fn lock(&self) {
        while !self.try_lock() {
            let val = atomic::wait_i32(
                self.ptr(),
                1,  // we expect our mutex is locked
                -1, // wait infinitely
            );
            // we should have either been woken up (0) or hit a not-equal
            // value due to a race (1); we should never time out (2)
            debug_assert!(val == 0 || val == 1);
        }
    }

    pub unsafe fn unlock(&self) {
        let prev = self.locked.swap(0, SeqCst);
        debug_assert_eq!(prev, 1);
        atomic::wake(self.ptr(), 1); // wake up one waiter, if any
    }

    #[inline]
    pub unsafe fn try_lock(&self) -> bool {
        self.locked.compare_exchange(0, 1, SeqCst, SeqCst).is_ok()
    }

    #[inline]
    pub unsafe fn destroy(&self) {
        // nothing to do
    }

    #[inline]
    fn ptr(&self) -> *mut i32 {
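        // `wait_i32` and `wake` take an `i32` address, so reinterpret our
        // `AtomicUsize`; the assert below double-checks that the two types
        // really are the same size on this target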
        assert_eq!(mem::size_of::<usize>(), mem::size_of::<i32>());
        &self.locked as *const AtomicUsize as *const i32 as *mut i32
    }
}
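
// A minimal usage sketch, assuming a wasm32 target with the `atomics`
// feature enabled (the only configuration where `atomic::wait_i32` exists):
//
//     static M: Mutex = Mutex::new();
//
//     unsafe {
//         M.lock();   // blocks in `wait_i32` if another thread holds the lock
//         /* ... critical section ... */
//         M.unlock(); // wakes at most one blocked thread
//     }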

pub struct ReentrantMutex {
    owner: AtomicU64,
    recursions: UnsafeCell<u32>,
}

unsafe impl Send for ReentrantMutex {}
unsafe impl Sync for ReentrantMutex {}

// Reentrant mutexes are implemented similarly to the mutexes above, except
// that instead of "1" meaning locked we use the id of a thread to represent
// whether it has locked a mutex. That way we have an atomic value which
// always holds the id of the thread that currently holds the lock (or 0 if
// the lock is unlocked).
//
// Once a thread acquires a lock recursively, which it detects by looking at
// the value that's already there, it will update a local `recursions` counter
// in a nonatomic fashion (as we hold the lock). The lock is then fully
// released when this recursion counter reaches 0.
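//
// A worked sketch of the owner encoding: `thread_id() + 1` is stored so that
// no thread id collides with the 0 "unlocked" sentinel (the `+ 1` happens in
// `_try_lock` below):
//
//   owner == 0           unlocked
//   owner == me + 1      held by us; `recursions` is bumped on re-lock
//   owner == other + 1   held by another thread; `wait_i64` until it changes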

impl ReentrantMutex {
    pub unsafe fn uninitialized() -> ReentrantMutex {
        ReentrantMutex {
            owner: AtomicU64::new(0),
            recursions: UnsafeCell::new(0),
        }
    }

    pub unsafe fn init(&mut self) {
        // nothing to do...
    }

    pub unsafe fn lock(&self) {
        let me = thread_id();
        while let Err(owner) = self._try_lock(me) {
            let val = atomic::wait_i64(self.ptr(), owner as i64, -1);
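            // same contract as in `Mutex::lock` above: woken up (0) or
            // raced with an unlock (1), but never a timeout (2)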
            debug_assert!(val == 0 || val == 1);
        }
    }

    #[inline]
    pub unsafe fn try_lock(&self) -> bool {
        self._try_lock(thread_id()).is_ok()
    }

    #[inline]
    unsafe fn _try_lock(&self, id: u64) -> Result<(), u64> {
        let id = id.checked_add(1).unwrap(); // make sure `id` isn't 0
        match self.owner.compare_exchange(0, id, SeqCst, SeqCst) {
            // we transitioned from unlocked to locked
            Ok(_) => {
                debug_assert_eq!(*self.recursions.get(), 0);
                Ok(())
            }

            // we currently own this lock, so let's update our count and
            // report success
            Err(n) if n == id => {
                *self.recursions.get() += 1;
                Ok(())
            }

            // Someone else owns the lock, let our caller take care of it
            Err(other) => Err(other),
        }
    }

    pub unsafe fn unlock(&self) {
        // If we never locked recursively then we fully unlock the mutex and
        // wake up a waiter, if any. Otherwise we decrement our recursion
        // counter and let a later `unlock` take care of the zero.
        match *self.recursions.get() {
            0 => {
                self.owner.swap(0, SeqCst);
                atomic::wake(self.ptr() as *mut i32, 1); // wake up one waiter, if any
            }
            ref mut n => *n -= 1,
        }
    }

    pub unsafe fn destroy(&self) {
        // nothing to do...
    }

    #[inline]
    fn ptr(&self) -> *mut i64 {
        &self.owner as *const AtomicU64 as *mut i64
    }
}
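
// A minimal reentrancy sketch (illustrative only: it assumes wasm32 atomics,
// and `thread_id` below is not implemented yet, so this cannot run today):
//
//     unsafe {
//         let m = ReentrantMutex::uninitialized();
//         m.lock();
//         m.lock();   // same thread: `recursions` goes from 0 to 1
//         m.unlock(); // `recursions` back to 0, lock still held
//         m.unlock(); // owner cleared and one waiter woken, if any
//     }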

fn thread_id() -> u64 {
    panic!("thread ids not implemented on wasm with atomics yet")
}