|
8 | 8 | use super::{Guard, Lock, NeedsLockClass};
|
9 | 9 | use crate::bindings;
|
10 | 10 | use crate::str::CStr;
|
| 11 | +use crate::Result; |
| 12 | +use alloc::boxed::Box; |
11 | 13 | use core::{cell::UnsafeCell, marker::PhantomPinned, mem::MaybeUninit, pin::Pin};
|
12 | 14 |
|
13 | 15 | extern "C" {
|
@@ -135,3 +137,118 @@ impl NeedsLockClass for CondVar {
|
135 | 137 | unsafe { bindings::__init_waitqueue_head(self.wait_list.get(), name.as_char_ptr(), key) };
|
136 | 138 | }
|
137 | 139 | }
|
| 140 | + |
/// Exposes the kernel's [`struct wait_queue_head`] as a condition variable. It allows the caller to
/// atomically release the given lock and go to sleep. It reacquires the lock when it wakes up. And
/// it wakes up when notified by another thread (via [`BoxedCondVar::notify_one`] or
/// [`BoxedCondVar::notify_all`]) or because the thread received a signal.
///
/// [`struct wait_queue_head`]: ../../../include/linux/wait.h
///
/// # Invariants
///
/// `wait_list` never moves out of its [`Box`].
pub struct BoxedCondVar {
    /// A `bindings::wait_queue_head` kernel object.
    /// It contains a [`struct list_head`] that is self-referential, so
    /// it cannot be safely moved once it is initialised.
    /// We guarantee that it will never move, as per the invariant above: the heap allocation
    /// stays at a stable address even if the `BoxedCondVar` itself is moved.
    wait_list: Box<UnsafeCell<bindings::wait_queue_head>>,
}
| 158 | + |
// SAFETY: `BoxedCondVar` only uses a `struct wait_queue_head`, which is safe to use on any thread.
unsafe impl Send for BoxedCondVar {}
| 161 | + |
// SAFETY: `BoxedCondVar` only uses a `struct wait_queue_head`, which is safe to use on multiple
// threads concurrently.
unsafe impl Sync for BoxedCondVar {}
| 165 | + |
impl BoxedCondVar {
    /// Constructs a new condition variable.
    ///
    /// The `wait_queue_head` is heap-allocated with [`Box::try_new`], so allocation failure is
    /// reported as an error rather than aborting.
    ///
    /// # Safety
    ///
    /// `key` must point to a valid memory location as it will be used by the kernel.
    pub unsafe fn new_with_key(
        name: &'static CStr,
        key: *mut bindings::lock_class_key,
    ) -> Result<Self> {
        let cv = Self {
            wait_list: Box::try_new(UnsafeCell::new(bindings::wait_queue_head::default()))?,
        };
        // SAFETY: `wait_list` points to valid boxed memory; the caller guarantees that `key`
        // is valid.
        unsafe {
            bindings::__init_waitqueue_head(cv.wait_list.get(), name.as_char_ptr(), key);
        }
        Ok(cv)
    }

    /// Atomically releases the given lock (whose ownership is proven by the guard) and puts the
    /// thread to sleep. It wakes up when notified by [`BoxedCondVar::notify_one`] or
    /// [`BoxedCondVar::notify_all`], or when the thread receives a signal.
    ///
    /// Returns whether there is a signal pending.
    #[must_use = "wait returns if a signal is pending, so the caller must check the return value"]
    pub fn wait<L: Lock>(&self, guard: &mut Guard<'_, L>) -> bool {
        let lock = guard.lock;
        // The wait-queue entry lives on this thread's stack; `finish_wait` below removes it from
        // the queue before this function returns, so it never outlives this stack frame.
        let mut wait = MaybeUninit::<bindings::wait_queue_entry>::uninit();

        // SAFETY: `wait` points to valid memory.
        unsafe { rust_helper_init_wait(wait.as_mut_ptr()) };

        // Queue the entry and set the task state to interruptible *before* releasing the lock:
        // a notification arriving between `unlock` and `schedule` then makes `schedule` return
        // immediately instead of being lost.
        // SAFETY: Both `wait` and `wait_list` point to valid memory.
        unsafe {
            bindings::prepare_to_wait_exclusive(
                self.wait_list.get(),
                wait.as_mut_ptr(),
                bindings::TASK_INTERRUPTIBLE as _,
            );
        }

        // SAFETY: The guard is evidence that the caller owns the lock.
        unsafe { lock.unlock() };

        // SAFETY: No arguments, switches to another thread.
        unsafe { bindings::schedule() };

        // Reacquire the lock before returning so the guard the caller holds remains valid.
        lock.lock_noguard();

        // SAFETY: Both `wait` and `wait_list` point to valid memory.
        unsafe { bindings::finish_wait(self.wait_list.get(), wait.as_mut_ptr()) };

        super::signal_pending()
    }

    /// Calls the kernel function to notify the appropriate number of threads with the given flags.
    ///
    /// `count` is the number of exclusive waiters to wake (0 means all); `flags` is passed
    /// through to the waiters' wake functions as the wake key.
    fn notify(&self, count: i32, flags: u32) {
        // SAFETY: `wait_list` points to valid memory.
        unsafe {
            bindings::__wake_up(
                self.wait_list.get(),
                bindings::TASK_NORMAL,
                count,
                flags as _,
            )
        };
    }

    /// Wakes a single waiter up, if any. This is not 'sticky' in the sense that if no thread is
    /// waiting, the notification is lost completely (as opposed to automatically waking up the
    /// next waiter).
    pub fn notify_one(&self) {
        self.notify(1, 0);
    }

    /// Wakes all waiters up, if any. This is not 'sticky' in the sense that if no thread is
    /// waiting, the notification is lost completely (as opposed to automatically waking up the
    /// next waiter).
    pub fn notify_all(&self) {
        // A count of 0 tells `__wake_up` to wake all exclusive waiters.
        self.notify(0, 0);
    }

    /// Wakes all waiters up. If they were added by `epoll`, they are also removed from the list of
    /// waiters. This is useful when cleaning up a condition variable that may be waited on by
    /// threads that use `epoll`.
    pub fn free_waiters(&self) {
        // NOTE(review): `POLLFREE` is referenced unqualified; presumably a file-level constant
        // defined outside this chunk — confirm it is in scope. Also note `count` is 1 here,
        // not 0; presumably the POLLFREE key makes epoll wake functions dequeue themselves so
        // the wake-up sweeps the whole list — verify against `__wake_up` semantics.
        self.notify(1, bindings::POLLHUP | POLLFREE);
    }
}
0 commit comments