//! This module defines an `IdleNotifiedSet`, which is a collection of elements.
//! Each element is intended to correspond to a task, and the collection will
//! keep track of which tasks have had their waker notified, and which have not.
//!
//! Each entry in the set holds some user-specified value. The value's type is
//! specified using the `T` parameter. It will usually be a `JoinHandle` or
//! similar.
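//!
//! A rough usage sketch (illustrative only; the set is crate-internal, so this
//! is not a doctest, and `my_value` / `waker` are stand-ins for caller-provided
//! values):
//!
//! ```ignore
//! let mut set = IdleNotifiedSet::new();
//!
//! // Newly inserted entries start out in the idle list.
//! set.insert_idle(my_value);
//!
//! // When an entry's waker is woken, the entry moves to the notified list.
//! // `pop_notified` pops such an entry (moving it back to the idle list) so
//! // that its value can be polled.
//! while let Some(mut entry) = set.pop_notified(&waker) {
//!     entry.with_value_and_context(|value, cx| {
//!         // poll `value` using `cx` here
//!     });
//! }
//!
//! // Remove all remaining entries, passing each value to the closure.
//! set.drain(drop);
//! ```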
use std::marker::PhantomPinned;
use std::mem::ManuallyDrop;
use std::ptr::NonNull;
use std::task::{Context, Waker};

use crate::loom::cell::UnsafeCell;
use crate::loom::sync::{Arc, Mutex};
use crate::util::linked_list::{self, Link};
use crate::util::{waker_ref, Wake};

type LinkedList<T> =
    linked_list::LinkedList<ListEntry<T>, <ListEntry<T> as linked_list::Link>::Target>;

/// This is the main handle to the collection.
pub(crate) struct IdleNotifiedSet<T> {
lists: Arc<Lists<T>>,
length: usize,
}

/// A handle to an entry that is guaranteed to be stored in the idle or notified
/// list of its `IdleNotifiedSet`. This value borrows the `IdleNotifiedSet`
/// mutably to prevent the entry from being moved to the `Neither` list, which
/// only the `IdleNotifiedSet` may do.
///
/// The main consequence of being stored in one of the lists is that the `value`
/// field has not yet been consumed.
///
/// Note: This entry can be moved from the idle to the notified list while this
/// object exists by waking its waker.
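///
/// A sketch of how the handle is typically used (illustrative only; `set` and
/// `waker` are stand-ins):
///
/// ```ignore
/// if let Some(mut entry) = set.pop_notified(&waker) {
///     // Access the stored value together with a `Context` whose waker will
///     // move this entry back to the notified list when woken.
///     entry.with_value_and_context(|value, cx| { /* poll `value` with `cx` */ });
///
///     // Alternatively, take the value out, removing the entry from the set:
///     // let value = entry.remove();
/// }
/// ```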
pub(crate) struct EntryInOneOfTheLists<'a, T> {
entry: Arc<ListEntry<T>>,
set: &'a mut IdleNotifiedSet<T>,
}

type Lists<T> = Mutex<ListsInner<T>>;

/// The linked lists hold strong references to the ListEntry items, and the
/// ListEntry items also hold a strong reference back to the Lists object, but
/// the destructor of the `IdleNotifiedSet` will clear the two lists, so once
/// that object is destroyed, no ref-cycles will remain.
struct ListsInner<T> {
notified: LinkedList<T>,
idle: LinkedList<T>,
    /// Whenever an entry is moved from the `idle` list to the `notified` list
    /// (i.e. when an idle entry's waker is woken), this waker is notified and
    /// consumed, if it exists.
waker: Option<Waker>,
}

/// Which of the two lists in the shared Lists object is this entry stored in?
///
/// If the value is `Idle`, then an entry's waker may move it to the notified
/// list. Otherwise, only the `IdleNotifiedSet` may move it.
///
/// If the value is `Neither`, then it is still possible that the entry is in
/// some third external list (this happens in `drain`).
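///
/// In terms of transitions: a new entry starts out `Idle` (see `insert_idle`);
/// the entry's waker may move it from `Idle` to `Notified` (see `wake_by_ref`);
/// the `IdleNotifiedSet` moves it from `Notified` back to `Idle` in
/// `pop_notified`; and `remove`/`drain` set it to `Neither`, which is terminal.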
#[derive(Copy, Clone, Eq, PartialEq)]
enum List {
Notified,
Idle,
Neither,
}

/// An entry in the list.
///
/// # Safety
///
/// The `my_list` field must only be accessed while holding the mutex in
/// `parent`. It is an invariant that the value of `my_list` corresponds to
/// which linked list in the `parent` holds this entry. Once this field takes
/// the value `Neither`, then it may never be modified again.
///
/// If the value of `my_list` is `Notified` or `Idle`, then the `pointers` field
/// must only be accessed while holding the mutex. If the value of `my_list` is
/// `Neither`, then the `pointers` field may be accessed by the
/// `IdleNotifiedSet` (this happens inside `drain`).
///
/// The `value` field is owned by the `IdleNotifiedSet` and may only be accessed
/// by the `IdleNotifiedSet`. The operation that sets the value of `my_list` to
/// `Neither` assumes ownership of the `value`, and it must either drop it or
/// move it out from this entry to prevent it from getting leaked. (Since the
/// two linked lists are emptied in the destructor of `IdleNotifiedSet`, the
/// value should not be leaked.)
struct ListEntry<T> {
/// The linked list pointers of the list this entry is in.
pointers: linked_list::Pointers<ListEntry<T>>,
/// Pointer to the shared `Lists` struct.
parent: Arc<Lists<T>>,
/// The value stored in this entry.
value: UnsafeCell<ManuallyDrop<T>>,
/// Used to remember which list this entry is in.
my_list: UnsafeCell<List>,
/// Required by the `linked_list::Pointers` field.
_pin: PhantomPinned,
}

generate_addr_of_methods! {
impl<T> ListEntry<T> {
unsafe fn addr_of_pointers(self: NonNull<Self>) -> NonNull<linked_list::Pointers<ListEntry<T>>> {
&self.pointers
}
}
}

// With mutable access to the `IdleNotifiedSet`, you can get mutable access to
// the values.
unsafe impl<T: Send> Send for IdleNotifiedSet<T> {}

// With the current API we strictly speaking don't even need `T: Sync`, but we
// require it anyway to support adding &self APIs that access the values in the
// future.
unsafe impl<T: Sync> Sync for IdleNotifiedSet<T> {}

// These impls control when it is safe to create a Waker. Since the waker does
// not allow access to the value in any way (including its destructor), it is
// not necessary for `T` to be Send or Sync.
unsafe impl<T> Send for ListEntry<T> {}
unsafe impl<T> Sync for ListEntry<T> {}

impl<T> IdleNotifiedSet<T> {
/// Create a new IdleNotifiedSet.
pub(crate) fn new() -> Self {
let lists = Mutex::new(ListsInner {
notified: LinkedList::new(),
idle: LinkedList::new(),
waker: None,
});
IdleNotifiedSet {
lists: Arc::new(lists),
length: 0,
}
    }

    pub(crate) fn len(&self) -> usize {
        self.length
    }

    pub(crate) fn is_empty(&self) -> bool {
        self.length == 0
    }

    /// Insert the given value into the `idle` list.
    pub(crate) fn insert_idle(&mut self, value: T) -> EntryInOneOfTheLists<'_, T> {
self.length += 1;
let entry = Arc::new(ListEntry {
parent: self.lists.clone(),
value: UnsafeCell::new(ManuallyDrop::new(value)),
my_list: UnsafeCell::new(List::Idle),
pointers: linked_list::Pointers::new(),
_pin: PhantomPinned,
});
{
let mut lock = self.lists.lock();
lock.idle.push_front(entry.clone());
}
// Safety: We just put the entry in the idle list, so it is in one of the lists.
EntryInOneOfTheLists { entry, set: self }
    }

    /// Pop an entry from the notified list to poll it. The entry is moved to
/// the idle list atomically.
pub(crate) fn pop_notified(&mut self, waker: &Waker) -> Option<EntryInOneOfTheLists<'_, T>> {
// We don't decrement the length because this call moves the entry to
// the idle list rather than removing it.
if self.length == 0 {
// Fast path.
return None;
}
let mut lock = self.lists.lock();
let should_update_waker = match lock.waker.as_mut() {
Some(cur_waker) => !waker.will_wake(cur_waker),
None => true,
};
if should_update_waker {
lock.waker = Some(waker.clone());
}
// Pop the entry, returning None if empty.
let entry = lock.notified.pop_back()?;
lock.idle.push_front(entry.clone());
// Safety: We are holding the lock.
entry.my_list.with_mut(|ptr| unsafe {
*ptr = List::Idle;
});
drop(lock);
// Safety: We just put the entry in the idle list, so it is in one of the lists.
Some(EntryInOneOfTheLists { entry, set: self })
    }

    /// Call a function on every element in this list.
pub(crate) fn for_each<F: FnMut(&mut T)>(&mut self, mut func: F) {
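        // Helper that records a raw pointer to the value of every entry in
        // `list`, walking the intrusive list from back to front.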
fn get_ptrs<T>(list: &mut LinkedList<T>, ptrs: &mut Vec<*mut T>) {
let mut node = list.last();
while let Some(entry) = node {
ptrs.push(entry.value.with_mut(|ptr| {
let ptr: *mut ManuallyDrop<T> = ptr;
let ptr: *mut T = ptr.cast();
ptr
}));
let prev = entry.pointers.get_prev();
node = prev.map(|prev| unsafe { &*prev.as_ptr() });
}
}
// Atomically get a raw pointer to the value of every entry.
//
// Since this only locks the mutex once, it is not possible for a value
// to get moved from the idle list to the notified list during the
// operation, which would otherwise result in some value being listed
// twice.
let mut ptrs = Vec::with_capacity(self.len());
{
let mut lock = self.lists.lock();
get_ptrs(&mut lock.idle, &mut ptrs);
get_ptrs(&mut lock.notified, &mut ptrs);
}
debug_assert_eq!(ptrs.len(), ptrs.capacity());
for ptr in ptrs {
// Safety: When we grabbed the pointers, the entries were in one of
// the two lists. This means that their value was valid at the time,
// and it must still be valid because we are the IdleNotifiedSet,
// and only we can remove an entry from the two lists. (It's
// possible that an entry is moved from one list to the other during
// this loop, but that is ok.)
func(unsafe { &mut *ptr });
}
    }

    /// Remove all entries in both lists, applying some function to each element.
///
    /// The closure is called on every element even if it panics on one of them.
    /// A second panic during that cleanup is a double panic and aborts the
    /// application.
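    ///
    /// A sketch of typical use (illustrative only): the set's own destructor
    /// clears both lists by draining with `drop`:
    ///
    /// ```ignore
    /// set.drain(drop);
    /// ```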
pub(crate) fn drain<F: FnMut(T)>(&mut self, func: F) {
if self.length == 0 {
// Fast path.
return;
}
self.length = 0;
// The LinkedList is not cleared on panic, so we use a bomb to clear it.
//
// This value has the invariant that any entry in its `all_entries` list
// has `my_list` set to `Neither` and that the value has not yet been
// dropped.
struct AllEntries<T, F: FnMut(T)> {
all_entries: LinkedList<T>,
func: F,
}
impl<T, F: FnMut(T)> AllEntries<T, F> {
fn pop_next(&mut self) -> bool {
if let Some(entry) = self.all_entries.pop_back() {
// Safety: We just took this value from the list, so we can
// destroy the value in the entry.
entry
.value
.with_mut(|ptr| unsafe { (self.func)(ManuallyDrop::take(&mut *ptr)) });
true
} else {
false
}
}
}
impl<T, F: FnMut(T)> Drop for AllEntries<T, F> {
fn drop(&mut self) {
while self.pop_next() {}
}
}
let mut all_entries = AllEntries {
all_entries: LinkedList::new(),
func,
};
// Atomically move all entries to the new linked list in the AllEntries
// object.
{
let mut lock = self.lists.lock();
unsafe {
// Safety: We are holding the lock and `all_entries` is a new
// LinkedList.
move_to_new_list(&mut lock.idle, &mut all_entries.all_entries);
move_to_new_list(&mut lock.notified, &mut all_entries.all_entries);
}
}
// Keep destroying entries in the list until it is empty.
//
// If the closure panics, then the destructor of the `AllEntries` bomb
// ensures that we keep running the destructor on the remaining values.
// A second panic will abort the program.
while all_entries.pop_next() {}
}
}

/// # Safety
///
/// The mutex for the entries must be held, and the target list must be such
/// that setting `my_list` to `Neither` is ok.
unsafe fn move_to_new_list<T>(from: &mut LinkedList<T>, to: &mut LinkedList<T>) {
while let Some(entry) = from.pop_back() {
entry.my_list.with_mut(|ptr| {
*ptr = List::Neither;
});
to.push_front(entry);
}
}

impl<'a, T> EntryInOneOfTheLists<'a, T> {
/// Remove this entry from the list it is in, returning the value associated
/// with the entry.
///
    /// This takes the value out of the entry, since the entry is no longer
    /// guaranteed to be in a list once it has been removed.
pub(crate) fn remove(self) -> T {
self.set.length -= 1;
{
let mut lock = self.set.lists.lock();
// Safety: We are holding the lock so there is no race, and we will
// remove the entry afterwards to uphold invariants.
let old_my_list = self.entry.my_list.with_mut(|ptr| unsafe {
let old_my_list = *ptr;
*ptr = List::Neither;
old_my_list
});
let list = match old_my_list {
List::Idle => &mut lock.idle,
List::Notified => &mut lock.notified,
                // An `EntryInOneOfTheLists` is guaranteed to be in one of the
                // two lists, so `Neither` is unreachable here.
List::Neither => unreachable!(),
};
unsafe {
// Safety: We just checked that the entry is in this particular
// list.
list.remove(ListEntry::as_raw(&self.entry)).unwrap();
}
}
// By setting `my_list` to `Neither`, we have taken ownership of the
// value. We return it to the caller.
//
// Safety: We have a mutable reference to the `IdleNotifiedSet` that
// owns this entry, so we can use its permission to access the value.
self.entry
.value
.with_mut(|ptr| unsafe { ManuallyDrop::take(&mut *ptr) })
    }

    /// Access the value in this entry together with a context for its waker.
pub(crate) fn with_value_and_context<F, U>(&mut self, func: F) -> U
where
F: FnOnce(&mut T, &mut Context<'_>) -> U,
T: 'static,
{
let waker = waker_ref(&self.entry);
let mut context = Context::from_waker(&waker);
// Safety: We have a mutable reference to the `IdleNotifiedSet` that
// owns this entry, so we can use its permission to access the value.
self.entry
.value
.with_mut(|ptr| unsafe { func(&mut *ptr, &mut context) })
}
}

impl<T> Drop for IdleNotifiedSet<T> {
fn drop(&mut self) {
// Clear both lists.
self.drain(drop);
#[cfg(debug_assertions)]
if !std::thread::panicking() {
let lock = self.lists.lock();
assert!(lock.idle.is_empty());
assert!(lock.notified.is_empty());
}
}
}

impl<T: 'static> Wake for ListEntry<T> {
fn wake_by_ref(me: &Arc<Self>) {
let mut lock = me.parent.lock();
// Safety: We are holding the lock and we will update the lists to
// maintain invariants.
let old_my_list = me.my_list.with_mut(|ptr| unsafe {
let old_my_list = *ptr;
if old_my_list == List::Idle {
*ptr = List::Notified;
}
old_my_list
});
if old_my_list == List::Idle {
            // Move this entry from the idle list to the notified list.
let me = unsafe {
// Safety: We just checked that we are in this particular list.
lock.idle.remove(ListEntry::as_raw(me)).unwrap()
};
lock.notified.push_front(me);
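            // If the owner of the set registered a waker, fire it now. The
            // lock is dropped first so that the waker is not invoked while
            // the mutex is held.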
if let Some(waker) = lock.waker.take() {
drop(lock);
waker.wake();
}
}
    }

    fn wake(me: Arc<Self>) {
Self::wake_by_ref(&me)
}
}

/// # Safety
///
/// `ListEntry` is forced to be `!Unpin` by its `PhantomPinned` field, which is
/// required by `linked_list::Pointers`.
unsafe impl<T> linked_list::Link for ListEntry<T> {
type Handle = Arc<ListEntry<T>>;
type Target = ListEntry<T>;
fn as_raw(handle: &Self::Handle) -> NonNull<ListEntry<T>> {
let ptr: *const ListEntry<T> = Arc::as_ptr(handle);
// Safety: We can't get a null pointer from `Arc::as_ptr`.
unsafe { NonNull::new_unchecked(ptr as *mut ListEntry<T>) }
}
unsafe fn from_raw(ptr: NonNull<ListEntry<T>>) -> Arc<ListEntry<T>> {
Arc::from_raw(ptr.as_ptr())
}
unsafe fn pointers(
target: NonNull<ListEntry<T>>,
) -> NonNull<linked_list::Pointers<ListEntry<T>>> {
ListEntry::addr_of_pointers(target)
}
}

#[cfg(all(test, not(loom)))]
mod tests {
use crate::runtime::Builder;
use crate::task::JoinSet;
// A test that runs under miri.
//
// https://github.com/tokio-rs/tokio/pull/5693
#[test]
fn join_set_test() {
let rt = Builder::new_current_thread().build().unwrap();
let mut set = JoinSet::new();
set.spawn_on(futures::future::ready(()), rt.handle());
rt.block_on(set.join_next()).unwrap().unwrap();
}
}