| #![stable(feature = "rust1", since = "1.0.0")] |
| |
| //! Thread-safe reference-counting pointers. |
| //! |
| //! See the [`Arc<T>`][arc] documentation for more details. |
| //! |
| //! [arc]: struct.Arc.html |
| |
| use core::any::Any; |
| use core::array::LengthAtMost32; |
| use core::sync::atomic; |
| use core::sync::atomic::Ordering::{Acquire, Relaxed, Release, SeqCst}; |
| use core::borrow; |
| use core::fmt; |
| use core::cmp::{self, Ordering}; |
| use core::iter; |
| use core::intrinsics::abort; |
| use core::mem::{self, align_of, align_of_val, size_of_val}; |
| use core::ops::{Deref, Receiver, CoerceUnsized, DispatchFromDyn}; |
| use core::pin::Pin; |
| use core::ptr::{self, NonNull}; |
| use core::marker::{Unpin, Unsize, PhantomData}; |
| use core::hash::{Hash, Hasher}; |
| use core::{isize, usize}; |
| use core::convert::{From, TryFrom}; |
| use core::slice::{self, from_raw_parts_mut}; |
| |
| use crate::alloc::{Global, Alloc, Layout, box_free, handle_alloc_error}; |
| use crate::boxed::Box; |
| use crate::rc::is_dangling; |
| use crate::string::String; |
| use crate::vec::Vec; |
| |
| #[cfg(test)] |
| mod tests; |
| |
/// A soft limit on the number of references that may be made to an `Arc`.
///
/// Going above this limit will abort your program, although not
/// necessarily at _exactly_ `MAX_REFCOUNT + 1` references.
| const MAX_REFCOUNT: usize = (isize::MAX) as usize; |
| |
| /// A thread-safe reference-counting pointer. 'Arc' stands for 'Atomically |
| /// Reference Counted'. |
| /// |
| /// The type `Arc<T>` provides shared ownership of a value of type `T`, |
| /// allocated in the heap. Invoking [`clone`][clone] on `Arc` produces |
| /// a new `Arc` instance, which points to the same value on the heap as the |
| /// source `Arc`, while increasing a reference count. When the last `Arc` |
| /// pointer to a given value is destroyed, the pointed-to value is also |
| /// destroyed. |
| /// |
| /// Shared references in Rust disallow mutation by default, and `Arc` is no |
| /// exception: you cannot generally obtain a mutable reference to something |
| /// inside an `Arc`. If you need to mutate through an `Arc`, use |
| /// [`Mutex`][mutex], [`RwLock`][rwlock], or one of the [`Atomic`][atomic] |
| /// types. |
| /// |
| /// ## Thread Safety |
| /// |
| /// Unlike [`Rc<T>`], `Arc<T>` uses atomic operations for its reference |
| /// counting. This means that it is thread-safe. The disadvantage is that |
| /// atomic operations are more expensive than ordinary memory accesses. If you |
| /// are not sharing reference-counted values between threads, consider using |
| /// [`Rc<T>`] for lower overhead. [`Rc<T>`] is a safe default, because the |
| /// compiler will catch any attempt to send an [`Rc<T>`] between threads. |
| /// However, a library might choose `Arc<T>` in order to give library consumers |
| /// more flexibility. |
| /// |
/// `Arc<T>` will implement [`Send`] and [`Sync`] as long as `T` implements
| /// [`Send`] and [`Sync`]. Why can't you put a non-thread-safe type `T` in an |
| /// `Arc<T>` to make it thread-safe? This may be a bit counter-intuitive at |
| /// first: after all, isn't the point of `Arc<T>` thread safety? The key is |
| /// this: `Arc<T>` makes it thread safe to have multiple ownership of the same |
| /// data, but it doesn't add thread safety to its data. Consider |
/// `Arc<`[`RefCell<T>`]`>`. [`RefCell<T>`] isn't [`Sync`], and if `Arc<T>` were always
| /// [`Send`], `Arc<`[`RefCell<T>`]`>` would be as well. But then we'd have a problem: |
| /// [`RefCell<T>`] is not thread safe; it keeps track of the borrowing count using |
| /// non-atomic operations. |
| /// |
| /// In the end, this means that you may need to pair `Arc<T>` with some sort of |
| /// [`std::sync`] type, usually [`Mutex<T>`][mutex]. |
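///
/// For example, a counter can be shared across threads by wrapping it in an
/// `Arc<`[`Mutex<T>`][mutex]`>`; a minimal sketch, where the `Mutex` provides
/// the synchronization and the `Arc` provides the shared ownership:
///
/// ```
/// use std::sync::{Arc, Mutex};
/// use std::thread;
///
/// let counter = Arc::new(Mutex::new(0));
/// let mut handles = Vec::new();
///
/// for _ in 0..4 {
///     let counter = Arc::clone(&counter);
///     handles.push(thread::spawn(move || {
///         *counter.lock().unwrap() += 1;
///     }));
/// }
///
/// for handle in handles {
///     handle.join().unwrap();
/// }
///
/// assert_eq!(*counter.lock().unwrap(), 4);
/// ```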
| /// |
| /// ## Breaking cycles with `Weak` |
| /// |
| /// The [`downgrade`][downgrade] method can be used to create a non-owning |
| /// [`Weak`][weak] pointer. A [`Weak`][weak] pointer can be [`upgrade`][upgrade]d |
| /// to an `Arc`, but this will return [`None`] if the value has already been |
| /// dropped. |
| /// |
| /// A cycle between `Arc` pointers will never be deallocated. For this reason, |
| /// [`Weak`][weak] is used to break cycles. For example, a tree could have |
| /// strong `Arc` pointers from parent nodes to children, and [`Weak`][weak] |
| /// pointers from children back to their parents. |
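///
/// A minimal sketch of that back-reference (the `Node` type here is purely
/// illustrative; only the child-to-parent `Weak` edge is shown):
///
/// ```
/// use std::sync::{Arc, Weak};
///
/// struct Node {
///     value: i32,
///     parent: Weak<Node>,
/// }
///
/// let parent = Arc::new(Node { value: 1, parent: Weak::new() });
/// let child = Arc::new(Node { value: 2, parent: Arc::downgrade(&parent) });
///
/// // The child can reach its parent while the parent is alive...
/// assert_eq!(1, child.parent.upgrade().unwrap().value);
///
/// // ...but its weak pointer does not keep the parent alive.
/// drop(parent);
/// assert!(child.parent.upgrade().is_none());
/// ```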
| /// |
| /// # Cloning references |
| /// |
| /// Creating a new reference from an existing reference counted pointer is done using the |
| /// `Clone` trait implemented for [`Arc<T>`][arc] and [`Weak<T>`][weak]. |
| /// |
| /// ``` |
| /// use std::sync::Arc; |
| /// let foo = Arc::new(vec![1.0, 2.0, 3.0]); |
| /// // The two syntaxes below are equivalent. |
| /// let a = foo.clone(); |
| /// let b = Arc::clone(&foo); |
| /// // a, b, and foo are all Arcs that point to the same memory location |
| /// ``` |
| /// |
| /// ## `Deref` behavior |
| /// |
| /// `Arc<T>` automatically dereferences to `T` (via the [`Deref`][deref] trait), |
| /// so you can call `T`'s methods on a value of type `Arc<T>`. To avoid name |
| /// clashes with `T`'s methods, the methods of `Arc<T>` itself are associated |
| /// functions, called using function-like syntax: |
| /// |
| /// ``` |
| /// use std::sync::Arc; |
| /// let my_arc = Arc::new(()); |
| /// |
| /// Arc::downgrade(&my_arc); |
| /// ``` |
| /// |
| /// [`Weak<T>`][weak] does not auto-dereference to `T`, because the value may have |
| /// already been destroyed. |
| /// |
| /// [arc]: struct.Arc.html |
| /// [weak]: struct.Weak.html |
| /// [`Rc<T>`]: ../../std/rc/struct.Rc.html |
| /// [clone]: ../../std/clone/trait.Clone.html#tymethod.clone |
| /// [mutex]: ../../std/sync/struct.Mutex.html |
| /// [rwlock]: ../../std/sync/struct.RwLock.html |
| /// [atomic]: ../../std/sync/atomic/index.html |
| /// [`Send`]: ../../std/marker/trait.Send.html |
| /// [`Sync`]: ../../std/marker/trait.Sync.html |
| /// [deref]: ../../std/ops/trait.Deref.html |
| /// [downgrade]: struct.Arc.html#method.downgrade |
| /// [upgrade]: struct.Weak.html#method.upgrade |
| /// [`None`]: ../../std/option/enum.Option.html#variant.None |
| /// [`RefCell<T>`]: ../../std/cell/struct.RefCell.html |
| /// [`std::sync`]: ../../std/sync/index.html |
| /// [`Arc::clone(&from)`]: #method.clone |
| /// |
| /// # Examples |
| /// |
| /// Sharing some immutable data between threads: |
| /// |
| // Note that we **do not** run these tests here. The windows builders get super |
| // unhappy if a thread outlives the main thread and then exits at the same time |
| // (something deadlocks) so we just avoid this entirely by not running these |
| // tests. |
| /// ```no_run |
| /// use std::sync::Arc; |
| /// use std::thread; |
| /// |
| /// let five = Arc::new(5); |
| /// |
| /// for _ in 0..10 { |
| /// let five = Arc::clone(&five); |
| /// |
| /// thread::spawn(move || { |
| /// println!("{:?}", five); |
| /// }); |
| /// } |
| /// ``` |
| /// |
| /// Sharing a mutable [`AtomicUsize`]: |
| /// |
| /// [`AtomicUsize`]: ../../std/sync/atomic/struct.AtomicUsize.html |
| /// |
| /// ```no_run |
| /// use std::sync::Arc; |
| /// use std::sync::atomic::{AtomicUsize, Ordering}; |
| /// use std::thread; |
| /// |
| /// let val = Arc::new(AtomicUsize::new(5)); |
| /// |
| /// for _ in 0..10 { |
| /// let val = Arc::clone(&val); |
| /// |
| /// thread::spawn(move || { |
| /// let v = val.fetch_add(1, Ordering::SeqCst); |
| /// println!("{:?}", v); |
| /// }); |
| /// } |
| /// ``` |
| /// |
| /// See the [`rc` documentation][rc_examples] for more examples of reference |
| /// counting in general. |
| /// |
| /// [rc_examples]: ../../std/rc/index.html#examples |
| #[cfg_attr(not(test), lang = "arc")] |
| #[stable(feature = "rust1", since = "1.0.0")] |
| pub struct Arc<T: ?Sized> { |
| ptr: NonNull<ArcInner<T>>, |
| phantom: PhantomData<T>, |
| } |
| |
| #[stable(feature = "rust1", since = "1.0.0")] |
| unsafe impl<T: ?Sized + Sync + Send> Send for Arc<T> {} |
| #[stable(feature = "rust1", since = "1.0.0")] |
| unsafe impl<T: ?Sized + Sync + Send> Sync for Arc<T> {} |
| |
| #[unstable(feature = "coerce_unsized", issue = "27732")] |
| impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<Arc<U>> for Arc<T> {} |
| |
| #[unstable(feature = "dispatch_from_dyn", issue = "0")] |
| impl<T: ?Sized + Unsize<U>, U: ?Sized> DispatchFromDyn<Arc<U>> for Arc<T> {} |
| |
| impl<T: ?Sized> Arc<T> { |
| fn from_inner(ptr: NonNull<ArcInner<T>>) -> Self { |
| Self { |
| ptr, |
| phantom: PhantomData, |
| } |
| } |
| |
| unsafe fn from_ptr(ptr: *mut ArcInner<T>) -> Self { |
| Self::from_inner(NonNull::new_unchecked(ptr)) |
| } |
| } |
| |
| /// `Weak` is a version of [`Arc`] that holds a non-owning reference to the |
| /// managed value. The value is accessed by calling [`upgrade`] on the `Weak` |
| /// pointer, which returns an [`Option`]`<`[`Arc`]`<T>>`. |
| /// |
| /// Since a `Weak` reference does not count towards ownership, it will not |
/// prevent the inner value from being dropped. `Weak` itself makes no
/// guarantee that the value is still present, and may return [`None`]
/// when [`upgrade`]d.
| /// |
| /// A `Weak` pointer is useful for keeping a temporary reference to the value |
| /// within [`Arc`] without extending its lifetime. It is also used to prevent |
| /// circular references between [`Arc`] pointers, since mutual owning references |
| /// would never allow either [`Arc`] to be dropped. For example, a tree could |
| /// have strong [`Arc`] pointers from parent nodes to children, and `Weak` |
| /// pointers from children back to their parents. |
| /// |
| /// The typical way to obtain a `Weak` pointer is to call [`Arc::downgrade`]. |
| /// |
| /// [`Arc`]: struct.Arc.html |
| /// [`Arc::downgrade`]: struct.Arc.html#method.downgrade |
| /// [`upgrade`]: struct.Weak.html#method.upgrade |
| /// [`Option`]: ../../std/option/enum.Option.html |
| /// [`None`]: ../../std/option/enum.Option.html#variant.None |
| #[stable(feature = "arc_weak", since = "1.4.0")] |
| pub struct Weak<T: ?Sized> { |
| // This is a `NonNull` to allow optimizing the size of this type in enums, |
| // but it is not necessarily a valid pointer. |
| // `Weak::new` sets this to `usize::MAX` so that it doesn’t need |
| // to allocate space on the heap. That's not a value a real pointer |
// will ever have because `ArcInner` has alignment of at least 2.
| ptr: NonNull<ArcInner<T>>, |
| } |
| |
| #[stable(feature = "arc_weak", since = "1.4.0")] |
| unsafe impl<T: ?Sized + Sync + Send> Send for Weak<T> {} |
| #[stable(feature = "arc_weak", since = "1.4.0")] |
| unsafe impl<T: ?Sized + Sync + Send> Sync for Weak<T> {} |
| |
| #[unstable(feature = "coerce_unsized", issue = "27732")] |
| impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<Weak<U>> for Weak<T> {} |
| #[unstable(feature = "dispatch_from_dyn", issue = "0")] |
| impl<T: ?Sized + Unsize<U>, U: ?Sized> DispatchFromDyn<Weak<U>> for Weak<T> {} |
| |
| #[stable(feature = "arc_weak", since = "1.4.0")] |
| impl<T: ?Sized + fmt::Debug> fmt::Debug for Weak<T> { |
| fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { |
| write!(f, "(Weak)") |
| } |
| } |
| |
| struct ArcInner<T: ?Sized> { |
| strong: atomic::AtomicUsize, |
| |
| // the value usize::MAX acts as a sentinel for temporarily "locking" the |
| // ability to upgrade weak pointers or downgrade strong ones; this is used |
| // to avoid races in `make_mut` and `get_mut`. |
| weak: atomic::AtomicUsize, |
| |
| data: T, |
| } |
| |
| unsafe impl<T: ?Sized + Sync + Send> Send for ArcInner<T> {} |
| unsafe impl<T: ?Sized + Sync + Send> Sync for ArcInner<T> {} |
| |
| impl<T> Arc<T> { |
| /// Constructs a new `Arc<T>`. |
| /// |
| /// # Examples |
| /// |
| /// ``` |
| /// use std::sync::Arc; |
| /// |
| /// let five = Arc::new(5); |
| /// ``` |
| #[inline] |
| #[stable(feature = "rust1", since = "1.0.0")] |
| pub fn new(data: T) -> Arc<T> { |
| // Start the weak pointer count as 1 which is the weak pointer that's |
| // held by all the strong pointers (kinda), see std/rc.rs for more info |
| let x: Box<_> = box ArcInner { |
| strong: atomic::AtomicUsize::new(1), |
| weak: atomic::AtomicUsize::new(1), |
| data, |
| }; |
| Self::from_inner(Box::into_raw_non_null(x)) |
| } |
| |
| /// Constructs a new `Arc` with uninitialized contents. |
| /// |
| /// # Examples |
| /// |
| /// ``` |
| /// #![feature(new_uninit)] |
| /// #![feature(get_mut_unchecked)] |
| /// |
| /// use std::sync::Arc; |
| /// |
| /// let mut five = Arc::<u32>::new_uninit(); |
| /// |
| /// let five = unsafe { |
| /// // Deferred initialization: |
| /// Arc::get_mut_unchecked(&mut five).as_mut_ptr().write(5); |
| /// |
| /// five.assume_init() |
| /// }; |
| /// |
| /// assert_eq!(*five, 5) |
| /// ``` |
| #[unstable(feature = "new_uninit", issue = "63291")] |
| pub fn new_uninit() -> Arc<mem::MaybeUninit<T>> { |
| unsafe { |
| Arc::from_ptr(Arc::allocate_for_layout( |
| Layout::new::<T>(), |
| |mem| mem as *mut ArcInner<mem::MaybeUninit<T>>, |
| )) |
| } |
| } |
| |
| /// Constructs a new `Pin<Arc<T>>`. If `T` does not implement `Unpin`, then |
| /// `data` will be pinned in memory and unable to be moved. |
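///
/// # Examples
///
/// A minimal sketch of constructing and using a pinned `Arc`:
///
/// ```
/// use std::pin::Pin;
/// use std::sync::Arc;
///
/// let pinned: Pin<Arc<u32>> = Arc::pin(5);
/// assert_eq!(*pinned, 5);
/// ```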
| #[stable(feature = "pin", since = "1.33.0")] |
| pub fn pin(data: T) -> Pin<Arc<T>> { |
| unsafe { Pin::new_unchecked(Arc::new(data)) } |
| } |
| |
| /// Returns the contained value, if the `Arc` has exactly one strong reference. |
| /// |
| /// Otherwise, an [`Err`][result] is returned with the same `Arc` that was |
| /// passed in. |
| /// |
| /// This will succeed even if there are outstanding weak references. |
| /// |
| /// [result]: ../../std/result/enum.Result.html |
| /// |
| /// # Examples |
| /// |
| /// ``` |
| /// use std::sync::Arc; |
| /// |
| /// let x = Arc::new(3); |
| /// assert_eq!(Arc::try_unwrap(x), Ok(3)); |
| /// |
| /// let x = Arc::new(4); |
| /// let _y = Arc::clone(&x); |
| /// assert_eq!(*Arc::try_unwrap(x).unwrap_err(), 4); |
| /// ``` |
| #[inline] |
| #[stable(feature = "arc_unique", since = "1.4.0")] |
| pub fn try_unwrap(this: Self) -> Result<T, Self> { |
| // See `drop` for why all these atomics are like this |
| if this.inner().strong.compare_exchange(1, 0, Release, Relaxed).is_err() { |
| return Err(this); |
| } |
| |
| atomic::fence(Acquire); |
| |
| unsafe { |
| let elem = ptr::read(&this.ptr.as_ref().data); |
| |
| // Make a weak pointer to clean up the implicit strong-weak reference |
| let _weak = Weak { ptr: this.ptr }; |
| mem::forget(this); |
| |
| Ok(elem) |
| } |
| } |
| } |
| |
| impl<T> Arc<[T]> { |
| /// Constructs a new reference-counted slice with uninitialized contents. |
| /// |
| /// # Examples |
| /// |
| /// ``` |
| /// #![feature(new_uninit)] |
| /// #![feature(get_mut_unchecked)] |
| /// |
| /// use std::sync::Arc; |
| /// |
| /// let mut values = Arc::<[u32]>::new_uninit_slice(3); |
| /// |
| /// let values = unsafe { |
| /// // Deferred initialization: |
| /// Arc::get_mut_unchecked(&mut values)[0].as_mut_ptr().write(1); |
| /// Arc::get_mut_unchecked(&mut values)[1].as_mut_ptr().write(2); |
| /// Arc::get_mut_unchecked(&mut values)[2].as_mut_ptr().write(3); |
| /// |
| /// values.assume_init() |
| /// }; |
| /// |
| /// assert_eq!(*values, [1, 2, 3]) |
| /// ``` |
| #[unstable(feature = "new_uninit", issue = "63291")] |
| pub fn new_uninit_slice(len: usize) -> Arc<[mem::MaybeUninit<T>]> { |
| unsafe { |
| Arc::from_ptr(Arc::allocate_for_slice(len)) |
| } |
| } |
| } |
| |
| impl<T> Arc<mem::MaybeUninit<T>> { |
| /// Converts to `Arc<T>`. |
| /// |
| /// # Safety |
| /// |
| /// As with [`MaybeUninit::assume_init`], |
| /// it is up to the caller to guarantee that the value |
| /// really is in an initialized state. |
| /// Calling this when the content is not yet fully initialized |
| /// causes immediate undefined behavior. |
| /// |
| /// [`MaybeUninit::assume_init`]: ../../std/mem/union.MaybeUninit.html#method.assume_init |
| /// |
| /// # Examples |
| /// |
| /// ``` |
| /// #![feature(new_uninit)] |
| /// #![feature(get_mut_unchecked)] |
| /// |
| /// use std::sync::Arc; |
| /// |
| /// let mut five = Arc::<u32>::new_uninit(); |
| /// |
| /// let five = unsafe { |
| /// // Deferred initialization: |
| /// Arc::get_mut_unchecked(&mut five).as_mut_ptr().write(5); |
| /// |
| /// five.assume_init() |
| /// }; |
| /// |
| /// assert_eq!(*five, 5) |
| /// ``` |
| #[unstable(feature = "new_uninit", issue = "63291")] |
| #[inline] |
| pub unsafe fn assume_init(self) -> Arc<T> { |
| Arc::from_inner(mem::ManuallyDrop::new(self).ptr.cast()) |
| } |
| } |
| |
| impl<T> Arc<[mem::MaybeUninit<T>]> { |
| /// Converts to `Arc<[T]>`. |
| /// |
| /// # Safety |
| /// |
| /// As with [`MaybeUninit::assume_init`], |
| /// it is up to the caller to guarantee that the value |
| /// really is in an initialized state. |
| /// Calling this when the content is not yet fully initialized |
| /// causes immediate undefined behavior. |
| /// |
| /// [`MaybeUninit::assume_init`]: ../../std/mem/union.MaybeUninit.html#method.assume_init |
| /// |
| /// # Examples |
| /// |
| /// ``` |
| /// #![feature(new_uninit)] |
| /// #![feature(get_mut_unchecked)] |
| /// |
| /// use std::sync::Arc; |
| /// |
| /// let mut values = Arc::<[u32]>::new_uninit_slice(3); |
| /// |
| /// let values = unsafe { |
| /// // Deferred initialization: |
| /// Arc::get_mut_unchecked(&mut values)[0].as_mut_ptr().write(1); |
| /// Arc::get_mut_unchecked(&mut values)[1].as_mut_ptr().write(2); |
| /// Arc::get_mut_unchecked(&mut values)[2].as_mut_ptr().write(3); |
| /// |
| /// values.assume_init() |
| /// }; |
| /// |
| /// assert_eq!(*values, [1, 2, 3]) |
| /// ``` |
| #[unstable(feature = "new_uninit", issue = "63291")] |
| #[inline] |
| pub unsafe fn assume_init(self) -> Arc<[T]> { |
| Arc::from_ptr(mem::ManuallyDrop::new(self).ptr.as_ptr() as _) |
| } |
| } |
| |
| impl<T: ?Sized> Arc<T> { |
| /// Consumes the `Arc`, returning the wrapped pointer. |
| /// |
/// To avoid a memory leak, the pointer must be converted back to an `Arc` using
| /// [`Arc::from_raw`][from_raw]. |
| /// |
| /// [from_raw]: struct.Arc.html#method.from_raw |
| /// |
| /// # Examples |
| /// |
| /// ``` |
| /// use std::sync::Arc; |
| /// |
| /// let x = Arc::new("hello".to_owned()); |
| /// let x_ptr = Arc::into_raw(x); |
| /// assert_eq!(unsafe { &*x_ptr }, "hello"); |
| /// ``` |
| #[stable(feature = "rc_raw", since = "1.17.0")] |
| pub fn into_raw(this: Self) -> *const T { |
| let ptr: *const T = &*this; |
| mem::forget(this); |
| ptr |
| } |
| |
| /// Constructs an `Arc` from a raw pointer. |
| /// |
/// The raw pointer must have been previously returned by a call to
/// [`Arc::into_raw`][into_raw].
| /// |
| /// This function is unsafe because improper use may lead to memory problems. For example, a |
| /// double-free may occur if the function is called twice on the same raw pointer. |
| /// |
| /// [into_raw]: struct.Arc.html#method.into_raw |
| /// |
| /// # Examples |
| /// |
| /// ``` |
| /// use std::sync::Arc; |
| /// |
| /// let x = Arc::new("hello".to_owned()); |
| /// let x_ptr = Arc::into_raw(x); |
| /// |
| /// unsafe { |
| /// // Convert back to an `Arc` to prevent leak. |
| /// let x = Arc::from_raw(x_ptr); |
| /// assert_eq!(&*x, "hello"); |
| /// |
| /// // Further calls to `Arc::from_raw(x_ptr)` would be memory-unsafe. |
| /// } |
| /// |
| /// // The memory was freed when `x` went out of scope above, so `x_ptr` is now dangling! |
| /// ``` |
| #[stable(feature = "rc_raw", since = "1.17.0")] |
| pub unsafe fn from_raw(ptr: *const T) -> Self { |
| let offset = data_offset(ptr); |
| |
| // Reverse the offset to find the original ArcInner. |
| let fake_ptr = ptr as *mut ArcInner<T>; |
| let arc_ptr = set_data_ptr(fake_ptr, (ptr as *mut u8).offset(-offset)); |
| |
| Self::from_ptr(arc_ptr) |
| } |
| |
| /// Consumes the `Arc`, returning the wrapped pointer as `NonNull<T>`. |
| /// |
| /// # Examples |
| /// |
| /// ``` |
| /// #![feature(rc_into_raw_non_null)] |
| /// |
| /// use std::sync::Arc; |
| /// |
| /// let x = Arc::new("hello".to_owned()); |
| /// let ptr = Arc::into_raw_non_null(x); |
| /// let deref = unsafe { ptr.as_ref() }; |
| /// assert_eq!(deref, "hello"); |
| /// ``` |
| #[unstable(feature = "rc_into_raw_non_null", issue = "47336")] |
| #[inline] |
| pub fn into_raw_non_null(this: Self) -> NonNull<T> { |
| // safe because Arc guarantees its pointer is non-null |
| unsafe { NonNull::new_unchecked(Arc::into_raw(this) as *mut _) } |
| } |
| |
| /// Creates a new [`Weak`][weak] pointer to this value. |
| /// |
| /// [weak]: struct.Weak.html |
| /// |
| /// # Examples |
| /// |
| /// ``` |
| /// use std::sync::Arc; |
| /// |
| /// let five = Arc::new(5); |
| /// |
| /// let weak_five = Arc::downgrade(&five); |
| /// ``` |
| #[stable(feature = "arc_weak", since = "1.4.0")] |
| pub fn downgrade(this: &Self) -> Weak<T> { |
| // This Relaxed is OK because we're checking the value in the CAS |
| // below. |
| let mut cur = this.inner().weak.load(Relaxed); |
| |
| loop { |
| // check if the weak counter is currently "locked"; if so, spin. |
| if cur == usize::MAX { |
| cur = this.inner().weak.load(Relaxed); |
| continue; |
| } |
| |
| // NOTE: this code currently ignores the possibility of overflow |
| // into usize::MAX; in general both Rc and Arc need to be adjusted |
| // to deal with overflow. |
| |
| // Unlike with Clone(), we need this to be an Acquire read to |
| // synchronize with the write coming from `is_unique`, so that the |
| // events prior to that write happen before this read. |
| match this.inner().weak.compare_exchange_weak(cur, cur + 1, Acquire, Relaxed) { |
| Ok(_) => { |
| // Make sure we do not create a dangling Weak |
| debug_assert!(!is_dangling(this.ptr)); |
| return Weak { ptr: this.ptr }; |
| } |
| Err(old) => cur = old, |
| } |
| } |
| } |
| |
| /// Gets the number of [`Weak`][weak] pointers to this value. |
| /// |
| /// [weak]: struct.Weak.html |
| /// |
| /// # Safety |
| /// |
| /// This method by itself is safe, but using it correctly requires extra care. |
| /// Another thread can change the weak count at any time, |
| /// including potentially between calling this method and acting on the result. |
| /// |
| /// # Examples |
| /// |
| /// ``` |
| /// use std::sync::Arc; |
| /// |
| /// let five = Arc::new(5); |
| /// let _weak_five = Arc::downgrade(&five); |
| /// |
| /// // This assertion is deterministic because we haven't shared |
| /// // the `Arc` or `Weak` between threads. |
| /// assert_eq!(1, Arc::weak_count(&five)); |
| /// ``` |
| #[inline] |
| #[stable(feature = "arc_counts", since = "1.15.0")] |
| pub fn weak_count(this: &Self) -> usize { |
| let cnt = this.inner().weak.load(SeqCst); |
| // If the weak count is currently locked, the value of the |
| // count was 0 just before taking the lock. |
| if cnt == usize::MAX { 0 } else { cnt - 1 } |
| } |
| |
| /// Gets the number of strong (`Arc`) pointers to this value. |
| /// |
| /// # Safety |
| /// |
| /// This method by itself is safe, but using it correctly requires extra care. |
| /// Another thread can change the strong count at any time, |
| /// including potentially between calling this method and acting on the result. |
| /// |
| /// # Examples |
| /// |
| /// ``` |
| /// use std::sync::Arc; |
| /// |
| /// let five = Arc::new(5); |
| /// let _also_five = Arc::clone(&five); |
| /// |
| /// // This assertion is deterministic because we haven't shared |
| /// // the `Arc` between threads. |
| /// assert_eq!(2, Arc::strong_count(&five)); |
| /// ``` |
| #[inline] |
| #[stable(feature = "arc_counts", since = "1.15.0")] |
| pub fn strong_count(this: &Self) -> usize { |
| this.inner().strong.load(SeqCst) |
| } |
| |
| #[inline] |
| fn inner(&self) -> &ArcInner<T> { |
| // This unsafety is ok because while this arc is alive we're guaranteed |
| // that the inner pointer is valid. Furthermore, we know that the |
| // `ArcInner` structure itself is `Sync` because the inner data is |
| // `Sync` as well, so we're ok loaning out an immutable pointer to these |
| // contents. |
| unsafe { self.ptr.as_ref() } |
| } |
| |
| // Non-inlined part of `drop`. |
| #[inline(never)] |
| unsafe fn drop_slow(&mut self) { |
| // Destroy the data at this time, even though we may not free the box |
| // allocation itself (there may still be weak pointers lying around). |
| ptr::drop_in_place(&mut self.ptr.as_mut().data); |
| |
| if self.inner().weak.fetch_sub(1, Release) == 1 { |
| atomic::fence(Acquire); |
| Global.dealloc(self.ptr.cast(), Layout::for_value(self.ptr.as_ref())) |
| } |
| } |
| |
| #[inline] |
| #[stable(feature = "ptr_eq", since = "1.17.0")] |
| /// Returns `true` if the two `Arc`s point to the same value (not |
| /// just values that compare as equal). |
| /// |
| /// # Examples |
| /// |
| /// ``` |
| /// use std::sync::Arc; |
| /// |
| /// let five = Arc::new(5); |
| /// let same_five = Arc::clone(&five); |
| /// let other_five = Arc::new(5); |
| /// |
| /// assert!(Arc::ptr_eq(&five, &same_five)); |
| /// assert!(!Arc::ptr_eq(&five, &other_five)); |
| /// ``` |
| pub fn ptr_eq(this: &Self, other: &Self) -> bool { |
| this.ptr.as_ptr() == other.ptr.as_ptr() |
| } |
| } |
| |
| impl<T: ?Sized> Arc<T> { |
| /// Allocates an `ArcInner<T>` with sufficient space for |
| /// a possibly-unsized value where the value has the layout provided. |
| /// |
| /// The function `mem_to_arcinner` is called with the data pointer |
/// and must return a (potentially fat) pointer for the `ArcInner<T>`.
| unsafe fn allocate_for_layout( |
| value_layout: Layout, |
| mem_to_arcinner: impl FnOnce(*mut u8) -> *mut ArcInner<T> |
| ) -> *mut ArcInner<T> { |
| // Calculate layout using the given value layout. |
| // Previously, layout was calculated on the expression |
| // `&*(ptr as *const ArcInner<T>)`, but this created a misaligned |
| // reference (see #54908). |
| let layout = Layout::new::<ArcInner<()>>() |
| .extend(value_layout).unwrap().0 |
| .pad_to_align().unwrap(); |
| |
| let mem = Global.alloc(layout) |
| .unwrap_or_else(|_| handle_alloc_error(layout)); |
| |
| // Initialize the ArcInner |
| let inner = mem_to_arcinner(mem.as_ptr()); |
| debug_assert_eq!(Layout::for_value(&*inner), layout); |
| |
| ptr::write(&mut (*inner).strong, atomic::AtomicUsize::new(1)); |
| ptr::write(&mut (*inner).weak, atomic::AtomicUsize::new(1)); |
| |
| inner |
| } |
| |
| /// Allocates an `ArcInner<T>` with sufficient space for an unsized value. |
| unsafe fn allocate_for_ptr(ptr: *const T) -> *mut ArcInner<T> { |
| // Allocate for the `ArcInner<T>` using the given value. |
| Self::allocate_for_layout( |
| Layout::for_value(&*ptr), |
| |mem| set_data_ptr(ptr as *mut T, mem) as *mut ArcInner<T>, |
| ) |
| } |
| |
| fn from_box(v: Box<T>) -> Arc<T> { |
| unsafe { |
| let box_unique = Box::into_unique(v); |
| let bptr = box_unique.as_ptr(); |
| |
| let value_size = size_of_val(&*bptr); |
| let ptr = Self::allocate_for_ptr(bptr); |
| |
| // Copy value as bytes |
| ptr::copy_nonoverlapping( |
| bptr as *const T as *const u8, |
| &mut (*ptr).data as *mut _ as *mut u8, |
| value_size); |
| |
| // Free the allocation without dropping its contents |
| box_free(box_unique); |
| |
| Self::from_ptr(ptr) |
| } |
| } |
| } |
| |
| impl<T> Arc<[T]> { |
| /// Allocates an `ArcInner<[T]>` with the given length. |
| unsafe fn allocate_for_slice(len: usize) -> *mut ArcInner<[T]> { |
| Self::allocate_for_layout( |
| Layout::array::<T>(len).unwrap(), |
| |mem| ptr::slice_from_raw_parts_mut(mem as *mut T, len) as *mut ArcInner<[T]>, |
| ) |
| } |
| } |
| |
| /// Sets the data pointer of a `?Sized` raw pointer. |
| /// |
| /// For a slice/trait object, this sets the `data` field and leaves the rest |
| /// unchanged. For a sized raw pointer, this simply sets the pointer. |
| unsafe fn set_data_ptr<T: ?Sized, U>(mut ptr: *mut T, data: *mut U) -> *mut T { |
| ptr::write(&mut ptr as *mut _ as *mut *mut u8, data as *mut u8); |
| ptr |
| } |
| |
| impl<T> Arc<[T]> { |
/// Copies elements from a slice into a newly allocated `Arc<[T]>`.
| /// |
| /// Unsafe because the caller must either take ownership or bind `T: Copy`. |
| unsafe fn copy_from_slice(v: &[T]) -> Arc<[T]> { |
| let ptr = Self::allocate_for_slice(v.len()); |
| |
| ptr::copy_nonoverlapping( |
| v.as_ptr(), |
| &mut (*ptr).data as *mut [T] as *mut T, |
| v.len()); |
| |
| Self::from_ptr(ptr) |
| } |
| |
| /// Constructs an `Arc<[T]>` from an iterator known to be of a certain size. |
| /// |
/// Behavior is undefined if the iterator yields fewer or more elements than `len`.
| unsafe fn from_iter_exact(iter: impl iter::Iterator<Item = T>, len: usize) -> Arc<[T]> { |
| // Panic guard while cloning T elements. |
| // In the event of a panic, elements that have been written |
| // into the new ArcInner will be dropped, then the memory freed. |
| struct Guard<T> { |
| mem: NonNull<u8>, |
| elems: *mut T, |
| layout: Layout, |
| n_elems: usize, |
| } |
| |
| impl<T> Drop for Guard<T> { |
| fn drop(&mut self) { |
| unsafe { |
| let slice = from_raw_parts_mut(self.elems, self.n_elems); |
| ptr::drop_in_place(slice); |
| |
| Global.dealloc(self.mem.cast(), self.layout); |
| } |
| } |
| } |
| |
| let ptr = Self::allocate_for_slice(len); |
| |
| let mem = ptr as *mut _ as *mut u8; |
| let layout = Layout::for_value(&*ptr); |
| |
| // Pointer to first element |
| let elems = &mut (*ptr).data as *mut [T] as *mut T; |
| |
| let mut guard = Guard { |
| mem: NonNull::new_unchecked(mem), |
| elems, |
| layout, |
| n_elems: 0, |
| }; |
| |
| for (i, item) in iter.enumerate() { |
| ptr::write(elems.add(i), item); |
| guard.n_elems += 1; |
| } |
| |
| // All clear. Forget the guard so it doesn't free the new ArcInner. |
| mem::forget(guard); |
| |
| Self::from_ptr(ptr) |
| } |
| } |
| |
| /// Specialization trait used for `From<&[T]>`. |
| trait ArcFromSlice<T> { |
| fn from_slice(slice: &[T]) -> Self; |
| } |
| |
| impl<T: Clone> ArcFromSlice<T> for Arc<[T]> { |
| #[inline] |
| default fn from_slice(v: &[T]) -> Self { |
| unsafe { |
| Self::from_iter_exact(v.iter().cloned(), v.len()) |
| } |
| } |
| } |
| |
| impl<T: Copy> ArcFromSlice<T> for Arc<[T]> { |
| #[inline] |
| fn from_slice(v: &[T]) -> Self { |
| unsafe { Arc::copy_from_slice(v) } |
| } |
| } |
| |
| #[stable(feature = "rust1", since = "1.0.0")] |
| impl<T: ?Sized> Clone for Arc<T> { |
| /// Makes a clone of the `Arc` pointer. |
| /// |
| /// This creates another pointer to the same inner value, increasing the |
| /// strong reference count. |
| /// |
| /// # Examples |
| /// |
| /// ``` |
| /// use std::sync::Arc; |
| /// |
| /// let five = Arc::new(5); |
| /// |
| /// let _ = Arc::clone(&five); |
| /// ``` |
| #[inline] |
| fn clone(&self) -> Arc<T> { |
| // Using a relaxed ordering is alright here, as knowledge of the |
| // original reference prevents other threads from erroneously deleting |
| // the object. |
| // |
// As explained in the [Boost documentation][1], increasing the
// reference counter can always be done with memory_order_relaxed: new
| // references to an object can only be formed from an existing |
| // reference, and passing an existing reference from one thread to |
| // another must already provide any required synchronization. |
| // |
| // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html) |
| let old_size = self.inner().strong.fetch_add(1, Relaxed); |
| |
// However, we need to guard against massive refcounts in case someone
// is `mem::forget`ing Arcs. If we don't do this, the count can overflow
// and users will use-after-free. We racily saturate to `isize::MAX` on
| // the assumption that there aren't ~2 billion threads incrementing |
| // the reference count at once. This branch will never be taken in |
| // any realistic program. |
| // |
| // We abort because such a program is incredibly degenerate, and we |
| // don't care to support it. |
| if old_size > MAX_REFCOUNT { |
| unsafe { |
| abort(); |
| } |
| } |
| |
| Self::from_inner(self.ptr) |
| } |
| } |
| |
| #[stable(feature = "rust1", since = "1.0.0")] |
| impl<T: ?Sized> Deref for Arc<T> { |
| type Target = T; |
| |
| #[inline] |
| fn deref(&self) -> &T { |
| &self.inner().data |
| } |
| } |
| |
| #[unstable(feature = "receiver_trait", issue = "0")] |
| impl<T: ?Sized> Receiver for Arc<T> {} |
| |
| impl<T: Clone> Arc<T> { |
| /// Makes a mutable reference into the given `Arc`. |
| /// |
| /// If there are other `Arc` or [`Weak`][weak] pointers to the same value, |
| /// then `make_mut` will invoke [`clone`][clone] on the inner value to |
| /// ensure unique ownership. This is also referred to as clone-on-write. |
| /// |
| /// See also [`get_mut`][get_mut], which will fail rather than cloning. |
| /// |
| /// [weak]: struct.Weak.html |
| /// [clone]: ../../std/clone/trait.Clone.html#tymethod.clone |
| /// [get_mut]: struct.Arc.html#method.get_mut |
| /// |
| /// # Examples |
| /// |
| /// ``` |
| /// use std::sync::Arc; |
| /// |
| /// let mut data = Arc::new(5); |
| /// |
| /// *Arc::make_mut(&mut data) += 1; // Won't clone anything |
| /// let mut other_data = Arc::clone(&data); // Won't clone inner data |
| /// *Arc::make_mut(&mut data) += 1; // Clones inner data |
| /// *Arc::make_mut(&mut data) += 1; // Won't clone anything |
| /// *Arc::make_mut(&mut other_data) *= 2; // Won't clone anything |
| /// |
| /// // Now `data` and `other_data` point to different values. |
| /// assert_eq!(*data, 8); |
| /// assert_eq!(*other_data, 12); |
| /// ``` |
| #[inline] |
| #[stable(feature = "arc_unique", since = "1.4.0")] |
| pub fn make_mut(this: &mut Self) -> &mut T { |
| // Note that we hold both a strong reference and a weak reference. |
| // Thus, releasing our strong reference only will not, by itself, cause |
| // the memory to be deallocated. |
| // |
| // Use Acquire to ensure that we see any writes to `weak` that happen |
| // before release writes (i.e., decrements) to `strong`. Since we hold a |
| // weak count, there's no chance the ArcInner itself could be |
| // deallocated. |
| if this.inner().strong.compare_exchange(1, 0, Acquire, Relaxed).is_err() { |
| // Another strong pointer exists; clone |
| *this = Arc::new((**this).clone()); |
| } else if this.inner().weak.load(Relaxed) != 1 { |
| // Relaxed suffices in the above because this is fundamentally an |
| // optimization: we are always racing with weak pointers being |
// dropped. Worst case, we end up allocating a new Arc unnecessarily.
| |
| // We removed the last strong ref, but there are additional weak |
| // refs remaining. We'll move the contents to a new Arc, and |
| // invalidate the other weak refs. |
| |
| // Note that it is not possible for the read of `weak` to yield |
| // usize::MAX (i.e., locked), since the weak count can only be |
| // locked by a thread with a strong reference. |
| |
| // Materialize our own implicit weak pointer, so that it can clean |
| // up the ArcInner as needed. |
| let weak = Weak { ptr: this.ptr }; |
| |
| // mark the data itself as already deallocated |
| unsafe { |
| // there is no data race in the implicit write caused by `read` |
| // here (due to zeroing) because data is no longer accessed by |
| // other threads (due to there being no more strong refs at this |
| // point). |
| let mut swap = Arc::new(ptr::read(&weak.ptr.as_ref().data)); |
| mem::swap(this, &mut swap); |
| mem::forget(swap); |
| } |
| } else { |
| // We were the sole reference of either kind; bump back up the |
| // strong ref count. |
| this.inner().strong.store(1, Release); |
| } |
| |
| // As with `get_mut()`, the unsafety is ok because our reference was |
| // either unique to begin with, or became one upon cloning the contents. |
| unsafe { |
| &mut this.ptr.as_mut().data |
| } |
| } |
| } |
| |
| impl<T: ?Sized> Arc<T> { |
| /// Returns a mutable reference to the inner value, if there are |
| /// no other `Arc` or [`Weak`][weak] pointers to the same value. |
| /// |
| /// Returns [`None`][option] otherwise, because it is not safe to |
| /// mutate a shared value. |
| /// |
| /// See also [`make_mut`][make_mut], which will [`clone`][clone] |
| /// the inner value when it's shared. |
| /// |
| /// [weak]: struct.Weak.html |
| /// [option]: ../../std/option/enum.Option.html |
| /// [make_mut]: struct.Arc.html#method.make_mut |
| /// [clone]: ../../std/clone/trait.Clone.html#tymethod.clone |
| /// |
| /// # Examples |
| /// |
| /// ``` |
| /// use std::sync::Arc; |
| /// |
| /// let mut x = Arc::new(3); |
| /// *Arc::get_mut(&mut x).unwrap() = 4; |
| /// assert_eq!(*x, 4); |
| /// |
| /// let _y = Arc::clone(&x); |
| /// assert!(Arc::get_mut(&mut x).is_none()); |
| /// ``` |
| #[inline] |
| #[stable(feature = "arc_unique", since = "1.4.0")] |
| pub fn get_mut(this: &mut Self) -> Option<&mut T> { |
| if this.is_unique() { |
| // This unsafety is ok because we're guaranteed that the pointer |
| // returned is the *only* pointer that will ever be returned to T. Our |
| // reference count is guaranteed to be 1 at this point, and we required |
| // the Arc itself to be `mut`, so we're returning the only possible |
| // reference to the inner data. |
| unsafe { |
| Some(Arc::get_mut_unchecked(this)) |
| } |
| } else { |
| None |
| } |
| } |
| |
| /// Returns a mutable reference to the inner value, |
| /// without any check. |
| /// |
| /// See also [`get_mut`], which is safe and does appropriate checks. |
| /// |
| /// [`get_mut`]: struct.Arc.html#method.get_mut |
| /// |
| /// # Safety |
| /// |
| /// Any other `Arc` or [`Weak`] pointers to the same value must not be dereferenced |
| /// for the duration of the returned borrow. |
| /// This is trivially the case if no such pointers exist, |
| /// for example immediately after `Arc::new`. |
| /// |
| /// # Examples |
| /// |
| /// ``` |
| /// #![feature(get_mut_unchecked)] |
| /// |
| /// use std::sync::Arc; |
| /// |
| /// let mut x = Arc::new(String::new()); |
| /// unsafe { |
| /// Arc::get_mut_unchecked(&mut x).push_str("foo") |
| /// } |
| /// assert_eq!(*x, "foo"); |
| /// ``` |
| #[inline] |
| #[unstable(feature = "get_mut_unchecked", issue = "63292")] |
| pub unsafe fn get_mut_unchecked(this: &mut Self) -> &mut T { |
| &mut this.ptr.as_mut().data |
| } |
| |
| /// Determine whether this is the unique reference (including weak refs) to |
| /// the underlying data. |
| /// |
| /// Note that this requires locking the weak ref count. |
| fn is_unique(&mut self) -> bool { |
| // lock the weak pointer count if we appear to be the sole weak pointer |
| // holder. |
| // |
// The acquire ordering here ensures a happens-before relationship with any
| // writes to `strong` (in particular in `Weak::upgrade`) prior to decrements |
| // of the `weak` count (via `Weak::drop`, which uses release). If the upgraded |
| // weak ref was never dropped, the CAS here will fail so we do not care to synchronize. |
| if self.inner().weak.compare_exchange(1, usize::MAX, Acquire, Relaxed).is_ok() { |
| // This needs to be an `Acquire` to synchronize with the decrement of the `strong` |
| // counter in `drop` -- the only access that happens when any but the last reference |
| // is being dropped. |
| let unique = self.inner().strong.load(Acquire) == 1; |
| |
| // The release write here synchronizes with a read in `downgrade`, |
| // effectively preventing the above read of `strong` from happening |
| // after the write. |
| self.inner().weak.store(1, Release); // release the lock |
| unique |
| } else { |
| false |
| } |
| } |
| } |
| |
| #[stable(feature = "rust1", since = "1.0.0")] |
| unsafe impl<#[may_dangle] T: ?Sized> Drop for Arc<T> { |
| /// Drops the `Arc`. |
| /// |
| /// This will decrement the strong reference count. If the strong reference |
| /// count reaches zero then the only other references (if any) are |
| /// [`Weak`], so we `drop` the inner value. |
| /// |
| /// # Examples |
| /// |
| /// ``` |
| /// use std::sync::Arc; |
| /// |
| /// struct Foo; |
| /// |
| /// impl Drop for Foo { |
| /// fn drop(&mut self) { |
| /// println!("dropped!"); |
| /// } |
| /// } |
| /// |
| /// let foo = Arc::new(Foo); |
| /// let foo2 = Arc::clone(&foo); |
| /// |
| /// drop(foo); // Doesn't print anything |
| /// drop(foo2); // Prints "dropped!" |
| /// ``` |
| /// |
| /// [`Weak`]: ../../std/sync/struct.Weak.html |
| #[inline] |
| fn drop(&mut self) { |
| // Because `fetch_sub` is already atomic, we do not need to synchronize |
| // with other threads unless we are going to delete the object. This |
// same logic applies to the `fetch_sub` on the `weak` count below.
| if self.inner().strong.fetch_sub(1, Release) != 1 { |
| return; |
| } |
| |
| // This fence is needed to prevent reordering of use of the data and |
| // deletion of the data. Because it is marked `Release`, the decreasing |
| // of the reference count synchronizes with this `Acquire` fence. This |
| // means that use of the data happens before decreasing the reference |
| // count, which happens before this fence, which happens before the |
| // deletion of the data. |
| // |
| // As explained in the [Boost documentation][1], |
| // |
| // > It is important to enforce any possible access to the object in one |
| // > thread (through an existing reference) to *happen before* deleting |
| // > the object in a different thread. This is achieved by a "release" |
| // > operation after dropping a reference (any access to the object |
| // > through this reference must obviously happened before), and an |
| // > "acquire" operation before deleting the object. |
| // |
| // In particular, while the contents of an Arc are usually immutable, it's |
| // possible to have interior writes to something like a Mutex<T>. Since a |
| // Mutex is not acquired when it is deleted, we can't rely on its |
| // synchronization logic to make writes in thread A visible to a destructor |
| // running in thread B. |
| // |
| // Also note that the Acquire fence here could probably be replaced with an |
| // Acquire load, which could improve performance in highly-contended |
| // situations. See [2]. |
| // |
| // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html) |
| // [2]: (https://github.com/rust-lang/rust/pull/41714) |
| atomic::fence(Acquire); |
| |
| unsafe { |
| self.drop_slow(); |
| } |
| } |
| } |
| |
| impl Arc<dyn Any + Send + Sync> { |
| #[inline] |
| #[stable(feature = "rc_downcast", since = "1.29.0")] |
/// Attempts to downcast the `Arc<dyn Any + Send + Sync>` to a concrete type.
| /// |
| /// # Examples |
| /// |
| /// ``` |
| /// use std::any::Any; |
| /// use std::sync::Arc; |
| /// |
| /// fn print_if_string(value: Arc<dyn Any + Send + Sync>) { |
| /// if let Ok(string) = value.downcast::<String>() { |
| /// println!("String ({}): {}", string.len(), string); |
| /// } |
| /// } |
| /// |
| /// fn main() { |
| /// let my_string = "Hello World".to_string(); |
| /// print_if_string(Arc::new(my_string)); |
| /// print_if_string(Arc::new(0i8)); |
| /// } |
| /// ``` |
| pub fn downcast<T>(self) -> Result<Arc<T>, Self> |
| where |
| T: Any + Send + Sync + 'static, |
| { |
| if (*self).is::<T>() { |
| let ptr = self.ptr.cast::<ArcInner<T>>(); |
| mem::forget(self); |
| Ok(Arc::from_inner(ptr)) |
| } else { |
| Err(self) |
| } |
| } |
| } |
| |
| impl<T> Weak<T> { |
| /// Constructs a new `Weak<T>`, without allocating any memory. |
| /// Calling [`upgrade`] on the return value always gives [`None`]. |
| /// |
| /// [`upgrade`]: struct.Weak.html#method.upgrade |
| /// [`None`]: ../../std/option/enum.Option.html#variant.None |
| /// |
| /// # Examples |
| /// |
| /// ``` |
| /// use std::sync::Weak; |
| /// |
| /// let empty: Weak<i64> = Weak::new(); |
| /// assert!(empty.upgrade().is_none()); |
| /// ``` |
| #[stable(feature = "downgraded_weak", since = "1.10.0")] |
| pub fn new() -> Weak<T> { |
| Weak { |
| ptr: NonNull::new(usize::MAX as *mut ArcInner<T>).expect("MAX is not 0"), |
| } |
| } |
| |
| /// Returns a raw pointer to the object `T` pointed to by this `Weak<T>`. |
| /// |
| /// It is up to the caller to ensure that the object is still alive when accessing it through |
| /// the pointer. |
| /// |
/// The pointer may be [`null`] or dangling if the object has already been destroyed.
| /// |
| /// # Examples |
| /// |
| /// ``` |
| /// #![feature(weak_into_raw)] |
| /// |
| /// use std::sync::Arc; |
| /// use std::ptr; |
| /// |
| /// let strong = Arc::new("hello".to_owned()); |
| /// let weak = Arc::downgrade(&strong); |
| /// // Both point to the same object |
| /// assert!(ptr::eq(&*strong, weak.as_raw())); |
| /// // The strong here keeps it alive, so we can still access the object. |
| /// assert_eq!("hello", unsafe { &*weak.as_raw() }); |
| /// |
| /// drop(strong); |
| /// // But not any more. We can do weak.as_raw(), but accessing the pointer would lead to |
| /// // undefined behaviour. |
| /// // assert_eq!("hello", unsafe { &*weak.as_raw() }); |
| /// ``` |
| /// |
| /// [`null`]: ../../std/ptr/fn.null.html |
| #[unstable(feature = "weak_into_raw", issue = "60728")] |
| pub fn as_raw(&self) -> *const T { |
| match self.inner() { |
| None => ptr::null(), |
| Some(inner) => { |
| let offset = data_offset_sized::<T>(); |
| let ptr = inner as *const ArcInner<T>; |
// Note: while the pointer we create may already point to a dropped value, the
// allocation still lives (it must, as long as this weak pointer exists).
// Therefore, applying the offset is fine; it won't leave the allocation.
| let ptr = unsafe { (ptr as *const u8).offset(offset) }; |
| ptr as *const T |
| } |
| } |
| } |
| |
| /// Consumes the `Weak<T>` and turns it into a raw pointer. |
| /// |
| /// This converts the weak pointer into a raw pointer, preserving the original weak count. It |
| /// can be turned back into the `Weak<T>` with [`from_raw`]. |
| /// |
| /// The same restrictions of accessing the target of the pointer as with |
| /// [`as_raw`] apply. |
| /// |
| /// # Examples |
| /// |
| /// ``` |
| /// #![feature(weak_into_raw)] |
| /// |
| /// use std::sync::{Arc, Weak}; |
| /// |
| /// let strong = Arc::new("hello".to_owned()); |
| /// let weak = Arc::downgrade(&strong); |
| /// let raw = weak.into_raw(); |
| /// |
| /// assert_eq!(1, Arc::weak_count(&strong)); |
| /// assert_eq!("hello", unsafe { &*raw }); |
| /// |
| /// drop(unsafe { Weak::from_raw(raw) }); |
| /// assert_eq!(0, Arc::weak_count(&strong)); |
| /// ``` |
| /// |
| /// [`from_raw`]: struct.Weak.html#method.from_raw |
| /// [`as_raw`]: struct.Weak.html#method.as_raw |
| #[unstable(feature = "weak_into_raw", issue = "60728")] |
| pub fn into_raw(self) -> *const T { |
| let result = self.as_raw(); |
| mem::forget(self); |
| result |
| } |
| |
| /// Converts a raw pointer previously created by [`into_raw`] back into |
| /// `Weak<T>`. |
| /// |
| /// This can be used to safely get a strong reference (by calling [`upgrade`] |
| /// later) or to deallocate the weak count by dropping the `Weak<T>`. |
| /// |
| /// It takes ownership of one weak count. In case a [`null`] is passed, a dangling [`Weak`] is |
| /// returned. |
| /// |
| /// # Safety |
| /// |
| /// The pointer must represent one valid weak count. In other words, it must point to `T` which |
| /// is or *was* managed by an [`Arc`] and the weak count of that [`Arc`] must not have reached |
| /// 0. It is allowed for the strong count to be 0. |
| /// |
| /// # Examples |
| /// |
| /// ``` |
| /// #![feature(weak_into_raw)] |
| /// |
| /// use std::sync::{Arc, Weak}; |
| /// |
| /// let strong = Arc::new("hello".to_owned()); |
| /// |
| /// let raw_1 = Arc::downgrade(&strong).into_raw(); |
| /// let raw_2 = Arc::downgrade(&strong).into_raw(); |
| /// |
| /// assert_eq!(2, Arc::weak_count(&strong)); |
| /// |
| /// assert_eq!("hello", &*unsafe { Weak::from_raw(raw_1) }.upgrade().unwrap()); |
| /// assert_eq!(1, Arc::weak_count(&strong)); |
| /// |
| /// drop(strong); |
| /// |
| /// // Decrement the last weak count. |
| /// assert!(unsafe { Weak::from_raw(raw_2) }.upgrade().is_none()); |
| /// ``` |
| /// |
| /// [`null`]: ../../std/ptr/fn.null.html |
| /// [`into_raw`]: struct.Weak.html#method.into_raw |
| /// [`upgrade`]: struct.Weak.html#method.upgrade |
| /// [`Weak`]: struct.Weak.html |
| /// [`Arc`]: struct.Arc.html |
| #[unstable(feature = "weak_into_raw", issue = "60728")] |
| pub unsafe fn from_raw(ptr: *const T) -> Self { |
| if ptr.is_null() { |
| Self::new() |
| } else { |
| // See Arc::from_raw for details |
| let offset = data_offset(ptr); |
| let fake_ptr = ptr as *mut ArcInner<T>; |
| let ptr = set_data_ptr(fake_ptr, (ptr as *mut u8).offset(-offset)); |
| Weak { |
| ptr: NonNull::new(ptr).expect("Invalid pointer passed to from_raw"), |
| } |
| } |
| } |
| } |
| |
| impl<T: ?Sized> Weak<T> { |
| /// Attempts to upgrade the `Weak` pointer to an [`Arc`], extending |
| /// the lifetime of the value if successful. |
| /// |
| /// Returns [`None`] if the value has since been dropped. |
| /// |
| /// [`Arc`]: struct.Arc.html |
| /// [`None`]: ../../std/option/enum.Option.html#variant.None |
| /// |
| /// # Examples |
| /// |
| /// ``` |
| /// use std::sync::Arc; |
| /// |
| /// let five = Arc::new(5); |
| /// |
| /// let weak_five = Arc::downgrade(&five); |
| /// |
| /// let strong_five: Option<Arc<_>> = weak_five.upgrade(); |
| /// assert!(strong_five.is_some()); |
| /// |
| /// // Destroy all strong pointers. |
| /// drop(strong_five); |
| /// drop(five); |
| /// |
| /// assert!(weak_five.upgrade().is_none()); |
| /// ``` |
| #[stable(feature = "arc_weak", since = "1.4.0")] |
| pub fn upgrade(&self) -> Option<Arc<T>> { |
| // We use a CAS loop to increment the strong count instead of a |
| // fetch_add because once the count hits 0 it must never be above 0. |
| let inner = self.inner()?; |
| |
| // Relaxed load because any write of 0 that we can observe |
| // leaves the field in a permanently zero state (so a |
| // "stale" read of 0 is fine), and any other value is |
| // confirmed via the CAS below. |
| let mut n = inner.strong.load(Relaxed); |
| |
| loop { |
| if n == 0 { |
| return None; |
| } |
| |
| // See comments in `Arc::clone` for why we do this (for `mem::forget`). |
| if n > MAX_REFCOUNT { |
| unsafe { |
| abort(); |
| } |
| } |
| |
| // Relaxed is valid for the same reason it is on Arc's Clone impl |
| match inner.strong.compare_exchange_weak(n, n + 1, Relaxed, Relaxed) { |
| Ok(_) => return Some(Arc::from_inner(self.ptr)), // null checked above |
| Err(old) => n = old, |
| } |
| } |
| } |
| |
| /// Gets the number of strong (`Arc`) pointers pointing to this value. |
| /// |
| /// If `self` was created using [`Weak::new`], this will return 0. |
| /// |
| /// [`Weak::new`]: #method.new |
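///
/// # Examples
///
/// A minimal sketch of the counting behavior (this method requires the
/// unstable `weak_counts` feature):
///
/// ```
/// #![feature(weak_counts)]
///
/// use std::sync::Arc;
///
/// let five = Arc::new(5);
/// let weak_five = Arc::downgrade(&five);
/// assert_eq!(1, weak_five.strong_count());
///
/// let also_five = Arc::clone(&five);
/// assert_eq!(2, weak_five.strong_count());
///
/// drop(five);
/// drop(also_five);
/// assert_eq!(0, weak_five.strong_count());
/// ```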
| #[unstable(feature = "weak_counts", issue = "57977")] |
| pub fn strong_count(&self) -> usize { |
| if let Some(inner) = self.inner() { |
| inner.strong.load(SeqCst) |
| } else { |
| 0 |
| } |
| } |
| |
| /// Gets an approximation of the number of `Weak` pointers pointing to this |
| /// value. |
| /// |
/// If `self` was created using [`Weak::new`], this will return `None`. If
/// not, the returned value is at least 1, since `self` still points to the
/// value.
| /// |
| /// # Accuracy |
| /// |
| /// Due to implementation details, the returned value can be off by 1 in |
| /// either direction when other threads are manipulating any `Arc`s or |
| /// `Weak`s pointing to the same value. |
| /// |
| /// [`Weak::new`]: #method.new |
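///
/// # Examples
///
/// A minimal sketch (this method requires the unstable `weak_counts` feature):
///
/// ```
/// #![feature(weak_counts)]
///
/// use std::sync::{Arc, Weak};
///
/// let five = Arc::new(5);
/// let weak_five = Arc::downgrade(&five);
/// assert_eq!(Some(1), weak_five.weak_count());
///
/// let empty: Weak<i32> = Weak::new();
/// assert_eq!(None, empty.weak_count());
/// ```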
| #[unstable(feature = "weak_counts", issue = "57977")] |
| pub fn weak_count(&self) -> Option<usize> { |
| // Due to the implicit weak pointer added when any strong pointers are |
| // around, we cannot implement `weak_count` correctly since it |
| // necessarily requires accessing the strong count and weak count in an |
| // unsynchronized fashion. So this version is a bit racy. |
| self.inner().map(|inner| { |
| let strong = inner.strong.load(SeqCst); |
| let weak = inner.weak.load(SeqCst); |
| if strong == 0 { |
| // If the last `Arc` has *just* been dropped, it might not yet |
| // have removed the implicit weak count, so the value we get |
| // here might be 1 too high. |
| weak |
| } else { |
| // As long as there's still at least 1 `Arc` around, subtract |
| // the implicit weak pointer. |
| // Note that the last `Arc` might get dropped between the 2 |
| // loads we do above, removing the implicit weak pointer. This |
| // means that the value might be 1 too low here. In order to not |
| // return 0 here (which would happen if we're the only weak |
| // pointer), we guard against that specifically. |
| cmp::max(1, weak - 1) |
| } |
| }) |
| } |
| |
/// Returns `None` when the pointer is dangling and there is no allocated `ArcInner`
/// (i.e., when this `Weak` was created by `Weak::new`).
| #[inline] |
| fn inner(&self) -> Option<&ArcInner<T>> { |
| if is_dangling(self.ptr) { |
| None |
| } else { |
| Some(unsafe { self.ptr.as_ref() }) |
| } |
| } |
| |
| /// Returns `true` if the two `Weak`s point to the same value (not just |
| /// values that compare as equal), or if both don't point to any value |
| /// (because they were created with `Weak::new()`). |
| /// |
| /// # Notes |
| /// |
/// Since this compares pointers it means that two `Weak`s created by
/// `Weak::new()` will compare equal to each other, even though they don't
/// point to any value.
| /// |
| /// # Examples |
| /// |
| /// ``` |
| /// use std::sync::Arc; |
| /// |
| /// let first_rc = Arc::new(5); |
| /// let first = Arc::downgrade(&first_rc); |
| /// let second = Arc::downgrade(&first_rc); |
| /// |
| /// assert!(first.ptr_eq(&second)); |
| /// |
| /// let third_rc = Arc::new(5); |
| /// let third = Arc::downgrade(&third_rc); |
| /// |
| /// assert!(!first.ptr_eq(&third)); |
| /// ``` |
| /// |
| /// Comparing `Weak::new`. |
| /// |
| /// ``` |
| /// use std::sync::{Arc, Weak}; |
| /// |
| /// let first = Weak::new(); |
| /// let second = Weak::new(); |
| /// assert!(first.ptr_eq(&second)); |
| /// |
| /// let third_rc = Arc::new(()); |
| /// let third = Arc::downgrade(&third_rc); |
| /// assert!(!first.ptr_eq(&third)); |
| /// ``` |
| #[inline] |
| #[stable(feature = "weak_ptr_eq", since = "1.39.0")] |
| pub fn ptr_eq(&self, other: &Self) -> bool { |
| self.ptr.as_ptr() == other.ptr.as_ptr() |
| } |
| } |
| |
| #[stable(feature = "arc_weak", since = "1.4.0")] |
| impl<T: ?Sized> Clone for Weak<T> { |
| /// Makes a clone of the `Weak` pointer that points to the same value. |
| /// |
| /// # Examples |
| /// |
| /// ``` |
| /// use std::sync::{Arc, Weak}; |
| /// |
| /// let weak_five = Arc::downgrade(&Arc::new(5)); |
| /// |
| /// let _ = Weak::clone(&weak_five); |
| /// ``` |
| #[inline] |
| fn clone(&self) -> Weak<T> { |
| let inner = if let Some(inner) = self.inner() { |
| inner |
| } else { |
| return Weak { ptr: self.ptr }; |
| }; |
| // See comments in Arc::clone() for why this is relaxed. This can use a |
| // fetch_add (ignoring the lock) because the weak count is only locked |
// when there are *no other* weak pointers in existence. (So we can't be
// running this code in that case.)
| let old_size = inner.weak.fetch_add(1, Relaxed); |
| |
| // See comments in Arc::clone() for why we do this (for mem::forget). |
| if old_size > MAX_REFCOUNT { |
| unsafe { |
| abort(); |
| } |
| } |
| |
| return Weak { ptr: self.ptr }; |
| } |
| } |
| |
| #[stable(feature = "downgraded_weak", since = "1.10.0")] |
| impl<T> Default for Weak<T> { |
| /// Constructs a new `Weak<T>`, without allocating memory. |
| /// Calling [`upgrade`] on the return value always |
| /// gives [`None`]. |
| /// |
| /// [`None`]: ../../std/option/enum.Option.html#variant.None |
| /// [`upgrade`]: ../../std/sync/struct.Weak.html#method.upgrade |
| /// |
| /// # Examples |
| /// |
| /// ``` |
| /// use std::sync::Weak; |
| /// |
| /// let empty: Weak<i64> = Default::default(); |
| /// assert!(empty.upgrade().is_none()); |
| /// ``` |
| fn default() -> Weak<T> { |
| Weak::new() |
| } |
| } |
| |
| #[stable(feature = "arc_weak", since = "1.4.0")] |
| impl<T: ?Sized> Drop for Weak<T> { |
| /// Drops the `Weak` pointer. |
| /// |
| /// # Examples |
| /// |
| /// ``` |
| /// use std::sync::{Arc, Weak}; |
| /// |
| /// struct Foo; |
| /// |
| /// impl Drop for Foo { |
| /// fn drop(&mut self) { |
| /// println!("dropped!"); |
| /// } |
| /// } |
| /// |
| /// let foo = Arc::new(Foo); |
| /// let weak_foo = Arc::downgrade(&foo); |
| /// let other_weak_foo = Weak::clone(&weak_foo); |
| /// |
| /// drop(weak_foo); // Doesn't print anything |
| /// drop(foo); // Prints "dropped!" |
| /// |
| /// assert!(other_weak_foo.upgrade().is_none()); |
| /// ``` |
| fn drop(&mut self) { |
// If we find out that we were the last weak pointer, then it's time to
// deallocate the data entirely. See the discussion in Arc::drop() about
// the memory orderings.
| // |
| // It's not necessary to check for the locked state here, because the |
| // weak count can only be locked if there was precisely one weak ref, |
| // meaning that drop could only subsequently run ON that remaining weak |
| // ref, which can only happen after the lock is released. |
| let inner = if let Some(inner) = self.inner() { |
| inner |
| } else { |
| return |
| }; |
| |
| if inner.weak.fetch_sub(1, Release) == 1 { |
| atomic::fence(Acquire); |
| unsafe { |
| Global.dealloc(self.ptr.cast(), Layout::for_value(self.ptr.as_ref())) |
| } |
| } |
| } |
| } |
| |
| #[stable(feature = "rust1", since = "1.0.0")] |
| trait ArcEqIdent<T: ?Sized + PartialEq> { |
| fn eq(&self, other: &Arc<T>) -> bool; |
| fn ne(&self, other: &Arc<T>) -> bool; |
| } |
| |
| #[stable(feature = "rust1", since = "1.0.0")] |
| impl<T: ?Sized + PartialEq> ArcEqIdent<T> for Arc<T> { |
| #[inline] |
| default fn eq(&self, other: &Arc<T>) -> bool { |
| **self == **other |
| } |
| #[inline] |
| default fn ne(&self, other: &Arc<T>) -> bool { |
| **self != **other |
| } |
| } |
| |
| /// We're doing this specialization here, and not as a more general optimization on `&T`, because |
| /// it would otherwise add a cost to all equality checks on references. We assume that `Arc`s are |
| /// used to store large values that are slow to clone but also expensive to compare for equality, |
| /// so the pointer comparison is more likely to pay for itself. It is also more likely for two |
| /// `Arc` clones to point to the same value than for two `&T`s to do so. |
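| /// |
| /// As an illustrative (not normative) sketch of why the fast path is gated on `Eq`: a type that |
| /// is only `PartialEq`, such as `f32`, may not be equal to itself, so pointer equality alone |
| /// cannot justify answering `true`, and the unspecialized value comparison must run: |
| /// |
| /// ``` |
| /// use std::sync::Arc; |
| /// |
| /// let nan = Arc::new(std::f32::NAN); |
| /// // Both sides share the same allocation, but NaN is never equal to itself, |
| /// // so the comparison must inspect the values rather than the pointers. |
| /// assert!(nan != Arc::clone(&nan)); |
| /// ``` |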
| #[stable(feature = "rust1", since = "1.0.0")] |
| impl<T: ?Sized + Eq> ArcEqIdent<T> for Arc<T> { |
| #[inline] |
| fn eq(&self, other: &Arc<T>) -> bool { |
| Arc::ptr_eq(self, other) || **self == **other |
| } |
| |
| #[inline] |
| fn ne(&self, other: &Arc<T>) -> bool { |
| !Arc::ptr_eq(self, other) && **self != **other |
| } |
| } |
| |
| #[stable(feature = "rust1", since = "1.0.0")] |
| impl<T: ?Sized + PartialEq> PartialEq for Arc<T> { |
| /// Equality for two `Arc`s. |
| /// |
| /// Two `Arc`s are equal if their inner values are equal. |
| /// |
| /// If `T` also implements `Eq`, two `Arc`s that point to the same value are |
| /// always equal. |
| /// |
| /// # Examples |
| /// |
| /// ``` |
| /// use std::sync::Arc; |
| /// |
| /// let five = Arc::new(5); |
| /// |
| /// assert!(five == Arc::new(5)); |
| /// ``` |
| #[inline] |
| fn eq(&self, other: &Arc<T>) -> bool { |
| ArcEqIdent::eq(self, other) |
| } |
| |
| /// Inequality for two `Arc`s. |
| /// |
| /// Two `Arc`s are unequal if their inner values are unequal. |
| /// |
| /// If `T` also implements `Eq`, two `Arc`s that point to the same value are |
| /// never unequal. |
| /// |
| /// # Examples |
| /// |
| /// ``` |
| /// use std::sync::Arc; |
| /// |
| /// let five = Arc::new(5); |
| /// |
| /// assert!(five != Arc::new(6)); |
| /// ``` |
| #[inline] |
| fn ne(&self, other: &Arc<T>) -> bool { |
| ArcEqIdent::ne(self, other) |
| } |
| } |
| |
| #[stable(feature = "rust1", since = "1.0.0")] |
| impl<T: ?Sized + PartialOrd> PartialOrd for Arc<T> { |
| /// Partial comparison for two `Arc`s. |
| /// |
| /// The two are compared by calling `partial_cmp()` on their inner values. |
| /// |
| /// # Examples |
| /// |
| /// ``` |
| /// use std::sync::Arc; |
| /// use std::cmp::Ordering; |
| /// |
| /// let five = Arc::new(5); |
| /// |
| /// assert_eq!(Some(Ordering::Less), five.partial_cmp(&Arc::new(6))); |
| /// ``` |
| fn partial_cmp(&self, other: &Arc<T>) -> Option<Ordering> { |
| (**self).partial_cmp(&**other) |
| } |
| |
| /// Less-than comparison for two `Arc`s. |
| /// |
| /// The two are compared by calling `<` on their inner values. |
| /// |
| /// # Examples |
| /// |
| /// ``` |
| /// use std::sync::Arc; |
| /// |
| /// let five = Arc::new(5); |
| /// |
| /// assert!(five < Arc::new(6)); |
| /// ``` |
| fn lt(&self, other: &Arc<T>) -> bool { |
| *(*self) < *(*other) |
| } |
| |
| /// 'Less than or equal to' comparison for two `Arc`s. |
| /// |
| /// The two are compared by calling `<=` on their inner values. |
| /// |
| /// # Examples |
| /// |
| /// ``` |
| /// use std::sync::Arc; |
| /// |
| /// let five = Arc::new(5); |
| /// |
| /// assert!(five <= Arc::new(5)); |
| /// ``` |
| fn le(&self, other: &Arc<T>) -> bool { |
| *(*self) <= *(*other) |
| } |
| |
| /// Greater-than comparison for two `Arc`s. |
| /// |
| /// The two are compared by calling `>` on their inner values. |
| /// |
| /// # Examples |
| /// |
| /// ``` |
| /// use std::sync::Arc; |
| /// |
| /// let five = Arc::new(5); |
| /// |
| /// assert!(five > Arc::new(4)); |
| /// ``` |
| fn gt(&self, other: &Arc<T>) -> bool { |
| *(*self) > *(*other) |
| } |
| |
| /// 'Greater than or equal to' comparison for two `Arc`s. |
| /// |
| /// The two are compared by calling `>=` on their inner values. |
| /// |
| /// # Examples |
| /// |
| /// ``` |
| /// use std::sync::Arc; |
| /// |
| /// let five = Arc::new(5); |
| /// |
| /// assert!(five >= Arc::new(5)); |
| /// ``` |
| fn ge(&self, other: &Arc<T>) -> bool { |
| *(*self) >= *(*other) |
| } |
| } |
| |
| #[stable(feature = "rust1", since = "1.0.0")] |
| impl<T: ?Sized + Ord> Ord for Arc<T> { |
| /// Comparison for two `Arc`s. |
| /// |
| /// The two are compared by calling `cmp()` on their inner values. |
| /// |
| /// # Examples |
| /// |
| /// ``` |
| /// use std::sync::Arc; |
| /// use std::cmp::Ordering; |
| /// |
| /// let five = Arc::new(5); |
| /// |
| /// assert_eq!(Ordering::Less, five.cmp(&Arc::new(6))); |
| /// ``` |
| fn cmp(&self, other: &Arc<T>) -> Ordering { |
| (**self).cmp(&**other) |
| } |
| } |
| |
| #[stable(feature = "rust1", since = "1.0.0")] |
| impl<T: ?Sized + Eq> Eq for Arc<T> {} |
| |
| #[stable(feature = "rust1", since = "1.0.0")] |
| impl<T: ?Sized + fmt::Display> fmt::Display for Arc<T> { |
| fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { |
| fmt::Display::fmt(&**self, f) |
| } |
| } |
| |
| #[stable(feature = "rust1", since = "1.0.0")] |
| impl<T: ?Sized + fmt::Debug> fmt::Debug for Arc<T> { |
| fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { |
| fmt::Debug::fmt(&**self, f) |
| } |
| } |
| |
| #[stable(feature = "rust1", since = "1.0.0")] |
| impl<T: ?Sized> fmt::Pointer for Arc<T> { |
| fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { |
| fmt::Pointer::fmt(&(&**self as *const T), f) |
| } |
| } |
| |
| #[stable(feature = "rust1", since = "1.0.0")] |
| impl<T: Default> Default for Arc<T> { |
| /// Creates a new `Arc<T>`, with the `Default` value for `T`. |
| /// |
| /// # Examples |
| /// |
| /// ``` |
| /// use std::sync::Arc; |
| /// |
| /// let x: Arc<i32> = Default::default(); |
| /// assert_eq!(*x, 0); |
| /// ``` |
| fn default() -> Arc<T> { |
| Arc::new(Default::default()) |
| } |
| } |
| |
| #[stable(feature = "rust1", since = "1.0.0")] |
| impl<T: ?Sized + Hash> Hash for Arc<T> { |
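| /// Hashes the `Arc<T>` by hashing the pointed-to value, so an `Arc<T>` and the |
| /// `T` it wraps produce the same hash. A small illustrative sketch using the |
| /// standard `DefaultHasher` (any `Hasher` would do): |
| /// |
| /// ``` |
| /// use std::collections::hash_map::DefaultHasher; |
| /// use std::hash::{Hash, Hasher}; |
| /// use std::sync::Arc; |
| /// |
| /// fn hash_one<T: Hash>(value: &T) -> u64 { |
| /// let mut hasher = DefaultHasher::new(); |
| /// value.hash(&mut hasher); |
| /// hasher.finish() |
| /// } |
| /// |
| /// assert_eq!(hash_one(&Arc::new(7)), hash_one(&7)); |
| /// ``` |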
| fn hash<H: Hasher>(&self, state: &mut H) { |
| (**self).hash(state) |
| } |
| } |
| |
| #[stable(feature = "from_for_ptrs", since = "1.6.0")] |
| impl<T> From<T> for Arc<T> { |
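| /// Converts a `T` into an `Arc<T>` by moving the value into a new |
| /// reference-counted allocation, equivalent to `Arc::new(t)`. For example: |
| /// |
| /// ``` |
| /// use std::sync::Arc; |
| /// |
| /// let x = 5; |
| /// let arc = Arc::new(5); |
| /// |
| /// assert_eq!(Arc::from(x), arc); |
| /// ``` |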
| fn from(t: T) -> Self { |
| Arc::new(t) |
| } |
| } |
| |
| #[stable(feature = "shared_from_slice", since = "1.21.0")] |
| impl<T: Clone> From<&[T]> for Arc<[T]> { |
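| /// Allocates a new `Arc<[T]>` and clones each element of the slice into it. |
| /// For example: |
| /// |
| /// ``` |
| /// use std::sync::Arc; |
| /// |
| /// let original: &[i32] = &[1, 2, 3]; |
| /// let shared: Arc<[i32]> = Arc::from(original); |
| /// assert_eq!(&[1, 2, 3], &shared[..]); |
| /// ``` |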
| #[inline] |
| fn from(v: &[T]) -> Arc<[T]> { |
| <Self as ArcFromSlice<T>>::from_slice(v) |
| } |
| } |
| |
| #[stable(feature = "shared_from_slice", since = "1.21.0")] |
| impl From<&str> for Arc<str> { |
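| /// Allocates a new `Arc<str>` and copies the string data into it. For example: |
| /// |
| /// ``` |
| /// use std::sync::Arc; |
| /// |
| /// let shared: Arc<str> = Arc::from("eggplant"); |
| /// assert_eq!("eggplant", &shared[..]); |
| /// ``` |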
| #[inline] |
| fn from(v: &str) -> Arc<str> { |
| let arc = Arc::<[u8]>::from(v.as_bytes()); |
| unsafe { Arc::from_raw(Arc::into_raw(arc) as *const str) } |
| } |
| } |
| |
| #[stable(feature = "shared_from_slice", since = "1.21.0")] |
| impl From<String> for Arc<str> { |
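| /// Allocates a new `Arc<str>` and copies the `String`'s contents into it; the |
| /// original `String` is then dropped. For example: |
| /// |
| /// ``` |
| /// use std::sync::Arc; |
| /// |
| /// let unique: String = "eggplant".to_owned(); |
| /// let shared: Arc<str> = Arc::from(unique); |
| /// assert_eq!("eggplant", &shared[..]); |
| /// ``` |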
| #[inline] |
| fn from(v: String) -> Arc<str> { |
| Arc::from(&v[..]) |
| } |
| } |
| |
| #[stable(feature = "shared_from_slice", since = "1.21.0")] |
| impl<T: ?Sized> From<Box<T>> for Arc<T> { |
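| /// Moves the value out of the box into a new `Arc<T>`, freeing the box's |
| /// allocation. For example: |
| /// |
| /// ``` |
| /// use std::sync::Arc; |
| /// |
| /// let unique: Box<str> = Box::from("eggplant"); |
| /// let shared: Arc<str> = Arc::from(unique); |
| /// assert_eq!("eggplant", &shared[..]); |
| /// ``` |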
| #[inline] |
| fn from(v: Box<T>) -> Arc<T> { |
| Arc::from_box(v) |
| } |
| } |
| |
| #[stable(feature = "shared_from_slice", since = "1.21.0")] |
| impl<T> From<Vec<T>> for Arc<[T]> { |
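| /// Moves the vector's elements into a newly allocated `Arc<[T]>` and then frees |
| /// the vector's buffer. For example: |
| /// |
| /// ``` |
| /// use std::sync::Arc; |
| /// |
| /// let unique: Vec<i32> = vec![1, 2, 3]; |
| /// let shared: Arc<[i32]> = Arc::from(unique); |
| /// assert_eq!(&[1, 2, 3], &shared[..]); |
| /// ``` |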
| #[inline] |
| fn from(mut v: Vec<T>) -> Arc<[T]> { |
| unsafe { |
| let arc = Arc::copy_from_slice(&v); |
| |
| // Allow the Vec to free its memory, but not destroy its contents |
| v.set_len(0); |
| |
| arc |
| } |
| } |
| } |
| |
| #[unstable(feature = "boxed_slice_try_from", issue = "0")] |
| impl<T, const N: usize> TryFrom<Arc<[T]>> for Arc<[T; N]> |
| where |
| [T; N]: LengthAtMost32, |
| { |
| type Error = Arc<[T]>; |
| |
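| /// A sketch of the fallible conversion; since this impl is unstable, the |
| /// example assumes the `boxed_slice_try_from` nightly feature is enabled. |
| /// The conversion succeeds only when the slice's length is exactly `N`. |
| /// |
| /// ``` |
| /// #![feature(boxed_slice_try_from)] |
| /// use std::convert::TryFrom; |
| /// use std::sync::Arc; |
| /// |
| /// let slice: Arc<[u8]> = Arc::from(vec![1, 2, 3]); |
| /// let array = Arc::<[u8; 3]>::try_from(slice).expect("length matches"); |
| /// assert_eq!(*array, [1, 2, 3]); |
| /// ``` |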
| fn try_from(boxed_slice: Arc<[T]>) -> Result<Self, Self::Error> { |
| if boxed_slice.len() == N { |
| Ok(unsafe { Arc::from_raw(Arc::into_raw(boxed_slice) as *mut [T; N]) }) |
| } else { |
| Err(boxed_slice) |
| } |
| } |
| } |
| |
| #[stable(feature = "shared_from_iter", since = "1.37.0")] |
| impl<T> iter::FromIterator<T> for Arc<[T]> { |
| /// Takes each element in the `Iterator` and collects it into an `Arc<[T]>`. |
| /// |
| /// # Performance characteristics |
| /// |
| /// ## The general case |
| /// |
| /// In the general case, collecting into `Arc<[T]>` is done by first |
| /// collecting into a `Vec<T>`. That is, when writing the following: |
| /// |
| /// ```rust |
| /// # use std::sync::Arc; |
| /// let evens: Arc<[u8]> = (0..10).filter(|&x| x % 2 == 0).collect(); |
| /// # assert_eq!(&*evens, &[0, 2, 4, 6, 8]); |
| /// ``` |
| /// |
| /// this behaves as if we wrote: |
| /// |
| /// ```rust |
| /// # use std::sync::Arc; |
| /// let evens: Arc<[u8]> = (0..10).filter(|&x| x % 2 == 0) |
| /// .collect::<Vec<_>>() // The first set of allocations happens here. |
| /// .into(); // A second allocation for `Arc<[T]>` happens here. |
| /// # assert_eq!(&*evens, &[0, 2, 4, 6, 8]); |
| /// ``` |
| /// |
| /// This will allocate as many times as needed for constructing the `Vec<T>` |
| /// and then it will allocate once for turning the `Vec<T>` into the `Arc<[T]>`. |
| /// |
| /// ## Iterators of known length |
| /// |
| /// When your `Iterator` implements `TrustedLen` and is of an exact size, |
| /// a single allocation will be made for the `Arc<[T]>`. For example: |
| /// |
| /// ```rust |
| /// # use std::sync::Arc; |
| /// let evens: Arc<[u8]> = (0..10).collect(); // Just a single allocation happens here. |
| /// # assert_eq!(&*evens, &*(0..10).collect::<Vec<_>>()); |
| /// ``` |
| fn from_iter<I: iter::IntoIterator<Item = T>>(iter: I) -> Self { |
| ArcFromIter::from_iter(iter.into_iter()) |
| } |
| } |
| |
| /// Specialization trait used for collecting into `Arc<[T]>`. |
| trait ArcFromIter<T, I> { |
| fn from_iter(iter: I) -> Self; |
| } |
| |
| impl<T, I: Iterator<Item = T>> ArcFromIter<T, I> for Arc<[T]> { |
| default fn from_iter(iter: I) -> Self { |
| iter.collect::<Vec<T>>().into() |
| } |
| } |
| |
| impl<T, I: iter::TrustedLen<Item = T>> ArcFromIter<T, I> for Arc<[T]> { |
| default fn from_iter(iter: I) -> Self { |
| // This is the case for a `TrustedLen` iterator. |
| let (low, high) = iter.size_hint(); |
| if let Some(high) = high { |
| debug_assert_eq!( |
| low, high, |
| "TrustedLen iterator's size hint is not exact: {:?}", |
| (low, high) |
| ); |
| |
| unsafe { |
| // SAFETY: `from_iter_exact` requires an iterator that yields exactly `low` |
| // items; the `TrustedLen` contract guarantees that `size_hint` is exact, |
| // and we have checked above that `low == high`. |
| Arc::from_iter_exact(iter, low) |
| } |
| } else { |
| // Fall back to normal implementation. |
| iter.collect::<Vec<T>>().into() |
| } |
| } |
| } |
| |
| impl<'a, T: 'a + Clone> ArcFromIter<&'a T, slice::Iter<'a, T>> for Arc<[T]> { |
| fn from_iter(iter: slice::Iter<'a, T>) -> Self { |
| // Delegate to `impl<T: Clone> From<&[T]> for Arc<[T]>`. |
| // |
| // In the case that `T: Copy`, we get to use `ptr::copy_nonoverlapping` |
| // which is even more performant. |
| // |
| // In the fall-back case we have `T: Clone`. This is still better |
| // than the `TrustedLen` implementation as slices have a known length |
| // and so we get to avoid calling `size_hint` and avoid the branching. |
| iter.as_slice().into() |
| } |
| } |
| |
| #[stable(feature = "rust1", since = "1.0.0")] |
| impl<T: ?Sized> borrow::Borrow<T> for Arc<T> { |
| fn borrow(&self) -> &T { |
| &**self |
| } |
| } |
| |
| #[stable(since = "1.5.0", feature = "smart_ptr_as_ref")] |
| impl<T: ?Sized> AsRef<T> for Arc<T> { |
| fn as_ref(&self) -> &T { |
| &**self |
| } |
| } |
| |
| #[stable(feature = "pin", since = "1.33.0")] |
| impl<T: ?Sized> Unpin for Arc<T> { } |
| |
| /// Computes the offset of the data field within `ArcInner`. |
| unsafe fn data_offset<T: ?Sized>(ptr: *const T) -> isize { |
| // Align the unsized value to the end of the `ArcInner`. |
| // Because it is `?Sized`, it will always be the last field in memory. |
| data_offset_align(align_of_val(&*ptr)) |
| } |
| |
| /// Computes the offset of the data field within `ArcInner`. |
| /// |
| /// Unlike [`data_offset`], this doesn't need the pointer, but it works only on `T: Sized`. |
| fn data_offset_sized<T>() -> isize { |
| data_offset_align(align_of::<T>()) |
| } |
| |
| #[inline] |
| fn data_offset_align(align: usize) -> isize { |
| let layout = Layout::new::<ArcInner<()>>(); |
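| // The value is stored directly after the `ArcInner` header (the strong and |
| // weak counts), padded so that it is suitably aligned for the value's type. |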
| (layout.size() + layout.padding_needed_for(align)) as isize |
| } |