pub unsafe auto trait Send { }
Types that can be transferred across thread boundaries.

This trait is automatically implemented when the compiler determines it’s appropriate.

An example of a non-Send type is the reference-counting pointer rc::Rc. If two threads attempt to clone Rcs that point to the same reference-counted value, they might try to update the reference count at the same time, which is undefined behavior because Rc doesn’t use atomic operations. Its cousin sync::Arc does use atomic operations (incurring some overhead) and thus is Send.

See the Nomicon and the Sync trait for more details.
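
For example, an Arc can be moved into a spawned thread because it is Send, whereas the same code with Rc is rejected by the compiler. A minimal sketch, not taken from this page:

use std::rc::Rc;
use std::sync::Arc;
use std::thread;

fn main() {
    // Arc<i32> is Send, so it can be moved into another thread.
    let shared = Arc::new(42);
    let handle = {
        let shared = Arc::clone(&shared);
        thread::spawn(move || println!("from thread: {shared}"))
    };
    handle.join().unwrap();

    // Rc<i32> is !Send: uncommenting the two lines below fails to compile with
    // "`Rc<i32>` cannot be sent between threads safely".
    // let local = Rc::new(42);
    // thread::spawn(move || println!("{local}"));
    let _still_single_threaded = Rc::new(42);
}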
Implementors
impl !Send for LocalWaker
impl !Send for Args
impl !Send for ArgsOs
impl Send for CompressedResponse
impl Send for LazyRequestBody
impl Send for Collection
impl Send for kvarn::shutdown::Manager
impl Send for kvarn::prelude::Bytes
impl Send for BytesMut
impl Send for kvarn::prelude::utils::prelude::compact_str::Drain<'_>
impl Send for Waker
impl Send for alloc::string::Drain<'_>
impl Send for AbortHandle
impl Send for Algorithm
impl Send for AtomicWaker
impl Send for AtomicWaker
impl Send for CCtx<'_>
impl Send for Collector
impl Send for Context
impl Send for DCtx<'_>
impl Send for EcdsaKeyPair
impl Send for Key
impl Send for KeyPair
impl Send for Parker
impl Send for PrivateKey
impl Send for PublicKey
impl Send for Select<'_>
impl Send for Unparker
impl<'a> Send for IoSlice<'a>
impl<'a> Send for IoSliceMut<'a>
impl<'a> Send for CDict<'a>
impl<'a> Send for DDict<'a>
impl<'a> Send for Notified<'a>
impl<'a, 'b, K, Q, V, S, A> Send for OccupiedEntryRef<'a, 'b, K, Q, V, S, A>
impl<'a, 'i, K, S, M> Send for Iter<'i, K, S, M>
impl<'a, 'i, K, V> Send for Iter<'i, K, V>
impl<'a, 'i, K, V, S, M> Send for Iter<'i, K, V, S, M>
impl<'a, 'i, K, V, S, M> Send for IterMut<'i, K, V, S, M>
impl<'a, K, V> Send for OccupiedEntry<'a, K, V>
impl<'a, K, V> Send for Ref<'a, K, V>
impl<'a, K, V> Send for RefMulti<'a, K, V>
impl<'a, K, V> Send for RefMut<'a, K, V>
impl<'a, K, V> Send for RefMutMulti<'a, K, V>
impl<'a, K, V> Send for VacantEntry<'a, K, V>
impl<'a, R, T> Send for MappedMutexGuard<'a, R, T>
impl<'a, R, T> Send for MappedRwLockReadGuard<'a, R, T>
impl<'a, R, T> Send for MappedRwLockWriteGuard<'a, R, T>
impl<'a, T> Send for kvarn::prelude::utils::prelude::header::Drain<'a, T> where T: Send,
impl<'a, T> Send for kvarn::prelude::utils::prelude::header::Iter<'a, T> where T: Sync,
impl<'a, T> Send for kvarn::prelude::utils::prelude::header::IterMut<'a, T> where T: Send,
impl<'a, T> Send for ValueDrain<'a, T> where T: Send,
impl<'a, T> Send for ValueIterMut<'a, T> where T: Send,
impl<'a, T> Send for ArcBorrow<'a, T>
impl<'a, T> Send for Drain<'a, T> where T: Send + Array,
impl<'a, T> Send for MappedMutexGuard<'a, T>
impl<A> Send for SmallVec<A> where A: Array, <A as Array>::Item: Send,
impl<A, B> Send for ArcUnion<A, B>
impl<Dyn> Send for DynMetadata<Dyn> where Dyn: ?Sized,
impl<Fut> Send for FuturesUnordered<Fut> where Fut: Send,
impl<Fut> Send for IntoIter<Fut>
impl<Fut> Send for IterPinMut<'_, Fut> where Fut: Send,
impl<Fut> Send for IterPinRef<'_, Fut> where Fut: Send,
impl<H, T> Send for ThinArc<H, T>
impl<Id> Send for DecapsulationKey<Id> where Id: AlgorithmIdentifier,
impl<Id> Send for EncapsulationKey<Id> where Id: AlgorithmIdentifier,
impl<K, S> Send for OwningIter<K, S>
impl<K, V> Send for IterMut<'_, K, V>
impl<K, V, S> Send for Cache<K, V, S>
impl<K, V, S> Send for OwningIter<K, V, S>
impl<K, V, S> Send for SegmentedCache<K, V, S>
impl<K, V, S, A> Send for OccupiedEntry<'_, K, V, S, A>
impl<K, V, S, A> Send for RawOccupiedEntryMut<'_, K, V, S, A>
impl<R, G> Send for RawReentrantMutex<R, G>
impl<R, G, T> Send for ReentrantMutex<R, G, T>
impl<R, T> Send for Mutex<R, T>
impl<R, T> Send for RwLock<R, T>
impl<S, C> Send for IoUring<S, C> where S: EntryMarker, C: EntryMarker,
impl<T> !Send for *const T where T: ?Sized,
impl<T> !Send for *mut T where T: ?Sized,
impl<T> !Send for NonNull<T> where T: ?Sized,
NonNull pointers are not Send because the data they reference may be aliased.
impl<T> !Send for std::sync::mutex::MappedMutexGuard<'_, T> where T: ?Sized,
impl<T> !Send for std::sync::mutex::MutexGuard<'_, T> where T: ?Sized,
impl<T> !Send for ReentrantLockGuard<'_, T> where T: ?Sized,
impl<T> !Send for std::sync::rwlock::MappedRwLockReadGuard<'_, T> where T: ?Sized,
impl<T> !Send for std::sync::rwlock::MappedRwLockWriteGuard<'_, T> where T: ?Sized,
impl<T> !Send for std::sync::rwlock::RwLockReadGuard<'_, T> where T: ?Sized,
impl<T> !Send for std::sync::rwlock::RwLockWriteGuard<'_, T> where T: ?Sized,
impl<T> Send for &T where T: Sync + ?Sized,
impl<T> Send for kvarn::prelude::Mutex<T>
impl<T> Send for kvarn::prelude::RwLock<T>
impl<T> Send for Cell<T> where T: Send + ?Sized,
impl<T> Send for RefCell<T> where T: Send + ?Sized,
impl<T> Send for NonZero<T> where T: ZeroablePrimitive + Send,
impl<T> Send for ChunksExactMut<'_, T> where T: Send,
impl<T> Send for ChunksMut<'_, T> where T: Send,
impl<T> Send for kvarn::prelude::utils::prelude::compact_str::core::slice::Iter<'_, T> where T: Sync,
impl<T> Send for kvarn::prelude::utils::prelude::compact_str::core::slice::IterMut<'_, T> where T: Send,
impl<T> Send for RChunksExactMut<'_, T> where T: Send,
impl<T> Send for RChunksMut<'_, T> where T: Send,
impl<T> Send for AtomicPtr<T>
Available on target_has_atomic_load_store="ptr" only.
impl<T> Send for ThinBox<T>
ThinBox<T> is Send if T is Send because the data is owned.
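
Because Send is an auto trait, the implementations listed above mostly follow from composition: a type is automatically Send when every field it contains is Send, and a single non-Send field (such as an Rc) removes the implementation. A minimal sketch illustrating this, where the assert_send helper is a hypothetical illustration rather than part of any crate above:

use std::rc::Rc;
use std::sync::Arc;

// Hypothetical helper: the call compiles only if T is Send.
fn assert_send<T: Send>() {}

struct Owned {
    data: Vec<u8>,    // Vec<u8> is Send,
    shared: Arc<str>, // and so is Arc<str>, so Owned is automatically Send.
}

struct NotSend {
    counter: Rc<u32>, // Rc<u32> is !Send, which makes NotSend !Send.
}

fn main() {
    assert_send::<Owned>();
    // assert_send::<NotSend>(); // error: `Rc<u32>` cannot be sent between threads safely
    let _values = (
        Owned { data: Vec::new(), shared: Arc::from("hi") },
        NotSend { counter: Rc::new(0) },
    );
}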