Auto merge of #112006 - kylematsuda:earlybinder-private, r=jackh726

Make `EarlyBinder`'s inner value private

Currently, `EarlyBinder(T)`'s inner value is public, which allows implicitly skipping the binder by indexing into the tuple struct (i.e., `x.0`). `@lcnr` suggested making `EarlyBinder`'s inner value private so users are required to explicitly call `skip_binder` (https://github.com/rust-lang/rust/issues/105779#issuecomment-1549933424).

This PR makes the inner value private, adds `EarlyBinder::new` for constructing a new instance, and replaces uses of `x.0` with `x.skip_binder()` (or similar). It also adds documentation to `EarlyBinder::skip_binder` explaining how to skip the binder of `&EarlyBinder<T>` to get `&T`, now that the inner value is private (previously we could just do `&x.0`).
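
For illustration, here is a minimal, self-contained sketch of the pattern (simplified field and method names, not rustc's actual `EarlyBinder` definition): with the field private, callers must go through `skip_binder`, and `&T` is obtained via an `as_ref`-style adapter rather than `&x.0`:

```rust
// Minimal sketch only; not rustc's `EarlyBinder`. It illustrates why a
// private field forces callers through `skip_binder`.
#[derive(Clone, Copy, Debug)]
pub struct EarlyBinder<T> {
    value: T, // private: `x.0` is no longer possible outside the defining module
}

impl<T> EarlyBinder<T> {
    /// Wrap a value in the binder.
    pub fn new(value: T) -> EarlyBinder<T> {
        EarlyBinder { value }
    }

    /// Explicitly skip the binder and take the inner value.
    pub fn skip_binder(self) -> T {
        self.value
    }

    /// Convert `&EarlyBinder<T>` into `EarlyBinder<&T>`, so callers can get
    /// `&T` via `x.as_ref().skip_binder()` instead of the old `&x.0`.
    pub fn as_ref(&self) -> EarlyBinder<&T> {
        EarlyBinder { value: &self.value }
    }
}

fn main() {
    let bound = EarlyBinder::new(vec![1u32, 2, 3]);
    let inner: &Vec<u32> = bound.as_ref().skip_binder();
    assert_eq!(inner.len(), 3);
}
```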

r? `@lcnr`
diff --git a/compiler/rustc_data_structures/src/sharded.rs b/compiler/rustc_data_structures/src/sharded.rs
index 7ed70ba..40cbf14 100644
--- a/compiler/rustc_data_structures/src/sharded.rs
+++ b/compiler/rustc_data_structures/src/sharded.rs
@@ -1,4 +1,6 @@
 use crate::fx::{FxHashMap, FxHasher};
+#[cfg(parallel_compiler)]
+use crate::sync::is_dyn_thread_safe;
 use crate::sync::{CacheAligned, Lock, LockGuard};
 use std::borrow::Borrow;
 use std::collections::hash_map::RawEntryMut;
@@ -18,6 +20,11 @@
 
 /// An array of cache-line aligned inner locked structures with convenience methods.
 pub struct Sharded<T> {
+    /// This mask is used to ensure that accesses are inbounds of `shards`.
+    /// When dynamic thread safety is off, this field is set to 0 causing only
+    /// a single shard to be used for greater cache efficiency.
+    #[cfg(parallel_compiler)]
+    mask: usize,
     shards: [CacheAligned<Lock<T>>; SHARDS],
 }
 
@@ -31,31 +38,54 @@
 impl<T> Sharded<T> {
     #[inline]
     pub fn new(mut value: impl FnMut() -> T) -> Self {
-        Sharded { shards: [(); SHARDS].map(|()| CacheAligned(Lock::new(value()))) }
+        Sharded {
+            #[cfg(parallel_compiler)]
+            mask: if is_dyn_thread_safe() { SHARDS - 1 } else { 0 },
+            shards: [(); SHARDS].map(|()| CacheAligned(Lock::new(value()))),
+        }
+    }
+
+    #[inline(always)]
+    fn mask(&self) -> usize {
+        #[cfg(parallel_compiler)]
+        {
+            if SHARDS == 1 { 0 } else { self.mask }
+        }
+        #[cfg(not(parallel_compiler))]
+        {
+            0
+        }
+    }
+
+    #[inline(always)]
+    fn count(&self) -> usize {
+        // `self.mask` is always one below the used shard count
+        self.mask() + 1
     }
 
     /// The shard is selected by hashing `val` with `FxHasher`.
     #[inline]
     pub fn get_shard_by_value<K: Hash + ?Sized>(&self, val: &K) -> &Lock<T> {
-        if SHARDS == 1 { &self.shards[0].0 } else { self.get_shard_by_hash(make_hash(val)) }
+        self.get_shard_by_hash(if SHARDS == 1 { 0 } else { make_hash(val) })
     }
 
     #[inline]
     pub fn get_shard_by_hash(&self, hash: u64) -> &Lock<T> {
-        &self.shards[get_shard_index_by_hash(hash)].0
+        self.get_shard_by_index(get_shard_hash(hash))
     }
 
     #[inline]
     pub fn get_shard_by_index(&self, i: usize) -> &Lock<T> {
-        &self.shards[i].0
+        // SAFETY: The index gets ANDed with the mask, ensuring it is always inbounds.
+        unsafe { &self.shards.get_unchecked(i & self.mask()).0 }
     }
 
     pub fn lock_shards(&self) -> Vec<LockGuard<'_, T>> {
-        (0..SHARDS).map(|i| self.shards[i].0.lock()).collect()
+        (0..self.count()).map(|i| self.get_shard_by_index(i).lock()).collect()
     }
 
     pub fn try_lock_shards(&self) -> Option<Vec<LockGuard<'_, T>>> {
-        (0..SHARDS).map(|i| self.shards[i].0.try_lock()).collect()
+        (0..self.count()).map(|i| self.get_shard_by_index(i).try_lock()).collect()
     }
 }
 
@@ -136,11 +166,9 @@
 /// `hash` can be computed with any hasher, so long as that hasher is used
 /// consistently for each `Sharded` instance.
 #[inline]
-#[allow(clippy::modulo_one)]
-pub fn get_shard_index_by_hash(hash: u64) -> usize {
+fn get_shard_hash(hash: u64) -> usize {
     let hash_len = mem::size_of::<usize>();
     // Ignore the top 7 bits as hashbrown uses these and get the next SHARD_BITS highest bits.
     // hashbrown also uses the lowest bits, so we can't use those
-    let bits = (hash >> (hash_len * 8 - 7 - SHARD_BITS)) as usize;
-    bits % SHARDS
+    (hash >> (hash_len * 8 - 7 - SHARD_BITS)) as usize
 }
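
For reference, a standalone sketch of the masking scheme the patch relies on (hypothetical names and a `Vec`-backed shard array, not the rustc `Sharded` module): with a power-of-two shard count, ANDing any index with `mask` keeps it in bounds, and a mask of 0 collapses all accesses onto shard 0, which is the single-shard mode used when dynamic thread safety is off:

```rust
// Sketch only; not the rustc implementation. Demonstrates the mask trick:
// `index & mask` is always < SHARDS, and `mask == 0` routes everything to
// shard 0 for the single-shard configuration.
const SHARD_BITS: usize = 5;
const SHARDS: usize = 1 << SHARD_BITS; // 32, a power of two

struct Sharded<T> {
    mask: usize,
    shards: Vec<T>,
}

impl<T: Default> Sharded<T> {
    fn new(single_shard: bool) -> Self {
        Sharded {
            mask: if single_shard { 0 } else { SHARDS - 1 },
            shards: (0..SHARDS).map(|_| T::default()).collect(),
        }
    }

    fn get_shard_by_hash(&self, hash: u64) -> &T {
        // Take SHARD_BITS bits just below the top 7 (hashbrown uses the top 7
        // and the lowest bits), then AND with the mask so the index is always
        // in bounds of `shards`.
        let bits = (hash >> (usize::BITS as usize - 7 - SHARD_BITS)) as usize;
        &self.shards[bits & self.mask]
    }

    fn used_shards(&self) -> usize {
        // `mask` is one below the number of shards actually in use.
        self.mask + 1
    }
}

fn main() {
    let single: Sharded<u32> = Sharded::new(true);
    let multi: Sharded<u32> = Sharded::new(false);
    assert_eq!(single.used_shards(), 1);
    assert_eq!(multi.used_shards(), SHARDS);
    // Every hash lands on shard 0 in single-shard mode.
    let _ = single.get_shard_by_hash(u64::MAX);
    let _ = multi.get_shard_by_hash(0x1234_5678_9abc_def0);
}
```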